Basic block renumbering removal.

From-SVN: r53522
Zdenek Dvorak 2002-05-16 10:34:53 -07:00 committed by Richard Henderson
parent 5a566bed2b
commit 355e4ec445
45 changed files with 1948 additions and 1948 deletions

gcc/ChangeLog

@ -1,3 +1,151 @@
2002-05-16 Zdenek Dvorak <rakdver@atrey.karlin.mff.cuni.cz>
Basic block renumbering removal:
* basic-block.h (struct basic_block_def): Renamed index to sindex,
added prev_bb and next_bb fields.
(n_basic_blocks): Renamed to num_basic_blocks.
(last_basic_block): New, index of last basic block.
(FOR_BB_BETWEEN, FOR_ALL_BB, FOR_ALL_BB_REVERSE): New macros for
traversing basic block chain.
(BLOCK_NUM): index -> sindex.
(create_basic_block_structure, create_basic_block): Declaration changed.
(debug_num2bb): Declare.
(expunge_block_nocompact): Declaration removed.
(link_block, unlink_block, compact_blocks): Declare.
* bb-reorder.c (make_reorder_chain, make_reorder_chain_1): Modified.
* cfg.c (entry_exit_blocks): Initialize new fields.
(clear_edges, alloc_block, expunge_block, cached_make_edge,
redirect_edge_pred, dump_flow_info, dump_edge_info,
alloc_aux_for_blocks, clear_aux_for_blocks, alloc_aux_for_edges,
free_aux_for_edges): Modified.
(link_block, unlink_block, compact_blocks, debug_num2bb): New.
(expunge_block_nocompact): Removed.
* cfganal.c (can_fallthru, mark_dfs_back_edges, flow_call_edges_add,
find_unreachable_blocks, create_edge_list, print_edge_list,
verify_edge_list, flow_edge_list_print, remove_fake_successors,
remove_fake_edges, flow_reverse_top_sort_order_compute,
flow_depth_first_order_compute, flow_preorder_transversal_compute,
flow_dfs_compute_reverse_init, flow_dfs_compute_reverse_add_bb,
flow_dfs_compute_reverse_execute): Modified.
* cfgbuild.c (make_edges, make_eh_edge, find_basic_blocks_1,
find_basic_blocks, find_many_sub_basic_blocks, find_sub_basic_blocks):
Modified.
* cfgcleanup.c (try_simplify_condjump, try_forward_edges,
merge_blocks_move_predecessor_nojumps,
merge_blocks_move_successor_nojumps, merge_blocks,
outgoing_edges_match, try_crossjump_to_edge, try_crossjump_bb,
try_optimize_cfg, delete_unreachable_blocks, cleanup_cfg): Modified.
* cfglayout.c (skip_insns_after_block, label_for_bb,
record_effective_endpoints, scope_to_insns_finalize,
fixup_reorder_chain, verify_insn_chain, cleanup_unconditional_jumps,
fixup_fallthru_exit_predecessor, cfg_layout_redirect_edge,
cfg_layout_duplicate_bb): Modified.
* cfgloop.c (flow_loops_cfg_dump, flow_loop_dump, flow_loops_dump,
flow_loop_entry_edges_find, flow_loop_exit_edges_find,
flow_loop_nodes_find, flow_loop_pre_header_find, flow_loop_scan,
flow_loops_find, flow_loop_outside_edge_p): Modified.
* cfgrtl.c (create_basic_block_structure, create_basic_block,
flow_delete_block, compute_bb_for_insn, split_block,
try_redirect_by_replacing_jump, redirect_edge_and_branch,
force_nonfallthru_and_redirect, tidy_fallthru_edge,
back_edge_of_syntactic_loop_p, split_edge, commit_one_edge_insertion,
commit_edge_insertions, commit_edge_insertions_watch_calls,
dump_bb, print_rtl_with_bb, verify_flow_info, purge_dead_edges,
purge_all_dead_edges): Modified.
* combine.c (combine_instructions, set_nonzero_bits_and_sign_copies,
try_combine, nonzero_bits, num_sign_bit_copies, get_last_value_validate,
get_last_value, reg_dead_at_p, distribute_notes, distribute_links):
Modified.
* conflict.c (conflict_graph_compute): Modified.
* df.c (FOR_ALL_BBS): Removed.
(df_bitmaps_alloc, df_bitmaps_free, df_alloc, df_analyse_1,
df_modified_p, df_analyse, df_refs_unlink, df_insn_modify,
df_dump, hybrid_search_bitmap, iterative_dataflow_sbitmap): Modified.
* df.h (DF_BB_INFO, DF_REF_BBNO): Modified.
* dominance.c (init_dom_info, calc_dfs_tree_nonrec, calc_dfs_tree,
calc_idoms, idoms_to_doms, calculate_dominance_info): Modified.
* final.c (compute_alignments, final_scan_insn): Modified.
* flow.c (verify_local_live_at_start, update_life_info,
update_life_info_in_dirty_blocks, free_basic_block_vars,
delete_noop_moves, calculate_global_regs_live,
initialize_uninitialized_subregs, allocate_bb_life_data,
regno_uninitialized, regno_clobbered_at_setjmp, mark_set_1,
mark_used_reg, count_or_remove_death_notes): Modified.
* function.c (thread_prologue_and_epilogue_insns): Modified.
* gcse.c (struct null_pointer_info): Change type of current_block
to basic_block.
(gcse_main, alloc_gcse_mem, compute_local_properties, compute_sets,
oprs_unchanged_p, load_killed_in_block_p, record_last_reg_set_info,
compute_hash_table, alloc_rd_mem, handle_rd_kill_set, compute_kill_rd,
alloc_avail_expr_mem, expr_killed_p, compute_ae_kill,
expr_reaches_here_p_work, expr_reaches_here_p, handle_avail_expr,
classic_gcse, one_classic_gcse_pass, compute_transp, cprop,
one_cprop_pass, compute_pre_data, pre_expr_reaches_here_p_work,
pre_expr_reaches_here_p, insert_insn_end_bb, pre_edge_insert,
pre_delete, one_pre_gcse_pass, compute_transpout,
invalidate_nonnull_info, delete_null_pointer_checks_1,
free_code_hoist_mem, compute_code_hoist_vbeinout,
hoist_expr_reaches_here_p, hoist_code, one_code_hoisting_pass,
compute_ld_motion_mems, store_ops_ok, find_moveable_store,
compute_store_table, build_store_vectors, insert_insn_start_bb,
insert_store, replace_store_insn, free_store_memory, store_motion):
Modified.
* global.c (global_alloc, global_conflicts, mark_elimination,
build_insn_chain): Modified.
* graph.c (print_rtl_graph_with_bb): Modified.
* haifa-sched.c (sched_init): Modified.
* ifcvt.c (SET_ORIG_INDEX, ORIG_INDEX): Removed.
(find_if_block, find_cond_trap, find_if_case_1, find_if_case_2,
if_convert): Modified.
* lcm.c (compute_antinout_edge, compute_earliest, compute_laterin,
compute_insert_delete, pre_edge_lcm, compute_available,
compute_farthest, compute_nearerout, compute_rev_insert_delete,
pre_edge_rev_lcm, make_preds_opaque, optimize_mode_switching):
Modified.
* local-alloc.c (alloc_qty, local_alloc, update_equiv_regs): Modified.
* loop.c (loop_dump_aux): Modified.
* predict.c (combine_predictions_for_insn, estimate_probability,
last_basic_block_p, process_note_prediction, process_note_predictions,
note_prediction_to_br_prob, propagate_freq, counts_to_freqs,
expensive_function_p, estimate_bb_frequencies,
compute_function_frequency): Modified.
* print-rtl.c (print_rtx): Modified.
* profile.c (GCOV_INDEX_TO_BB, BB_TO_GCOV_INDEX, instrument_edges,
get_exec_counts, compute_branch_probabilities, compute_checksum,
branch_prob, find_spanning_tree): Modified.
* recog.c (split_all_insns, peephole2_optimize): Modified.
* reg-stack.c (reg_to_stack, convert_regs_entry, compensate_edge,
convert_regs_1, convert_regs_2, convert_regs): Modified.
* regclass.c (scan_one_insn, regclass): Modified.
* regmove.c (mark_flags_life_zones, regmove_optimize,
combine_stack_adjustments): Modified.
* regrename.c (regrename_optimize, copyprop_hardreg_forward): Modified.
* reload1.c (reload, reload_combine, copy_eh_notes): Modified.
* reorg.c (dbr_schedule): Modified.
* resource.c (find_basic_block, init_resource_info): Modified.
* sbitmap.c (sbitmap_intersection_of_succs,
sbitmap_intersection_of_preds, sbitmap_union_of_succs,
sbitmap_union_of_preds): Modified.
* sched-deps.c (init_dependency_caches): Modified.
* sched-ebb.c (schedule_ebbs): Modified.
* sched-rgn.c (is_cfg_nonregular, build_control_flow, debug_regions,
find_rgns, compute_trg_info, init_regions, schedule_insns): Modified.
* sibcall.c (optimize_sibling_and_tail_recursive_call): Modified.
* ssa-ccp.c (examine_flow_edges, optimize_unexecutable_edges,
ssa_ccp_substitute_constants, ssa_ccp_df_delete_unreachable_insns,
ssa_const_prop): Modified.
* ssa-dce.c (set_control_dependent_block_to_edge_map_,
find_control_dependence, find_pdom, ssa_eliminate_dead_code): Modified.
* ssa.c (remove_phi_alternative, find_evaluations,
compute_dominance_frontiers_1, compute_iterated_dominance_frontiers,
insert_phi_node, rename_block, convert_to_ssa, eliminate_phi,
make_regs_equivalent_over_bad_edges,
make_equivalent_phi_alternatives_equival,
compute_conservative_reg_partition,
coalesce_regs_in_successor_phi_nodes, compute_coalesced_reg_partition,
rename_equivalent_regs, convert_from_ssa, for_each_successor_phi):
Modified.
2002-05-16 Mark Mitchell <mark@codesourcery.com>
* cfgrtl.c (purge_dead_edges): Correct handling of EDGE_EH.
@ -10,7 +158,7 @@
calling CONSTANT_POOL_ADDRESS_P.
* config/arm/arm.h (TARGET_CPU_CPP_BUILTINS): Fix typo in code
to decide whether to define __arm__ or __thumb.
(THUMB_GO_IF_LEGITIMATE_ADDRESS): Check for RTX being a
SYMBOL_REF before calling CONSTANT_POOL_ADDRESS_P.
2002-05-16 Neil Booth <neil@daikokuya.demon.co.uk>
@ -33,10 +181,10 @@
2002-05-15 Aldy Hernandez <aldyh@redhat.com>
* config/rs6000/altivec.h: Cleanups for tighter typechecking.
Cleanups for accepting modifiers on pointers.
Fix predicate typos.
Allow long pointers as well as int pointers.
2002-05-15 Richard Henderson <rth@redhat.com>
@ -45,7 +193,7 @@
2002-05-15 Matt Hiller <hiller@redhat.com>
* testsuite/gcc.c-torture/compile/20000804-1.x: Don't return 1 if
XFAILing.
* testsuite/gcc.c-torture/compile/20001226-1.x: Ditto.
* testsuite/gcc.c-torture/compile/920520-1.x: Ditto.
* testsuite/gcc.c-torture/compile/mipscop-1.x: XFAIL for now.
@ -55,39 +203,39 @@
2002-05-15 Aldy Hernandez <aldyh@redhat.com>
* reload1.c (forget_old_reloads_1): Do not use subreg offset.
2002-05-15 Aldy Hernandez <aldyh@redhat.com>
* config/rs6000/rs6000.md ("altivec_mtvscr"): Set VSCR register.
("altivec_mfvscr"): Read from VSCR.
* config/rs6000/rs6000.md ("altivec_mtvscr"): Set VSCR register.
("altivec_mfvscr"): Read from VSCR.
Add vscr sets for the following insns: altivec_vctuxs,
altivec_vaddubs, altivec_vaddsbs, altivec_vadduhs,
altivec_vaddshs, altivec_vadduws, altivec_vaddsws, altivec_vctsxs,
altivec_vmhaddshs, altivec_vmhraddshs, altivec_vmsumuhs,
altivec_vmsumshs, altivec_vpkuhss, altivec_vpkshss,
altivec_vpkuwss, altivec_vpkswss, altivec_vpkuhus,
altivec_vpkshus, altivec_vpkuwus, altivec_vpkswus,
altivec_vsububs, altivec_vsubsbs, altivec_vsubuhs,
altivec_vsubshs, altivec_vsubuws, altivec_vsubsws,
altivec_vsum4ubs, altivec_vsum4sbs, altivec_vsum4shs,
altivec_vsum2sws, altivec_vsumsws.
* config/rs6000/rs6000.h: Add VSCR fixed register.
(CALL_REALLY_USED_REGISTERS): Add vscr.
(CALL_USED_REGISTERS): Same.
(FIXED_REGISTERS): Same.
(REG_ALLOC_ORDER): Same.
(reg_class): Add VSCR_REGS.
(REG_CLASS_NAMES): Same.
(REG_CLASS_CONTENTS): Same.
(VSCR_REGNO): New.
(REGISTER_NAMES): Add vscr.
(DEBUG_REGISTER_NAMES): Same.
(ADDITIONAL_REGISTER_NAMES): Same.
(FIRST_PSEUDO_REGISTER): Increment.
(CONDITIONAL_REGISTER_USAGE): Set VSCR as a global register.
2002-05-15 Jakub Jelinek <jakub@redhat.com>

gcc/basic-block.h

@ -203,8 +203,11 @@ typedef struct basic_block_def {
/* Auxiliary info specific to a pass. */
void *aux;
/* The index of this block. */
int index;
/* The index of a block. */
int sindex;
/* Previous and next blocks in the chain. */
struct basic_block_def *prev_bb, *next_bb;
/* The loop depth of this block. */
int loop_depth;
@ -228,7 +231,11 @@ typedef struct basic_block_def {
/* Number of basic blocks in the current function. */
extern int n_basic_blocks;
extern int num_basic_blocks;
/* First free basic block number. */
extern int last_basic_block;
/* Number of edges in the current function. */
@ -240,6 +247,16 @@ extern varray_type basic_block_info;
#define BASIC_BLOCK(N) (VARRAY_BB (basic_block_info, (N)))
/* For iterating over basic blocks. */
#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
for (BB = FROM; BB != TO; BB = BB->DIR)
#define FOR_ALL_BB(BB) \
FOR_BB_BETWEEN (BB, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
#define FOR_ALL_BB_REVERSE(BB) \
FOR_BB_BETWEEN (BB, EXIT_BLOCK_PTR->prev_bb, ENTRY_BLOCK_PTR, prev_bb)
/* What registers are live at the setjmp call. */
extern regset regs_live_at_setjmp;
@ -284,7 +301,7 @@ extern struct basic_block_def entry_exit_blocks[2];
extern varray_type basic_block_for_insn;
#define BLOCK_FOR_INSN(INSN) VARRAY_BB (basic_block_for_insn, INSN_UID (INSN))
#define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0)
#define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->sindex + 0)
extern void compute_bb_for_insn PARAMS ((int));
extern void free_bb_for_insn PARAMS ((void));
@ -314,8 +331,8 @@ extern void remove_edge PARAMS ((edge));
extern void redirect_edge_succ PARAMS ((edge, basic_block));
extern edge redirect_edge_succ_nodup PARAMS ((edge, basic_block));
extern void redirect_edge_pred PARAMS ((edge, basic_block));
extern basic_block create_basic_block_structure PARAMS ((int, rtx, rtx, rtx));
extern basic_block create_basic_block PARAMS ((int, rtx, rtx));
extern basic_block create_basic_block_structure PARAMS ((int, rtx, rtx, rtx, basic_block));
extern basic_block create_basic_block PARAMS ((rtx, rtx, basic_block));
extern int flow_delete_block PARAMS ((basic_block));
extern int flow_delete_block_noexpunge PARAMS ((basic_block));
extern void clear_bb_flags PARAMS ((void));
@ -639,12 +656,15 @@ extern void reorder_basic_blocks PARAMS ((void));
extern void dump_bb PARAMS ((basic_block, FILE *));
extern void debug_bb PARAMS ((basic_block));
extern void debug_bb_n PARAMS ((int));
extern basic_block debug_num2bb PARAMS ((int));
extern void dump_regset PARAMS ((regset, FILE *));
extern void debug_regset PARAMS ((regset));
extern void allocate_reg_life_data PARAMS ((void));
extern void allocate_bb_life_data PARAMS ((void));
extern void expunge_block PARAMS ((basic_block));
extern void expunge_block_nocompact PARAMS ((basic_block));
extern void link_block PARAMS ((basic_block, basic_block));
extern void unlink_block PARAMS ((basic_block));
extern void compact_blocks PARAMS ((void));
extern basic_block alloc_block PARAMS ((void));
extern void find_unreachable_blocks PARAMS ((void));
extern int delete_noop_moves PARAMS ((rtx));
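
The hunks above are the core of the whole commit: basic blocks now sit on a doubly-linked chain between the entry and exit sentinels, and FOR_BB_BETWEEN walks pointers instead of a dense index range. A minimal standalone model of that traversal follows; the struct and the entry_blk/exit_blk sentinels are simplified stand-ins for GCC's basic_block and ENTRY/EXIT_BLOCK_PTR, not the real declarations.

#include <stdio.h>

struct bb { int sindex; struct bb *prev_bb, *next_bb; };

/* Sentinels playing the role of ENTRY_BLOCK_PTR and EXIT_BLOCK_PTR.  */
static struct bb entry_blk, exit_blk;

#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
  for ((BB) = (FROM); (BB) != (TO); (BB) = (BB)->DIR)
#define FOR_ALL_BB(BB) \
  FOR_BB_BETWEEN (BB, entry_blk.next_bb, &exit_blk, next_bb)

int
main (void)
{
  struct bb blocks[3], *bb;
  int i;

  /* Chain entry -> 0 -> 1 -> 2 -> exit.  */
  entry_blk.next_bb = &blocks[0];
  exit_blk.prev_bb = &blocks[2];
  for (i = 0; i < 3; i++)
    {
      blocks[i].sindex = i;
      blocks[i].prev_bb = i > 0 ? &blocks[i - 1] : &entry_blk;
      blocks[i].next_bb = i < 2 ? &blocks[i + 1] : &exit_blk;
    }

  /* Visits every block, even once sindex values become sparse.  */
  FOR_ALL_BB (bb)
    printf ("block %d\n", bb->sindex);
  return 0;
}

FOR_ALL_BB_REVERSE is the same walk over prev_bb, starting from the exit sentinel.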

gcc/bb-reorder.c

@ -102,14 +102,11 @@ static void
make_reorder_chain ()
{
basic_block prev = NULL;
int nbb_m1 = n_basic_blocks - 1;
basic_block next;
basic_block next, bb;
/* Loop until we've placed every block. */
do
{
int i;
next = NULL;
/* Find the next unplaced block. */
@ -119,12 +116,13 @@ make_reorder_chain ()
remove from the list as we place. The head of that list is
what we're looking for here. */
for (i = 0; i <= nbb_m1 && !next; ++i)
{
basic_block bb = BASIC_BLOCK (i);
if (! RBI (bb)->visited)
FOR_ALL_BB (bb)
if (! RBI (bb)->visited)
{
next = bb;
}
break;
}
if (next)
prev = make_reorder_chain_1 (next, prev);
}
@ -158,13 +156,13 @@ make_reorder_chain_1 (bb, prev)
restart:
RBI (prev)->next = bb;
if (rtl_dump_file && prev->index + 1 != bb->index)
if (rtl_dump_file && prev->next_bb != bb)
fprintf (rtl_dump_file, "Reordering block %d after %d\n",
bb->index, prev->index);
bb->sindex, prev->sindex);
}
else
{
if (bb->index != 0)
if (bb->prev_bb != ENTRY_BLOCK_PTR)
abort ();
}
RBI (bb)->visited = 1;
@ -214,7 +212,7 @@ make_reorder_chain_1 (bb, prev)
if (! next)
{
for (e = bb->succ; e ; e = e->succ_next)
if (e->dest->index == bb->index + 1)
if (e->dest == bb->next_bb)
{
if ((e->flags & EDGE_FALLTHRU)
|| (e->dest->succ
@ -258,7 +256,7 @@ make_reorder_chain_1 (bb, prev)
void
reorder_basic_blocks ()
{
if (n_basic_blocks <= 1)
if (num_basic_blocks <= 1)
return;
if ((* targetm.cannot_modify_jumps_p) ())

gcc/cfg.c

@ -63,7 +63,10 @@ static char *flow_firstobj;
/* Number of basic blocks in the current function. */
int n_basic_blocks;
int num_basic_blocks;
/* First free basic block number. */
int last_basic_block;
/* Number of edges in the current function. */
@ -93,6 +96,8 @@ struct basic_block_def entry_exit_blocks[2]
NULL, /* global_live_at_end */
NULL, /* aux */
ENTRY_BLOCK, /* index */
NULL, /* prev_bb */
EXIT_BLOCK_PTR, /* next_bb */
0, /* loop_depth */
0, /* count */
0, /* frequency */
@ -111,6 +116,8 @@ struct basic_block_def entry_exit_blocks[2]
NULL, /* global_live_at_end */
NULL, /* aux */
EXIT_BLOCK, /* index */
ENTRY_BLOCK_PTR, /* prev_bb */
NULL, /* next_bb */
0, /* loop_depth */
0, /* count */
0, /* frequency */
@ -163,12 +170,11 @@ free_edge (e)
void
clear_edges ()
{
int i;
basic_block bb;
edge e;
for (i = 0; i < n_basic_blocks; ++i)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
edge e = bb->succ;
while (e)
@ -220,36 +226,66 @@ alloc_block ()
return bb;
}
/* Remove block B from the basic block array and compact behind it. */
/* Link block B to chain after AFTER. */
void
expunge_block_nocompact (b)
link_block (b, after)
basic_block b, after;
{
b->next_bb = after->next_bb;
b->prev_bb = after;
after->next_bb = b;
b->next_bb->prev_bb = b;
}
/* Unlink block B from chain. */
void
unlink_block (b)
basic_block b;
{
/* Invalidate data to make bughunting easier. */
memset (b, 0, sizeof *b);
b->index = -3;
b->succ = (edge) first_deleted_block;
first_deleted_block = (basic_block) b;
b->next_bb->prev_bb = b->prev_bb;
b->prev_bb->next_bb = b->next_bb;
}
/* Sequentially order blocks and compact the arrays. */
void
compact_blocks ()
{
basic_block *bbs = xcalloc (num_basic_blocks, sizeof (basic_block));
int i;
basic_block bb;
i = 0;
FOR_ALL_BB (bb)
bbs[i++] = bb;
if (i != num_basic_blocks)
abort ();
for (i = 0; i < num_basic_blocks; i++)
{
bbs[i]->sindex = i;
BASIC_BLOCK (i) = bbs[i];
}
last_basic_block = num_basic_blocks;
free (bbs);
}
/* Remove block B from the basic block array. */
void
expunge_block (b)
basic_block b;
{
int i, n = n_basic_blocks;
unlink_block (b);
BASIC_BLOCK (b->sindex) = NULL;
num_basic_blocks--;
for (i = b->index; i + 1 < n; ++i)
{
basic_block x = BASIC_BLOCK (i + 1);
BASIC_BLOCK (i) = x;
x->index = i;
}
n_basic_blocks--;
basic_block_info->num_elements--;
expunge_block_nocompact (b);
/* Invalidate data to make bughunting easier. */
memset (b, 0, sizeof *b);
b->sindex = -3;
b->succ = (edge) first_deleted_block;
first_deleted_block = (basic_block) b;
}
/* Create an edge connecting SRC and DST with FLAGS optionally using
@ -274,7 +310,7 @@ cached_make_edge (edge_cache, src, dst, flags)
{
default:
/* Quick test for non-existence of the edge. */
if (! TEST_BIT (edge_cache[src->index], dst->index))
if (! TEST_BIT (edge_cache[src->sindex], dst->sindex))
break;
/* The edge exists; early exit if no work to do. */
@ -314,7 +350,7 @@ cached_make_edge (edge_cache, src, dst, flags)
dst->pred = e;
if (use_edge_cache)
SET_BIT (edge_cache[src->index], dst->index);
SET_BIT (edge_cache[src->sindex], dst->sindex);
return e;
}
@ -453,11 +489,10 @@ redirect_edge_pred (e, new_pred)
void
clear_bb_flags ()
{
int i;
ENTRY_BLOCK_PTR->flags = 0;
EXIT_BLOCK_PTR->flags = 0;
for (i = 0; i < n_basic_blocks; i++)
BASIC_BLOCK (i)->flags = 0;
basic_block bb;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
bb->flags = 0;
}
void
@ -465,6 +500,7 @@ dump_flow_info (file)
FILE *file;
{
int i;
basic_block bb;
static const char * const reg_class_names[] = REG_CLASS_NAMES;
fprintf (file, "%d registers.\n", max_regno);
@ -511,16 +547,17 @@ dump_flow_info (file)
fprintf (file, ".\n");
}
fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges);
for (i = 0; i < n_basic_blocks; i++)
fprintf (file, "\n%d basic blocks, %d edges.\n", num_basic_blocks, n_edges);
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
edge e;
int sum;
gcov_type lsum;
fprintf (file, "\nBasic block %d: first insn %d, last %d, ",
i, INSN_UID (bb->head), INSN_UID (bb->end));
bb->sindex, INSN_UID (bb->head), INSN_UID (bb->end));
fprintf (file, "prev %d, next %d, ",
bb->prev_bb->sindex, bb->next_bb->sindex);
fprintf (file, "loop_depth %d, count ", bb->loop_depth);
fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count);
fprintf (file, ", freq %i.\n", bb->frequency);
@ -595,7 +632,7 @@ dump_edge_info (file, e, do_succ)
else if (side == EXIT_BLOCK_PTR)
fputs (" EXIT", file);
else
fprintf (file, " %d", side->index);
fprintf (file, " %d", side->sindex);
if (e->probability)
fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE);
@ -675,10 +712,10 @@ alloc_aux_for_blocks (size)
first_block_aux_obj = (char *) obstack_alloc (&block_aux_obstack, 0);
if (size)
{
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks; i++)
alloc_aux_for_block (BASIC_BLOCK (i), size);
FOR_ALL_BB (bb)
alloc_aux_for_block (bb, size);
alloc_aux_for_block (ENTRY_BLOCK_PTR, size);
alloc_aux_for_block (EXIT_BLOCK_PTR, size);
@ -690,13 +727,10 @@ alloc_aux_for_blocks (size)
void
clear_aux_for_blocks ()
{
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks; i++)
BASIC_BLOCK (i)->aux = NULL;
ENTRY_BLOCK_PTR->aux = NULL;
EXIT_BLOCK_PTR->aux = NULL;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
bb->aux = NULL;
}
/* Free data allocated in block_aux_obstack and clear AUX pointers
@ -750,16 +784,11 @@ alloc_aux_for_edges (size)
first_edge_aux_obj = (char *) obstack_alloc (&edge_aux_obstack, 0);
if (size)
{
int i;
for (i = -1; i < n_basic_blocks; i++)
{
basic_block bb;
edge e;
basic_block bb;
if (i >= 0)
bb = BASIC_BLOCK (i);
else
bb = ENTRY_BLOCK_PTR;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
edge e;
for (e = bb->succ; e; e = e->succ_next)
alloc_aux_for_edge (e, size);
@ -772,18 +801,12 @@ alloc_aux_for_edges (size)
void
clear_aux_for_edges ()
{
int i;
basic_block bb;
for (i = -1; i < n_basic_blocks; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
basic_block bb;
edge e;
if (i >= 0)
bb = BASIC_BLOCK (i);
else
bb = ENTRY_BLOCK_PTR;
for (e = bb->succ; e; e = e->succ_next)
e->aux = NULL;
}
@ -802,3 +825,11 @@ free_aux_for_edges ()
clear_aux_for_edges ();
}
/* The same as BASIC_BLOCK, but usable from debugger. */
basic_block
debug_num2bb (num)
int num;
{
return BASIC_BLOCK (num);
}
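
With the chain in place, expunge_block above no longer shuffles the whole BASIC_BLOCK array on every deletion: it splices the block out in O(1), leaves a hole in the array, and lets compact_blocks renumber everything once, on demand (cleanup_cfg calls it before optimizing, as the cfgcleanup.c hunk further down shows). A self-contained model of that delete-then-compact discipline, with simplified names rather than GCC's varray-based originals:

#include <stdio.h>
#include <stddef.h>

struct bb { int sindex; struct bb *prev_bb, *next_bb; };

#define MAX_BB 8
static struct bb *block_info[MAX_BB];   /* models the BASIC_BLOCK array */
static struct bb entry_blk, exit_blk;   /* chain sentinels */
static int num_basic_blocks, last_basic_block;

static void
unlink_block (struct bb *b)             /* O(1), regardless of position */
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
}

static void
expunge_block (struct bb *b)
{
  unlink_block (b);
  block_info[b->sindex] = NULL;         /* leave a hole; no renumbering */
  num_basic_blocks--;
}

static void
compact_blocks (void)                   /* renumber once, on demand */
{
  struct bb *bb;
  int i = 0;
  for (bb = entry_blk.next_bb; bb != &exit_blk; bb = bb->next_bb)
    {
      bb->sindex = i;
      block_info[i++] = bb;
    }
  last_basic_block = num_basic_blocks = i;  /* indices dense again */
}

int
main (void)
{
  struct bb blocks[3], *bb;
  int i;

  entry_blk.next_bb = &blocks[0];
  exit_blk.prev_bb = &blocks[2];
  for (i = 0; i < 3; i++)
    {
      blocks[i].sindex = i;
      block_info[i] = &blocks[i];
      blocks[i].prev_bb = i > 0 ? &blocks[i - 1] : &entry_blk;
      blocks[i].next_bb = i < 2 ? &blocks[i + 1] : &exit_blk;
    }
  num_basic_blocks = last_basic_block = 3;

  expunge_block (&blocks[1]);    /* block with sindex 2 survives; hole at 1 */
  compact_blocks ();             /* blocks are now numbered 0, 1 */

  for (bb = entry_blk.next_bb; bb != &exit_blk; bb = bb->next_bb)
    printf ("block %d\n", bb->sindex);
  return 0;
}

The committed version additionally poisons the freed block (sindex = -3, memset to zero) so that dangling uses fail fast.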

gcc/cfganal.c

@ -87,7 +87,7 @@ can_fallthru (src, target)
rtx insn = src->end;
rtx insn2 = target->head;
if (src->index + 1 != target->index)
if (src->next_bb != target)
return 0;
if (!active_insn_p (insn2))
@ -120,15 +120,15 @@ mark_dfs_back_edges ()
bool found = false;
/* Allocate the preorder and postorder number arrays. */
pre = (int *) xcalloc (n_basic_blocks, sizeof (int));
post = (int *) xcalloc (n_basic_blocks, sizeof (int));
pre = (int *) xcalloc (last_basic_block, sizeof (int));
post = (int *) xcalloc (last_basic_block, sizeof (int));
/* Allocate stack for back-tracking up CFG. */
stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
visited = sbitmap_alloc (n_basic_blocks);
visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@ -149,12 +149,12 @@ mark_dfs_back_edges ()
e->flags &= ~EDGE_DFS_BACK;
/* Check if the edge destination has been visited yet. */
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
{
/* Mark that we have visited the destination. */
SET_BIT (visited, dest->index);
SET_BIT (visited, dest->sindex);
pre[dest->index] = prenum++;
pre[dest->sindex] = prenum++;
if (dest->succ)
{
/* Since the DEST node has been visited for the first
@ -162,17 +162,17 @@ mark_dfs_back_edges ()
stack[sp++] = dest->succ;
}
else
post[dest->index] = postnum++;
post[dest->sindex] = postnum++;
}
else
{
if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
&& pre[src->index] >= pre[dest->index]
&& post[dest->index] == 0)
&& pre[src->sindex] >= pre[dest->sindex]
&& post[dest->sindex] == 0)
e->flags |= EDGE_DFS_BACK, found = true;
if (! e->succ_next && src != ENTRY_BLOCK_PTR)
post[src->index] = postnum++;
post[src->sindex] = postnum++;
if (e->succ_next)
stack[sp - 1] = e->succ_next;
@ -194,10 +194,10 @@ mark_dfs_back_edges ()
void
set_edge_can_fallthru_flag ()
{
int i;
for (i = 0; i < n_basic_blocks; i++)
basic_block bb;
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
edge e;
/* The FALLTHRU edge is also CAN_FALLTHRU edge. */
@ -258,29 +258,16 @@ flow_call_edges_add (blocks)
{
int i;
int blocks_split = 0;
int bb_num = 0;
basic_block *bbs;
int last_bb = last_basic_block;
bool check_last_block = false;
/* Map bb indices into basic block pointers since split_block
will renumber the basic blocks. */
bbs = xmalloc (n_basic_blocks * sizeof (*bbs));
if (num_basic_blocks == 0)
return 0;
if (! blocks)
{
for (i = 0; i < n_basic_blocks; i++)
bbs[bb_num++] = BASIC_BLOCK (i);
check_last_block = true;
}
check_last_block = true;
else
EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i,
{
bbs[bb_num++] = BASIC_BLOCK (i);
if (i == n_basic_blocks - 1)
check_last_block = true;
});
check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->sindex);
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
@ -296,7 +283,7 @@ flow_call_edges_add (blocks)
Handle this by adding a dummy instruction in a new last basic block. */
if (check_last_block)
{
basic_block bb = BASIC_BLOCK (n_basic_blocks - 1);
basic_block bb = EXIT_BLOCK_PTR->prev_bb;
rtx insn = bb->end;
/* Back up past insns that must be kept in the same block as a call. */
@ -321,12 +308,18 @@ flow_call_edges_add (blocks)
calls since there is no way that we can determine if they will
return or not... */
for (i = 0; i < bb_num; i++)
for (i = 0; i < last_bb; i++)
{
basic_block bb = bbs[i];
basic_block bb = BASIC_BLOCK (i);
rtx insn;
rtx prev_insn;
if (!bb)
continue;
if (blocks && !TEST_BIT (blocks, i))
continue;
for (insn = bb->end; ; insn = prev_insn)
{
prev_insn = PREV_INSN (insn);
@ -374,7 +367,6 @@ flow_call_edges_add (blocks)
if (blocks_split)
verify_flow_info ();
free (bbs);
return blocks_split;
}
@ -386,16 +378,15 @@ void
find_unreachable_blocks ()
{
edge e;
int i, n;
basic_block *tos, *worklist;
basic_block *tos, *worklist, bb;
n = n_basic_blocks;
tos = worklist = (basic_block *) xmalloc (sizeof (basic_block) * n);
tos = worklist =
(basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks);
/* Clear all the reachability flags. */
for (i = 0; i < n; ++i)
BASIC_BLOCK (i)->flags &= ~BB_REACHABLE;
FOR_ALL_BB (bb)
bb->flags &= ~BB_REACHABLE;
/* Add our starting points to the worklist. Almost always there will
be only one. It isn't inconceivable that we might one day directly
@ -445,27 +436,22 @@ create_edge_list ()
struct edge_list *elist;
edge e;
int num_edges;
int x;
int block_count;
basic_block bb;
block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */
block_count = num_basic_blocks + 2; /* Include the entry and exit blocks. */
num_edges = 0;
/* Determine the number of edges in the flow graph by counting successor
edges on each basic block. */
for (x = 0; x < n_basic_blocks; x++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
basic_block bb = BASIC_BLOCK (x);
for (e = bb->succ; e; e = e->succ_next)
num_edges++;
}
/* Don't forget successors of the entry block. */
for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
num_edges++;
elist = (struct edge_list *) xmalloc (sizeof (struct edge_list));
elist->num_blocks = block_count;
elist->num_edges = num_edges;
@ -473,18 +459,10 @@ create_edge_list ()
num_edges = 0;
/* Follow successors of the entry block, and register these edges. */
for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
elist->index_to_edge[num_edges++] = e;
for (x = 0; x < n_basic_blocks; x++)
{
basic_block bb = BASIC_BLOCK (x);
/* Follow all successors of blocks, and register these edges. */
for (e = bb->succ; e; e = e->succ_next)
elist->index_to_edge[num_edges++] = e;
}
/* Follow successors of blocks, and register these edges. */
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
for (e = bb->succ; e; e = e->succ_next)
elist->index_to_edge[num_edges++] = e;
return elist;
}
@ -520,12 +498,12 @@ print_edge_list (f, elist)
if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
fprintf (f, "entry,");
else
fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);
fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->sindex);
if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
fprintf (f, "exit)\n");
else
fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->sindex);
}
}
@ -538,17 +516,16 @@ verify_edge_list (f, elist)
FILE *f;
struct edge_list *elist;
{
int x, pred, succ, index;
int index, pred, succ;
edge e;
basic_block bb, p, s;
for (x = 0; x < n_basic_blocks; x++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
basic_block bb = BASIC_BLOCK (x);
for (e = bb->succ; e; e = e->succ_next)
{
pred = e->src->index;
succ = e->dest->index;
pred = e->src->sindex;
succ = e->dest->sindex;
index = EDGE_INDEX (elist, e->src, e->dest);
if (index == EDGE_INDEX_NO_EDGE)
{
@ -556,42 +533,21 @@ verify_edge_list (f, elist)
continue;
}
if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
if (INDEX_EDGE_PRED_BB (elist, index)->sindex != pred)
fprintf (f, "*p* Pred for index %d should be %d not %d\n",
index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
index, pred, INDEX_EDGE_PRED_BB (elist, index)->sindex);
if (INDEX_EDGE_SUCC_BB (elist, index)->sindex != succ)
fprintf (f, "*p* Succ for index %d should be %d not %d\n",
index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
index, succ, INDEX_EDGE_SUCC_BB (elist, index)->sindex);
}
}
for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
{
pred = e->src->index;
succ = e->dest->index;
index = EDGE_INDEX (elist, e->src, e->dest);
if (index == EDGE_INDEX_NO_EDGE)
{
fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ);
continue;
}
if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
fprintf (f, "*p* Pred for index %d should be %d not %d\n",
index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
fprintf (f, "*p* Succ for index %d should be %d not %d\n",
index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
}
/* We've verified that all the edges are in the list, now let's make sure
there are no spurious edges in the list. */
for (pred = 0; pred < n_basic_blocks; pred++)
for (succ = 0; succ < n_basic_blocks; succ++)
FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
{
basic_block p = BASIC_BLOCK (pred);
basic_block s = BASIC_BLOCK (succ);
int found_edge = 0;
for (e = p->succ; e; e = e->succ_next)
@ -608,78 +564,16 @@ verify_edge_list (f, elist)
break;
}
if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ))
if (EDGE_INDEX (elist, p, s)
== EDGE_INDEX_NO_EDGE && found_edge != 0)
fprintf (f, "*** Edge (%d, %d) appears to not have an index\n",
pred, succ);
if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ))
p->sindex, s->sindex);
if (EDGE_INDEX (elist, p, s)
!= EDGE_INDEX_NO_EDGE && found_edge == 0)
fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n",
pred, succ, EDGE_INDEX (elist, BASIC_BLOCK (pred),
BASIC_BLOCK (succ)));
p->sindex, s->sindex, EDGE_INDEX (elist, p, s));
}
for (succ = 0; succ < n_basic_blocks; succ++)
{
basic_block p = ENTRY_BLOCK_PTR;
basic_block s = BASIC_BLOCK (succ);
int found_edge = 0;
for (e = p->succ; e; e = e->succ_next)
if (e->dest == s)
{
found_edge = 1;
break;
}
for (e = s->pred; e; e = e->pred_next)
if (e->src == p)
{
found_edge = 1;
break;
}
if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ))
== EDGE_INDEX_NO_EDGE && found_edge != 0)
fprintf (f, "*** Edge (entry, %d) appears to not have an index\n",
succ);
if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ))
!= EDGE_INDEX_NO_EDGE && found_edge == 0)
fprintf (f, "*** Edge (entry, %d) has index %d, but no edge exists\n",
succ, EDGE_INDEX (elist, ENTRY_BLOCK_PTR,
BASIC_BLOCK (succ)));
}
for (pred = 0; pred < n_basic_blocks; pred++)
{
basic_block p = BASIC_BLOCK (pred);
basic_block s = EXIT_BLOCK_PTR;
int found_edge = 0;
for (e = p->succ; e; e = e->succ_next)
if (e->dest == s)
{
found_edge = 1;
break;
}
for (e = s->pred; e; e = e->pred_next)
if (e->src == p)
{
found_edge = 1;
break;
}
if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR)
== EDGE_INDEX_NO_EDGE && found_edge != 0)
fprintf (f, "*** Edge (%d, exit) appears to not have an index\n",
pred);
if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR)
!= EDGE_INDEX_NO_EDGE && found_edge == 0)
fprintf (f, "*** Edge (%d, exit) has index %d, but no edge exists\n",
pred, EDGE_INDEX (elist, BASIC_BLOCK (pred),
EXIT_BLOCK_PTR));
}
}
/* This routine will determine what, if any, edge there is between
@ -734,8 +628,8 @@ flow_edge_list_print (str, edge_list, num_edges, file)
fprintf (file, "%s { ", str);
for (i = 0; i < num_edges; i++)
fprintf (file, "%d->%d ", edge_list[i]->src->index,
edge_list[i]->dest->index);
fprintf (file, "%d->%d ", edge_list[i]->src->sindex,
edge_list[i]->dest->sindex);
fputs ("}\n", file);
}
@ -768,13 +662,10 @@ remove_fake_successors (bb)
void
remove_fake_edges ()
{
int x;
basic_block bb;
for (x = 0; x < n_basic_blocks; x++)
remove_fake_successors (BASIC_BLOCK (x));
/* We've handled all successors except the entry block's. */
remove_fake_successors (ENTRY_BLOCK_PTR);
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
remove_fake_successors (bb);
}
/* This function will add a fake edge between any block which has no
@ -784,11 +675,11 @@ remove_fake_edges ()
void
add_noreturn_fake_exit_edges ()
{
int x;
basic_block bb;
for (x = 0; x < n_basic_blocks; x++)
if (BASIC_BLOCK (x)->succ == NULL)
make_single_succ_edge (BASIC_BLOCK (x), EXIT_BLOCK_PTR, EDGE_FAKE);
FOR_ALL_BB (bb)
if (bb->succ == NULL)
make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
}
/* This function adds a fake edge between any infinite loops to the
@ -840,11 +731,11 @@ flow_reverse_top_sort_order_compute (rts_order)
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
visited = sbitmap_alloc (n_basic_blocks);
visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@ -864,22 +755,22 @@ flow_reverse_top_sort_order_compute (rts_order)
dest = e->dest;
/* Check if the edge destination has been visited yet. */
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
{
/* Mark that we have visited the destination. */
SET_BIT (visited, dest->index);
SET_BIT (visited, dest->sindex);
if (dest->succ)
/* Since the DEST node has been visited for the first
time, check its successors. */
stack[sp++] = dest->succ;
else
rts_order[postnum++] = dest->index;
rts_order[postnum++] = dest->sindex;
}
else
{
if (! e->succ_next && src != ENTRY_BLOCK_PTR)
rts_order[postnum++] = src->index;
rts_order[postnum++] = src->sindex;
if (e->succ_next)
stack[sp - 1] = e->succ_next;
@ -907,15 +798,15 @@ flow_depth_first_order_compute (dfs_order, rc_order)
edge *stack;
int sp;
int dfsnum = 0;
int rcnum = n_basic_blocks - 1;
int rcnum = num_basic_blocks - 1;
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
visited = sbitmap_alloc (n_basic_blocks);
visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@ -935,13 +826,13 @@ flow_depth_first_order_compute (dfs_order, rc_order)
dest = e->dest;
/* Check if the edge destination has been visited yet. */
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
{
/* Mark that we have visited the destination. */
SET_BIT (visited, dest->index);
SET_BIT (visited, dest->sindex);
if (dfs_order)
dfs_order[dfsnum] = dest->index;
dfs_order[dfsnum] = dest->sindex;
dfsnum++;
@ -952,7 +843,7 @@ flow_depth_first_order_compute (dfs_order, rc_order)
else if (rc_order)
/* There are no successors for the DEST node so assign
its reverse completion number. */
rc_order[rcnum--] = dest->index;
rc_order[rcnum--] = dest->sindex;
}
else
{
@ -960,7 +851,7 @@ flow_depth_first_order_compute (dfs_order, rc_order)
&& rc_order)
/* There are no more successors for the SRC node
so assign its reverse completion number. */
rc_order[rcnum--] = src->index;
rc_order[rcnum--] = src->sindex;
if (e->succ_next)
stack[sp - 1] = e->succ_next;
@ -973,12 +864,12 @@ flow_depth_first_order_compute (dfs_order, rc_order)
sbitmap_free (visited);
/* The number of nodes visited should not be greater than
n_basic_blocks. */
if (dfsnum > n_basic_blocks)
num_basic_blocks. */
if (dfsnum > num_basic_blocks)
abort ();
/* There are some nodes left in the CFG that are unreachable. */
if (dfsnum < n_basic_blocks)
if (dfsnum < num_basic_blocks)
abort ();
return dfsnum;
@ -1014,30 +905,30 @@ flow_preorder_transversal_compute (pot_order)
sbitmap visited;
struct dfst_node *node;
struct dfst_node *dfst;
basic_block bb;
/* Allocate stack for back-tracking up CFG. */
stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate the tree. */
dfst = (struct dfst_node *) xcalloc (n_basic_blocks,
dfst = (struct dfst_node *) xcalloc (last_basic_block,
sizeof (struct dfst_node));
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
max_successors = 0;
for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
for (e = bb->succ; e; e = e->succ_next)
max_successors++;
dfst[i].node
= (max_successors
? (struct dfst_node **) xcalloc (max_successors,
sizeof (struct dfst_node *))
: NULL);
if (max_successors)
dfst[bb->sindex].node
= (struct dfst_node **) xcalloc (max_successors,
sizeof (struct dfst_node *));
}
/* Allocate bitmap to track nodes that have been visited. */
visited = sbitmap_alloc (n_basic_blocks);
visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@ -1056,17 +947,17 @@ flow_preorder_transversal_compute (pot_order)
dest = e->dest;
/* Check if the edge destination has been visited yet. */
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
{
/* Mark that we have visited the destination. */
SET_BIT (visited, dest->index);
SET_BIT (visited, dest->sindex);
/* Add the destination to the preorder tree. */
if (src != ENTRY_BLOCK_PTR)
{
dfst[src->index].node[dfst[src->index].nnodes++]
= &dfst[dest->index];
dfst[dest->index].up = &dfst[src->index];
dfst[src->sindex].node[dfst[src->sindex].nnodes++]
= &dfst[dest->sindex];
dfst[dest->sindex].up = &dfst[src->sindex];
}
if (dest->succ)
@ -1088,7 +979,7 @@ flow_preorder_transversal_compute (pot_order)
walking the tree from right to left. */
i = 0;
node = &dfst[0];
node = &dfst[ENTRY_BLOCK_PTR->next_bb->sindex];
pot_order[i++] = 0;
while (node)
@ -1104,7 +995,7 @@ flow_preorder_transversal_compute (pot_order)
/* Free the tree. */
for (i = 0; i < n_basic_blocks; i++)
for (i = 0; i < last_basic_block; i++)
if (dfst[i].node)
free (dfst[i].node);
@ -1146,12 +1037,12 @@ flow_dfs_compute_reverse_init (data)
depth_first_search_ds data;
{
/* Allocate stack for back-tracking up CFG. */
data->stack = (basic_block *) xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1))
data->stack = (basic_block *) xmalloc ((num_basic_blocks - (INVALID_BLOCK + 1))
* sizeof (basic_block));
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
data->visited_blocks = sbitmap_alloc (n_basic_blocks - (INVALID_BLOCK + 1));
data->visited_blocks = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1));
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (data->visited_blocks);
@ -1169,7 +1060,7 @@ flow_dfs_compute_reverse_add_bb (data, bb)
basic_block bb;
{
data->stack[data->sp++] = bb;
SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1));
SET_BIT (data->visited_blocks, bb->sindex - (INVALID_BLOCK + 1));
}
/* Continue the depth-first search through the reverse graph starting with the
@ -1183,7 +1074,6 @@ flow_dfs_compute_reverse_execute (data)
{
basic_block bb;
edge e;
int i;
while (data->sp > 0)
{
@ -1192,14 +1082,14 @@ flow_dfs_compute_reverse_execute (data)
/* Perform depth-first search on adjacent vertices. */
for (e = bb->pred; e; e = e->pred_next)
if (!TEST_BIT (data->visited_blocks,
e->src->index - (INVALID_BLOCK + 1)))
e->src->sindex - (INVALID_BLOCK + 1)))
flow_dfs_compute_reverse_add_bb (data, e->src);
}
/* Determine if there are unvisited basic blocks. */
for (i = n_basic_blocks - (INVALID_BLOCK + 1); --i >= 0; )
if (!TEST_BIT (data->visited_blocks, i))
return BASIC_BLOCK (i + (INVALID_BLOCK + 1));
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
if (!TEST_BIT (data->visited_blocks, bb->sindex - (INVALID_BLOCK + 1)))
return bb;
return NULL;
}
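
Because sindex values may now contain gaps between compactions, the allocations in this file follow a two-size rule: anything indexed by bb->sindex (the pre/post arrays of mark_dfs_back_edges, the visited bitmaps, the DFS tree) is sized by last_basic_block, while anything that holds at most one slot per existing block (worklists, back-tracking stacks) uses num_basic_blocks. A sketch of the rule with plain C stand-ins; the GCC versions use sbitmaps and xmalloc, not these hypothetical helpers:

#include <stdlib.h>

int num_basic_blocks;   /* how many blocks currently exist */
int last_basic_block;   /* first unused sindex; >= num_basic_blocks */

/* Allocate the arrays a DFS like mark_dfs_back_edges needs.  */
static void
alloc_dfs_arrays (int **pre, int **post, void ***stack)
{
  /* Indexed by bb->sindex, which may have holes after deletions,
     so these must span the whole index space.  */
  *pre = calloc (last_basic_block, sizeof (int));
  *post = calloc (last_basic_block, sizeof (int));

  /* Holds at most one entry per block that actually exists, so the
     block count is enough.  */
  *stack = malloc ((num_basic_blocks + 1) * sizeof (void *));
}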

gcc/cfgbuild.c

@ -50,7 +50,8 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
static int count_basic_blocks PARAMS ((rtx));
static void find_basic_blocks_1 PARAMS ((rtx));
static rtx find_label_refs PARAMS ((rtx, rtx));
static void make_edges PARAMS ((rtx, int, int, int));
static void make_edges PARAMS ((rtx, basic_block,
basic_block, int));
static void make_label_edge PARAMS ((sbitmap *, basic_block,
rtx, int));
static void make_eh_edge PARAMS ((sbitmap *, basic_block, rtx));
@ -280,9 +281,10 @@ make_eh_edge (edge_cache, src, insn)
static void
make_edges (label_value_list, min, max, update_p)
rtx label_value_list;
int min, max, update_p;
basic_block min, max;
int update_p;
{
int i;
basic_block bb;
sbitmap *edge_cache = NULL;
/* Assume no computed jump; revise as we create edges. */
@ -293,28 +295,26 @@ make_edges (label_value_list, min, max, update_p)
amount of time searching the edge lists for duplicates. */
if (forced_labels || label_value_list)
{
edge_cache = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
sbitmap_vector_zero (edge_cache, n_basic_blocks);
edge_cache = sbitmap_vector_alloc (last_basic_block, last_basic_block);
sbitmap_vector_zero (edge_cache, last_basic_block);
if (update_p)
for (i = min; i <= max; ++i)
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
{
edge e;
for (e = BASIC_BLOCK (i)->succ; e ; e = e->succ_next)
for (e = bb->succ; e ; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
SET_BIT (edge_cache[i], e->dest->index);
SET_BIT (edge_cache[bb->sindex], e->dest->sindex);
}
}
/* By nature of the way these get numbered, block 0 is always the entry. */
if (min == 0)
cached_make_edge (edge_cache, ENTRY_BLOCK_PTR, BASIC_BLOCK (0),
if (min == ENTRY_BLOCK_PTR->next_bb)
cached_make_edge (edge_cache, ENTRY_BLOCK_PTR, min,
EDGE_FALLTHRU);
for (i = min; i <= max; ++i)
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx insn, x;
enum rtx_code code;
int force_fallthru = 0;
@ -443,15 +443,16 @@ make_edges (label_value_list, min, max, update_p)
/* Find out if we can drop through to the next block. */
insn = next_nonnote_insn (insn);
if (!insn || (i + 1 == n_basic_blocks && force_fallthru))
if (!insn || (bb->next_bb == EXIT_BLOCK_PTR && force_fallthru))
cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
else if (i + 1 < n_basic_blocks)
else if (bb->next_bb != EXIT_BLOCK_PTR)
{
rtx tmp = BLOCK_HEAD (i + 1);
rtx tmp = bb->next_bb->head;
if (GET_CODE (tmp) == NOTE)
tmp = next_nonnote_insn (tmp);
if (force_fallthru || insn == tmp)
cached_make_edge (edge_cache, bb, BASIC_BLOCK (i + 1),
cached_make_edge (edge_cache, bb, bb->next_bb,
EDGE_FALLTHRU);
}
}
@ -470,12 +471,12 @@ find_basic_blocks_1 (f)
rtx f;
{
rtx insn, next;
int i = 0;
rtx bb_note = NULL_RTX;
rtx lvl = NULL_RTX;
rtx trll = NULL_RTX;
rtx head = NULL_RTX;
rtx end = NULL_RTX;
basic_block prev = ENTRY_BLOCK_PTR;
/* We process the instructions in a slightly different way than we did
previously. This is so that we see a NOTE_BASIC_BLOCK after we have
@ -492,7 +493,7 @@ find_basic_blocks_1 (f)
if ((GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == BARRIER)
&& head)
{
create_basic_block_structure (i++, head, end, bb_note);
prev = create_basic_block_structure (last_basic_block++, head, end, bb_note, prev);
head = end = NULL_RTX;
bb_note = NULL_RTX;
}
@ -506,7 +507,7 @@ find_basic_blocks_1 (f)
if (head && control_flow_insn_p (insn))
{
create_basic_block_structure (i++, head, end, bb_note);
prev = create_basic_block_structure (last_basic_block++, head, end, bb_note, prev);
head = end = NULL_RTX;
bb_note = NULL_RTX;
}
@ -588,11 +589,11 @@ find_basic_blocks_1 (f)
}
if (head != NULL_RTX)
create_basic_block_structure (i++, head, end, bb_note);
create_basic_block_structure (last_basic_block++, head, end, bb_note, prev);
else if (bb_note)
delete_insn (bb_note);
if (i != n_basic_blocks)
if (last_basic_block != num_basic_blocks)
abort ();
label_value_list = lvl;
@ -612,6 +613,7 @@ find_basic_blocks (f, nregs, file)
FILE *file ATTRIBUTE_UNUSED;
{
int max_uid;
basic_block bb;
timevar_push (TV_CFG);
basic_block_for_insn = 0;
@ -619,20 +621,21 @@ find_basic_blocks (f, nregs, file)
/* Flush out existing data. */
if (basic_block_info != NULL)
{
int i;
clear_edges ();
/* Clear bb->aux on all extant basic blocks. We'll use this as a
tag for reuse during create_basic_block, just in case some pass
copies around basic block notes improperly. */
for (i = 0; i < n_basic_blocks; ++i)
BASIC_BLOCK (i)->aux = NULL;
FOR_ALL_BB (bb)
bb->aux = NULL;
VARRAY_FREE (basic_block_info);
}
n_basic_blocks = count_basic_blocks (f);
num_basic_blocks = count_basic_blocks (f);
last_basic_block = 0;
ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR;
/* Size the basic block table. The actual structures will be allocated
by find_basic_blocks_1, since we want to keep the structure pointers
@ -642,7 +645,7 @@ find_basic_blocks (f, nregs, file)
instructions at all until close to the end of compilation when we
actually lay them out. */
VARRAY_BB_INIT (basic_block_info, n_basic_blocks, "basic_block_info");
VARRAY_BB_INIT (basic_block_info, num_basic_blocks, "basic_block_info");
find_basic_blocks_1 (f);
@ -661,7 +664,7 @@ find_basic_blocks (f, nregs, file)
compute_bb_for_insn (max_uid);
/* Discover the edges of our cfg. */
make_edges (label_value_list, 0, n_basic_blocks - 1, 0);
make_edges (label_value_list, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR->prev_bb, 0);
/* Do very simple cleanup now, for the benefit of code that runs between
here and cleanup_cfg, e.g. thread_prologue_and_epilogue_insns. */
@ -790,25 +793,24 @@ void
find_many_sub_basic_blocks (blocks)
sbitmap blocks;
{
int i;
int min, max;
basic_block bb, min, max;
for (i = 0; i < n_basic_blocks; i++)
SET_STATE (BASIC_BLOCK (i),
TEST_BIT (blocks, i) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
FOR_ALL_BB (bb)
SET_STATE (bb,
TEST_BIT (blocks, bb->sindex) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
for (i = 0; i < n_basic_blocks; i++)
if (STATE (BASIC_BLOCK (i)) == BLOCK_TO_SPLIT)
find_bb_boundaries (BASIC_BLOCK (i));
FOR_ALL_BB (bb)
if (STATE (bb) == BLOCK_TO_SPLIT)
find_bb_boundaries (bb);
for (i = 0; i < n_basic_blocks; i++)
if (STATE (BASIC_BLOCK (i)) != BLOCK_ORIGINAL)
FOR_ALL_BB (bb)
if (STATE (bb) != BLOCK_ORIGINAL)
break;
min = max = i;
for (; i < n_basic_blocks; i++)
if (STATE (BASIC_BLOCK (i)) != BLOCK_ORIGINAL)
max = i;
min = max = bb;
for (; bb != EXIT_BLOCK_PTR; bb = bb->next_bb)
if (STATE (bb) != BLOCK_ORIGINAL)
max = bb;
/* Now re-scan and wire in all edges. This expects simple (conditional)
jumps at the end of each new basic block. */
@ -816,29 +818,28 @@ find_many_sub_basic_blocks (blocks)
/* Update branch probabilities. Expect only (un)conditional jumps
to be created with only the forward edges. */
for (i = min; i <= max; i++)
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
{
edge e;
basic_block b = BASIC_BLOCK (i);
if (STATE (b) == BLOCK_ORIGINAL)
if (STATE (bb) == BLOCK_ORIGINAL)
continue;
if (STATE (b) == BLOCK_NEW)
if (STATE (bb) == BLOCK_NEW)
{
b->count = 0;
b->frequency = 0;
for (e = b->pred; e; e=e->pred_next)
bb->count = 0;
bb->frequency = 0;
for (e = bb->pred; e; e=e->pred_next)
{
b->count += e->count;
b->frequency += EDGE_FREQUENCY (e);
bb->count += e->count;
bb->frequency += EDGE_FREQUENCY (e);
}
}
compute_outgoing_frequencies (b);
compute_outgoing_frequencies (bb);
}
for (i = 0; i < n_basic_blocks; i++)
SET_STATE (BASIC_BLOCK (i), 0);
FOR_ALL_BB (bb)
SET_STATE (bb, 0);
}
/* Like above but for single basic block only. */
@ -847,14 +848,12 @@ void
find_sub_basic_blocks (bb)
basic_block bb;
{
int i;
int min, max;
basic_block next = (bb->index == n_basic_blocks - 1
? NULL : BASIC_BLOCK (bb->index + 1));
basic_block min, max, b;
basic_block next = bb->next_bb;
min = bb->index;
min = bb;
find_bb_boundaries (bb);
max = (next ? next->index : n_basic_blocks) - 1;
max = next->prev_bb;
/* Now re-scan and wire in all edges. This expects simple (conditional)
jumps at the end of each new basic block. */
@ -862,12 +861,11 @@ find_sub_basic_blocks (bb)
/* Update branch probabilities. Expect only (un)conditional jumps
to be created with only the forward edges. */
for (i = min; i <= max; i++)
FOR_BB_BETWEEN (b, min, max->next_bb, next_bb)
{
edge e;
basic_block b = BASIC_BLOCK (i);
if (i != min)
if (b != min)
{
b->count = 0;
b->frequency = 0;
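
With no dense numbering left to rebuild from, find_basic_blocks_1 now threads the chain while it creates blocks: create_basic_block_structure takes the previously created block as a new final argument and links the fresh block after it, which is why the calls above pass prev and assign the result back to it. A simplified sketch of that threading, using hypothetical helper names rather than the committed signatures:

struct bb { int sindex; struct bb *prev_bb, *next_bb; };

static int last_basic_block;

/* Insert B after AFTER; assumes AFTER is always followed by at least
   the exit sentinel, so after->next_bb is never null.  */
static void
link_block (struct bb *b, struct bb *after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}

/* Analogue of create_basic_block_structure's new tail parameter: the
   caller passes the block it created last and keeps the return value
   as PREV for the next call, so the chain is complete the moment
   scanning finishes.  */
static struct bb *
create_block_after (struct bb *b, struct bb *prev)
{
  b->sindex = last_basic_block++;
  link_block (b, prev);
  return b;
}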

gcc/cfgcleanup.c

@ -147,7 +147,7 @@ try_simplify_condjump (cbranch_block)
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
if (jump_block->pred->pred_next
|| jump_block->index == n_basic_blocks - 1
|| jump_block->next_bb == EXIT_BLOCK_PTR
|| !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = jump_block->succ->dest;
@ -439,7 +439,7 @@ try_forward_edges (mode, b)
target = first = e->dest;
counter = 0;
while (counter < n_basic_blocks)
while (counter < num_basic_blocks)
{
basic_block new_target = NULL;
bool new_target_threaded = false;
@ -449,7 +449,7 @@ try_forward_edges (mode, b)
{
/* Bypass trivial infinite loops. */
if (target == target->succ->dest)
counter = n_basic_blocks;
counter = num_basic_blocks;
new_target = target->succ->dest;
}
@ -462,7 +462,7 @@ try_forward_edges (mode, b)
{
if (!threaded_edges)
threaded_edges = xmalloc (sizeof (*threaded_edges)
* n_basic_blocks);
* num_basic_blocks);
else
{
int i;
@ -474,7 +474,7 @@ try_forward_edges (mode, b)
break;
if (i < nthreaded_edges)
{
counter = n_basic_blocks;
counter = num_basic_blocks;
break;
}
}
@ -483,7 +483,7 @@ try_forward_edges (mode, b)
if (t->dest == b)
break;
if (nthreaded_edges >= n_basic_blocks)
if (nthreaded_edges >= num_basic_blocks)
abort ();
threaded_edges[nthreaded_edges++] = t;
@ -524,11 +524,11 @@ try_forward_edges (mode, b)
threaded |= new_target_threaded;
}
if (counter >= n_basic_blocks)
if (counter >= num_basic_blocks)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
target->index);
target->sindex);
}
else if (target == first)
; /* We didn't do anything. */
@ -552,7 +552,7 @@ try_forward_edges (mode, b)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Forwarding edge %i->%i to %i failed.\n",
b->index, e->dest->index, target->index);
b->sindex, e->dest->sindex, target->sindex);
continue;
}
@ -688,7 +688,6 @@ merge_blocks_move_predecessor_nojumps (a, b)
basic_block a, b;
{
rtx barrier;
int index;
barrier = next_nonnote_insn (a->end);
if (GET_CODE (barrier) != BARRIER)
@ -712,16 +711,11 @@ merge_blocks_move_predecessor_nojumps (a, b)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
a->index, b->index);
a->sindex, b->sindex);
/* Swap the records for the two blocks around. Although we are deleting B,
A is now where B was and we want to compact the BB array from where
A used to be. */
BASIC_BLOCK (a->index) = b;
BASIC_BLOCK (b->index) = a;
index = a->index;
a->index = b->index;
b->index = index;
/* Swap the records for the two blocks around. */
unlink_block (a);
link_block (a, b->prev_bb);
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
@ -776,7 +770,7 @@ merge_blocks_move_successor_nojumps (a, b)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
b->index, a->index);
b->sindex, a->sindex);
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
@ -803,7 +797,7 @@ merge_blocks (e, b, c, mode)
/* If B has a fallthru edge to C, no need to move anything. */
if (e->flags & EDGE_FALLTHRU)
{
int b_index = b->index, c_index = c->index;
int b_index = b->sindex, c_index = c->sindex;
merge_blocks_nomove (b, c);
update_forwarder_flag (b);
@ -1230,7 +1224,7 @@ outgoing_edges_match (mode, bb1, bb2)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
bb1->index, bb2->index, b1->probability, prob2);
bb1->sindex, bb2->sindex, b1->probability, prob2);
return false;
}
@ -1238,7 +1232,7 @@ outgoing_edges_match (mode, bb1, bb2)
if (rtl_dump_file && match)
fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
bb1->index, bb2->index);
bb1->sindex, bb2->sindex);
return match;
}
@ -1371,14 +1365,14 @@ try_crossjump_to_edge (mode, e1, e2)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
src2->index, nmatch);
src2->sindex, nmatch);
redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
}
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Cross jumping from bb %i to bb %i; %i common insns\n",
src1->index, src2->index, nmatch);
src1->sindex, src2->sindex, nmatch);
redirect_to->count += src1->count;
redirect_to->frequency += src1->frequency;
@ -1539,6 +1533,7 @@ try_crossjump_bb (mode, bb)
for (e2 = bb->pred; e2; e2 = nexte2)
{
basic_block foll;
nexte2 = e2->pred_next;
if (e2 == e)
@ -1552,7 +1547,10 @@ try_crossjump_bb (mode, bb)
checks of crossjump(A,B). In order to prevent redundant
checks of crossjump(B,A), require that A be the block
with the lowest index. */
if (e->src->index > e2->src->index)
for (foll = e->src; foll && foll != e2->src; foll = foll->next_bb)
{
}
if (!foll)
continue;
if (try_crossjump_to_edge (mode, e, e2))
@ -1574,16 +1572,16 @@ static bool
try_optimize_cfg (mode)
int mode;
{
int i;
bool changed_overall = false;
bool changed;
int iterations = 0;
basic_block bb, b;
if (mode & CLEANUP_CROSSJUMP)
add_noreturn_fake_exit_edges ();
for (i = 0; i < n_basic_blocks; i++)
update_forwarder_flag (BASIC_BLOCK (i));
FOR_ALL_BB (bb)
update_forwarder_flag (bb);
if (mode & CLEANUP_UPDATE_LIFE)
clear_bb_flags ();
@ -1603,19 +1601,19 @@ try_optimize_cfg (mode)
"\n\ntry_optimize_cfg iteration %i\n\n",
iterations);
for (i = 0; i < n_basic_blocks;)
for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
{
basic_block c, b = BASIC_BLOCK (i);
basic_block c;
edge s;
bool changed_here = false;
/* Delete trivially dead basic blocks. */
while (b->pred == NULL)
{
c = BASIC_BLOCK (b->index - 1);
c = b->prev_bb;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleting block %i.\n",
b->index);
b->sindex);
flow_delete_block (b);
changed = true;
@ -1648,7 +1646,7 @@ try_optimize_cfg (mode)
delete_insn_chain (label, label);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleted label in block %i.\n",
b->index);
b->sindex);
}
/* If we fall through an empty block, we can remove it. */
@ -1659,14 +1657,14 @@ try_optimize_cfg (mode)
/* Note that forwarder_block_p true ensures that
there is a successor for this block. */
&& (b->succ->flags & EDGE_FALLTHRU)
&& n_basic_blocks > 1)
&& num_basic_blocks > 1)
{
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Deleting fallthru block %i.\n",
b->index);
b->sindex);
c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
redirect_edge_succ_nodup (b->pred, b->succ->dest);
flow_delete_block (b);
changed = true;
@ -1718,7 +1716,7 @@ try_optimize_cfg (mode)
/* Don't get confused by the index shift caused by
deleting blocks. */
if (!changed_here)
i = b->index + 1;
b = b->next_bb;
else
changed = true;
}
@ -1750,33 +1748,22 @@ try_optimize_cfg (mode)
bool
delete_unreachable_blocks ()
{
int i, j;
bool changed = false;
basic_block b, next_bb;
find_unreachable_blocks ();
/* Delete all unreachable basic blocks. Do compaction concurrently,
as otherwise we can wind up with O(N^2) behaviour here when we
have oodles of dead code. */
/* Delete all unreachable basic blocks. */
for (i = j = 0; i < n_basic_blocks; ++i)
for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
{
basic_block b = BASIC_BLOCK (i);
next_bb = b->next_bb;
if (!(b->flags & BB_REACHABLE))
{
flow_delete_block_noexpunge (b);
expunge_block_nocompact (b);
flow_delete_block (b);
changed = true;
}
else
{
BASIC_BLOCK (j) = b;
b->index = j++;
}
}
n_basic_blocks = j;
basic_block_info->num_elements = j;
if (changed)
tidy_fallthru_edges ();
@ -1801,6 +1788,9 @@ cleanup_cfg (mode)
&& !reload_completed)
delete_trivially_dead_insns (get_insns(), max_reg_num ());
}
compact_blocks ();
while (try_optimize_cfg (mode))
{
delete_unreachable_blocks (), changed = true;
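With the O(N^2) concurrent compaction gone from delete_unreachable_blocks, cleanup_cfg calls compact_blocks once up front instead. A sketch of what compacting means under the new scheme (the real body is in cfg.c and may differ in detail):

/* Renumber the blocks along the chain so that sindex values are dense
   again, and rebuild the basic_block_info array to match.  */
void
compact_blocks ()
{
  basic_block bb;
  int i = 0;

  FOR_ALL_BB (bb)
    {
      BASIC_BLOCK (i) = bb;
      bb->sindex = i;
      i++;
    }

  if (i != num_basic_blocks)
    abort ();

  last_basic_block = num_basic_blocks;
}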


@ -86,8 +86,8 @@ skip_insns_after_block (bb)
rtx insn, last_insn, next_head, prev;
next_head = NULL_RTX;
if (bb->index + 1 != n_basic_blocks)
next_head = BASIC_BLOCK (bb->index + 1)->head;
if (bb->next_bb != EXIT_BLOCK_PTR)
next_head = bb->next_bb->head;
for (last_insn = insn = bb->end; (insn = NEXT_INSN (insn)) != 0; )
{
@ -176,7 +176,7 @@ label_for_bb (bb)
if (GET_CODE (label) != CODE_LABEL)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Emitting label for block %d\n", bb->index);
fprintf (rtl_dump_file, "Emitting label for block %d\n", bb->sindex);
label = block_label (bb);
}
@ -191,11 +191,10 @@ static void
record_effective_endpoints ()
{
rtx next_insn = get_insns ();
int i;
for (i = 0; i < n_basic_blocks; i++)
basic_block bb;
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx end;
if (PREV_INSN (bb->head) && next_insn != bb->head)
@ -357,15 +356,15 @@ scope_to_insns_finalize ()
static void
fixup_reorder_chain ()
{
basic_block bb;
basic_block bb, prev_bb;
int index;
rtx insn = NULL;
/* First do the bulk reordering -- rechain the blocks without regard to
the needed changes to jumps and labels. */
for (bb = BASIC_BLOCK (0), index = 0;
bb != 0;
for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
bb;
bb = RBI (bb)->next, index++)
{
if (RBI (bb)->header)
@ -394,7 +393,7 @@ fixup_reorder_chain ()
}
}
if (index != n_basic_blocks)
if (index != num_basic_blocks)
abort ();
NEXT_INSN (insn) = function_footer;
@ -412,7 +411,7 @@ fixup_reorder_chain ()
/* Now add jumps and labels as needed to match the blocks new
outgoing edges. */
for (bb = BASIC_BLOCK (0); bb ; bb = RBI (bb)->next)
for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = RBI (bb)->next)
{
edge e_fall, e_taken, e;
rtx bb_end_insn;
@ -523,29 +522,39 @@ fixup_reorder_chain ()
}
/* Put basic_block_info in the new order. */
if (rtl_dump_file)
{
fprintf (rtl_dump_file, "Reordered sequence:\n");
for (bb = BASIC_BLOCK (0), index = 0; bb; bb = RBI (bb)->next, index ++)
for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
bb;
bb = RBI (bb)->next, index ++)
{
fprintf (rtl_dump_file, " %i ", index);
if (RBI (bb)->original)
fprintf (rtl_dump_file, "duplicate of %i ",
RBI (bb)->original->index);
RBI (bb)->original->sindex);
else if (forwarder_block_p (bb) && GET_CODE (bb->head) != CODE_LABEL)
fprintf (rtl_dump_file, "compensation ");
else
fprintf (rtl_dump_file, "bb %i ", bb->index);
fprintf (rtl_dump_file, "bb %i ", bb->sindex);
fprintf (rtl_dump_file, " [%i]\n", bb->frequency);
}
}
for (bb = BASIC_BLOCK (0), index = 0; bb; bb = RBI (bb)->next, index ++)
prev_bb = ENTRY_BLOCK_PTR;
bb = ENTRY_BLOCK_PTR->next_bb;
index = 0;
for (; bb; prev_bb = bb, bb = RBI (bb)->next, index++)
{
bb->index = index;
bb->sindex = index;
BASIC_BLOCK (index) = bb;
bb->prev_bb = prev_bb;
prev_bb->next_bb = bb;
}
prev_bb->next_bb = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->prev_bb = prev_bb;
}
/* Perform sanity checks on the insn chain.
@ -588,11 +597,10 @@ verify_insn_chain ()
static void
cleanup_unconditional_jumps ()
{
int i;
for (i = 0; i < n_basic_blocks; i++)
{
basic_block bb = BASIC_BLOCK (i);
basic_block bb;
FOR_ALL_BB (bb)
{
if (!bb->succ)
continue;
if (bb->succ->flags & EDGE_FALLTHRU)
@ -600,13 +608,14 @@ cleanup_unconditional_jumps ()
if (!bb->succ->succ_next)
{
rtx insn;
if (GET_CODE (bb->head) != CODE_LABEL && forwarder_block_p (bb) && i)
if (GET_CODE (bb->head) != CODE_LABEL && forwarder_block_p (bb)
&& bb->prev_bb != ENTRY_BLOCK_PTR)
{
basic_block prev = BASIC_BLOCK (--i);
basic_block prev = bb->prev_bb;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing forwarder BB %i\n",
bb->index);
bb->sindex);
redirect_edge_succ (bb->pred, bb->succ->dest);
flow_delete_block (bb);
@ -618,7 +627,7 @@ cleanup_unconditional_jumps ()
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing jump %i in BB %i\n",
INSN_UID (jump), bb->index);
INSN_UID (jump), bb->sindex);
delete_insn (jump);
bb->succ->flags |= EDGE_FALLTHRU;
}
@ -663,7 +672,7 @@ fixup_fallthru_exit_predecessor ()
if (bb && RBI (bb)->next)
{
basic_block c = BASIC_BLOCK (0);
basic_block c = ENTRY_BLOCK_PTR->next_bb;
while (RBI (c)->next != bb)
c = RBI (c)->next;
@ -813,14 +822,14 @@ cfg_layout_redirect_edge (e, dest)
edge e;
basic_block dest;
{
int old_index = dest->index;
basic_block src = e->src;
basic_block old_next_bb = src->next_bb;
/* Redirect_edge_and_branch may decide to turn branch into fallthru edge
in the case the basic block appears to be in sequence. Avoid this
transformation. */
dest->index = n_basic_blocks + 1;
src->next_bb = NULL;
if (e->flags & EDGE_FALLTHRU)
{
/* In case we are redirecting fallthru edge to the branch edge
@ -846,7 +855,7 @@ cfg_layout_redirect_edge (e, dest)
delete_barrier (NEXT_INSN (src->end));
src->succ->flags |= EDGE_FALLTHRU;
}
dest->index = old_index;
src->next_bb = old_next_bb;
}
/* Create a duplicate of the basic block BB and redirect edge E into it. */
@ -871,8 +880,9 @@ cfg_layout_duplicate_bb (bb, e)
#endif
insn = duplicate_insn_chain (bb->head, bb->end);
new_bb = create_basic_block (n_basic_blocks, insn,
insn ? get_last_insn () : NULL);
new_bb = create_basic_block (insn,
insn ? get_last_insn () : NULL,
EXIT_BLOCK_PTR->prev_bb);
alloc_aux_for_block (new_bb, sizeof (struct reorder_block_def));
if (RBI (bb)->header)


@ -50,17 +50,18 @@ flow_loops_cfg_dump (loops, file)
FILE *file;
{
int i;
basic_block bb;
if (! loops->num || ! file || ! loops->cfg.dom)
return;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
edge succ;
fprintf (file, ";; %d succs { ", i);
for (succ = BASIC_BLOCK (i)->succ; succ; succ = succ->succ_next)
fprintf (file, "%d ", succ->dest->index);
fprintf (file, ";; %d succs { ", bb->sindex);
for (succ = bb->succ; succ; succ = succ->succ_next)
fprintf (file, "%d ", succ->dest->sindex);
flow_nodes_print ("} dom", loops->cfg.dom[bb->sindex], file);
}
@ -68,7 +69,7 @@ flow_loops_cfg_dump (loops, file)
if (loops->cfg.dfs_order)
{
fputs (";; DFS order: ", file);
for (i = 0; i < n_basic_blocks; i++)
for (i = 0; i < num_basic_blocks; i++)
fprintf (file, "%d ", loops->cfg.dfs_order[i]);
fputs ("\n", file);
@ -78,7 +79,7 @@ flow_loops_cfg_dump (loops, file)
if (loops->cfg.rc_order)
{
fputs (";; RC order: ", file);
for (i = 0; i < n_basic_blocks; i++)
for (i = 0; i < num_basic_blocks; i++)
fprintf (file, "%d ", loops->cfg.rc_order[i]);
fputs ("\n", file);
@ -118,9 +119,9 @@ flow_loop_dump (loop, file, loop_dump_aux, verbose)
loop->shared ? " shared" : "", loop->invalid ? " invalid" : "");
fprintf (file, ";; header %d, latch %d, pre-header %d, first %d, last %d\n",
loop->header->index, loop->latch->index,
loop->pre_header ? loop->pre_header->index : -1,
loop->first->index, loop->last->index);
loop->header->sindex, loop->latch->sindex,
loop->pre_header ? loop->pre_header->sindex : -1,
loop->first->sindex, loop->last->sindex);
fprintf (file, ";; depth %d, level %d, outer %ld\n",
loop->depth, loop->level,
(long) (loop->outer ? loop->outer->num : -1));
@ -185,7 +186,7 @@ flow_loops_dump (loops, file, loop_dump_aux, verbose)
smaller ? oloop : loop);
fprintf (file,
";; loop header %d shared by loops %d, %d %s\n",
loop->header->index, i, j,
loop->header->sindex, i, j,
disjoint ? "disjoint" : "nested");
}
}
@ -259,7 +260,7 @@ flow_loop_entry_edges_find (header, nodes, entry_edges)
{
basic_block src = e->src;
if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index))
if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->sindex))
num_entries++;
}
@ -273,7 +274,7 @@ flow_loop_entry_edges_find (header, nodes, entry_edges)
{
basic_block src = e->src;
if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index))
if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->sindex))
(*entry_edges)[num_entries++] = e;
}
@ -305,7 +306,7 @@ flow_loop_exit_edges_find (nodes, exit_edges)
{
basic_block dest = e->dest;
if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index))
if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->sindex))
num_exits++;
}
});
@ -322,7 +323,7 @@ flow_loop_exit_edges_find (nodes, exit_edges)
{
basic_block dest = e->dest;
if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index))
if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->sindex))
(*exit_edges)[num_exits++] = e;
}
});
@ -344,19 +345,19 @@ flow_loop_nodes_find (header, latch, nodes)
int sp;
int num_nodes = 0;
stack = (basic_block *) xmalloc (n_basic_blocks * sizeof (basic_block));
stack = (basic_block *) xmalloc (num_basic_blocks * sizeof (basic_block));
sp = 0;
/* Start with only the loop header in the set of loop nodes. */
sbitmap_zero (nodes);
SET_BIT (nodes, header->index);
SET_BIT (nodes, header->sindex);
num_nodes++;
header->loop_depth++;
/* Push the loop latch on to the stack. */
if (! TEST_BIT (nodes, latch->index))
if (! TEST_BIT (nodes, latch->sindex))
{
SET_BIT (nodes, latch->index);
SET_BIT (nodes, latch->sindex);
latch->loop_depth++;
num_nodes++;
stack[sp++] = latch;
@ -375,9 +376,9 @@ flow_loop_nodes_find (header, latch, nodes)
/* If each ancestor not marked as part of loop, add to set of
loop nodes and push on to stack. */
if (ancestor != ENTRY_BLOCK_PTR
&& ! TEST_BIT (nodes, ancestor->index))
&& ! TEST_BIT (nodes, ancestor->sindex))
{
SET_BIT (nodes, ancestor->index);
SET_BIT (nodes, ancestor->sindex);
ancestor->loop_depth++;
num_nodes++;
stack[sp++] = ancestor;
@ -444,7 +445,7 @@ flow_loop_pre_header_find (header, dom)
basic_block node = e->src;
if (node != ENTRY_BLOCK_PTR
&& ! TEST_BIT (dom[node->index], header->index))
&& ! TEST_BIT (dom[node->sindex], header->sindex))
{
if (pre_header == NULL)
pre_header = node;
@ -599,15 +600,15 @@ flow_loop_scan (loops, loop, flags)
/* Determine which loop nodes dominate all the exits
of the loop. */
loop->exits_doms = sbitmap_alloc (n_basic_blocks);
loop->exits_doms = sbitmap_alloc (last_basic_block);
sbitmap_copy (loop->exits_doms, loop->nodes);
for (j = 0; j < loop->num_exits; j++)
sbitmap_a_and_b (loop->exits_doms, loop->exits_doms,
loops->cfg.dom[loop->exit_edges[j]->src->index]);
loops->cfg.dom[loop->exit_edges[j]->src->sindex]);
/* The header of a natural loop must dominate
all exits. */
if (! TEST_BIT (loop->exits_doms, loop->header->index))
if (! TEST_BIT (loop->exits_doms, loop->header->sindex))
abort ();
}
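The two sizes used above are the core distinction of the patch: num_basic_blocks counts blocks, while last_basic_block bounds the sindex values, which may have holes until the next compact_blocks. Anything indexed by sindex (exits_doms, the dominator bitmaps) is sized by last_basic_block; per-block arrays in traversal order (the DFS/RC orders) stay at num_basic_blocks. A hypothetical consistency check, not in the patch:

/* Blocks never outnumber the index space; equality holds only
   immediately after compact_blocks.  */
if (num_basic_blocks > last_basic_block)
  abort ();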
@ -635,14 +636,14 @@ flow_loops_find (loops, flags)
struct loops *loops;
int flags;
{
int i;
int b;
int i, b;
int num_loops;
edge e;
sbitmap headers;
sbitmap *dom;
int *dfs_order;
int *rc_order;
basic_block header;
/* This function cannot be repeatedly called with different
flags to build up the loop information. The loop tree
@ -654,24 +655,21 @@ flow_loops_find (loops, flags)
/* Taking care of this degenerate case makes the rest of
this code simpler. */
if (n_basic_blocks == 0)
if (num_basic_blocks == 0)
return 0;
dfs_order = NULL;
rc_order = NULL;
/* Compute the dominators. */
dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
dom = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
/* Count the number of loop edges (back edges). This should be the
same as the number of natural loops. */
num_loops = 0;
for (b = 0; b < n_basic_blocks; b++)
FOR_ALL_BB (header)
{
basic_block header;
header = BASIC_BLOCK (b);
header->loop_depth = 0;
for (e = header->pred; e; e = e->pred_next)
@ -684,10 +682,7 @@ flow_loops_find (loops, flags)
loop. It also has single back edge to the header
from a latch node. Note that multiple natural loops
may share the same header. */
if (b != header->index)
abort ();
if (latch != ENTRY_BLOCK_PTR && TEST_BIT (dom[latch->index], b))
if (latch != ENTRY_BLOCK_PTR && TEST_BIT (dom[latch->sindex], header->sindex))
num_loops++;
}
}
@ -696,8 +691,8 @@ flow_loops_find (loops, flags)
{
/* Compute depth first search order of the CFG so that outer
natural loops will be found before inner natural loops. */
dfs_order = (int *) xmalloc (n_basic_blocks * sizeof (int));
rc_order = (int *) xmalloc (n_basic_blocks * sizeof (int));
dfs_order = (int *) xmalloc (num_basic_blocks * sizeof (int));
rc_order = (int *) xmalloc (num_basic_blocks * sizeof (int));
flow_depth_first_order_compute (dfs_order, rc_order);
/* Save CFG derived information to avoid recomputing it. */
@ -709,16 +704,16 @@ flow_loops_find (loops, flags)
loops->array
= (struct loop *) xcalloc (num_loops, sizeof (struct loop));
headers = sbitmap_alloc (n_basic_blocks);
headers = sbitmap_alloc (last_basic_block);
sbitmap_zero (headers);
loops->shared_headers = sbitmap_alloc (n_basic_blocks);
loops->shared_headers = sbitmap_alloc (last_basic_block);
sbitmap_zero (loops->shared_headers);
/* Find and record information about all the natural loops
in the CFG. */
num_loops = 0;
for (b = n_basic_blocks - 1; b >= 0; b--)
for (b = num_basic_blocks - 1; b >= 0; b--)
{
basic_block latch;
@ -738,7 +733,7 @@ flow_loops_find (loops, flags)
latch node. Note that multiple natural loops may share
the same header. */
if (header != EXIT_BLOCK_PTR
&& TEST_BIT (dom[latch->index], header->index))
&& TEST_BIT (dom[latch->sindex], header->sindex))
{
struct loop *loop;
@ -759,12 +754,12 @@ flow_loops_find (loops, flags)
/* Keep track of blocks that are loop headers so
that we can tell which loops should be merged. */
if (TEST_BIT (headers, loop->header->index))
SET_BIT (loops->shared_headers, loop->header->index);
SET_BIT (headers, loop->header->index);
if (TEST_BIT (headers, loop->header->sindex))
SET_BIT (loops->shared_headers, loop->header->sindex);
SET_BIT (headers, loop->header->sindex);
/* Find nodes contained within the loop. */
loop->nodes = sbitmap_alloc (n_basic_blocks);
loop->nodes = sbitmap_alloc (last_basic_block);
loop->num_nodes
= flow_loop_nodes_find (loop->header, loop->latch, loop->nodes);
@ -785,7 +780,7 @@ flow_loops_find (loops, flags)
loops and should be merged. For now just mark loops that share
headers. */
for (i = 0; i < num_loops; i++)
if (TEST_BIT (loops->shared_headers, loops->array[i].header->index))
if (TEST_BIT (loops->shared_headers, loops->array[i].header->sindex))
loops->array[i].shared = 1;
sbitmap_free (headers);
@ -832,5 +827,5 @@ flow_loop_outside_edge_p (loop, e)
abort ();
return (e->src == ENTRY_BLOCK_PTR)
|| ! TEST_BIT (loop->nodes, e->src->index);
|| ! TEST_BIT (loop->nodes, e->src->sindex);
}


@ -248,12 +248,14 @@ delete_insn_chain_and_edges (first, last)
the note and basic block struct in BB_NOTE, if any, and do not grow
BASIC_BLOCK chain and should be used directly only by CFG construction code.
END can be NULL to create a new empty basic block before HEAD. Both END
and HEAD can be NULL to create a basic block at the end of the INSN chain. */
and HEAD can be NULL to create a basic block at the end of the INSN chain.
AFTER is the basic block the new one should be put after. */
basic_block
create_basic_block_structure (index, head, end, bb_note)
create_basic_block_structure (index, head, end, bb_note, after)
int index;
rtx head, end, bb_note;
basic_block after;
{
basic_block bb;
@ -309,8 +311,9 @@ create_basic_block_structure (index, head, end, bb_note)
bb->head = head;
bb->end = end;
bb->index = index;
bb->sindex = index;
bb->flags = BB_NEW;
link_block (bb, after);
BASIC_BLOCK (index) = bb;
if (basic_block_for_insn)
update_bb_for_insn (bb);
@ -323,33 +326,23 @@ create_basic_block_structure (index, head, end, bb_note)
}
/* Create a new basic block consisting of instructions in between HEAD and END
and place it in the BB chain at position INDEX. END can be NULL to
and place it in the BB chain after block AFTER. END can be NULL to
create a new empty basic block before HEAD. Both END and HEAD can be NULL to
create a basic block at the end of the INSN chain. */
basic_block
create_basic_block (index, head, end)
int index;
create_basic_block (head, end, after)
rtx head, end;
basic_block after;
{
basic_block bb;
int i;
int index = last_basic_block++;
/* Place the new block just after the block being split. */
VARRAY_GROW (basic_block_info, ++n_basic_blocks);
/* Place the new block to the end. */
VARRAY_GROW (basic_block_info, last_basic_block);
/* Some parts of the compiler expect blocks to be numbered in
sequential order, so insert the new block immediately after the
block being split. */
for (i = n_basic_blocks - 1; i > index; --i)
{
basic_block tmp = BASIC_BLOCK (i - 1);
BASIC_BLOCK (i) = tmp;
tmp->index = i;
}
bb = create_basic_block_structure (index, head, end, NULL);
num_basic_blocks++;
bb = create_basic_block_structure (index, head, end, NULL, after);
bb->aux = NULL;
return bb;
}
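create_basic_block_structure now threads the new block into the chain with link_block (bb, after) instead of shifting the tail of basic_block_info. Plausible shapes for the chain helpers declared in basic_block.h (the real bodies are in cfg.c):

/* Insert BB into the chain immediately after AFTER.  */
void
link_block (bb, after)
     basic_block bb, after;
{
  bb->next_bb = after->next_bb;
  bb->prev_bb = after;
  after->next_bb = bb;
  bb->next_bb->prev_bb = bb;
}

/* Detach BB from the chain; its sindex and array slot are left to
   expunge_block / compact_blocks.  */
void
unlink_block (bb)
     basic_block bb;
{
  bb->next_bb->prev_bb = bb->prev_bb;
  bb->prev_bb->next_bb = bb->next_bb;
}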
@ -431,7 +424,7 @@ flow_delete_block (b)
{
int deleted_handler = flow_delete_block_noexpunge (b);
/* Remove the basic block from the array, and compact behind it. */
/* Remove the basic block from the array. */
expunge_block (b);
return deleted_handler;
@ -444,16 +437,15 @@ void
compute_bb_for_insn (max)
int max;
{
int i;
basic_block bb;
if (basic_block_for_insn)
VARRAY_FREE (basic_block_for_insn);
VARRAY_BB_INIT (basic_block_for_insn, max, "basic_block_for_insn");
for (i = 0; i < n_basic_blocks; ++i)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx end = bb->end;
rtx insn;
@ -537,7 +529,7 @@ split_block (bb, insn)
return 0;
/* Create the new basic block. */
new_bb = create_basic_block (bb->index + 1, NEXT_INSN (insn), bb->end);
new_bb = create_basic_block (NEXT_INSN (insn), bb->end, bb);
new_bb->count = bb->count;
new_bb->frequency = bb->frequency;
new_bb->loop_depth = bb->loop_depth;
@ -772,7 +764,7 @@ try_redirect_by_replacing_jump (e, target)
return false;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Redirecting jump %i from %i to %i.\n",
INSN_UID (insn), e->dest->index, target->index);
INSN_UID (insn), e->dest->sindex, target->sindex);
if (!redirect_jump (insn, block_label (target), 0))
{
if (target == EXIT_BLOCK_PTR)
@ -969,7 +961,7 @@ redirect_edge_and_branch (e, target)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Edge %i->%i redirected to %i\n",
e->src->index, e->dest->index, target->index);
e->src->sindex, e->dest->sindex, target->sindex);
if (e->dest != target)
redirect_edge_succ_nodup (e, target);
@ -998,7 +990,7 @@ force_nonfallthru_and_redirect (e, target)
/* We can't redirect the entry block. Create an empty block at the
start of the function which we use to add the new jump. */
edge *pe1;
basic_block bb = create_basic_block (0, e->dest->head, NULL);
basic_block bb = create_basic_block (e->dest->head, NULL, ENTRY_BLOCK_PTR);
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
@ -1018,8 +1010,7 @@ force_nonfallthru_and_redirect (e, target)
{
/* Create the new structures. */
note = last_loop_beg_note (e->src->end);
jump_block
= create_basic_block (e->src->index + 1, NEXT_INSN (note), NULL);
jump_block = create_basic_block (NEXT_INSN (note), NULL, e->src);
jump_block->count = e->count;
jump_block->frequency = EDGE_FREQUENCY (e);
jump_block->loop_depth = target->loop_depth;
@ -1164,12 +1155,11 @@ tidy_fallthru_edge (e, b, c)
void
tidy_fallthru_edges ()
{
int i;
basic_block b, c;
for (i = 1; i < n_basic_blocks; i++)
for (b = ENTRY_BLOCK_PTR->next_bb, c = b->next_bb;
c && c != EXIT_BLOCK_PTR; b = c, c = c->next_bb)
{
basic_block b = BASIC_BLOCK (i - 1);
basic_block c = BASIC_BLOCK (i);
edge s;
/* We care about simple conditional or unconditional jumps with
@ -1204,12 +1194,18 @@ back_edge_of_syntactic_loop_p (bb1, bb2)
{
rtx insn;
int count = 0;
basic_block bb;
if (bb1->index > bb2->index)
return false;
else if (bb1->index == bb2->index)
if (bb1 == bb2)
return true;
for (bb = bb1; bb && bb != bb2; bb = bb->next_bb)
{
}
if (!bb)
return false;
for (insn = bb1->end; insn != bb2->head && count >= 0;
insn = NEXT_INSN (insn))
if (GET_CODE (insn) == NOTE)
@ -1286,8 +1282,7 @@ split_edge (edge_in)
else
before = NULL_RTX;
bb = create_basic_block (edge_in->dest == EXIT_BLOCK_PTR ? n_basic_blocks
: edge_in->dest->index, before, NULL);
bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
bb->count = edge_in->count;
bb->frequency = EDGE_FREQUENCY (edge_in);
@ -1458,7 +1453,7 @@ commit_one_edge_insertion (e, watch_calls)
e->flags &= ~EDGE_FALLTHRU;
emit_barrier_after (last);
if (before)
delete_insn (before);
}
@ -1481,8 +1476,8 @@ commit_edge_insertions ()
#endif
i = -1;
bb = ENTRY_BLOCK_PTR;
while (1)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
edge e, next;
@ -1492,10 +1487,6 @@ commit_edge_insertions ()
if (e->insns)
commit_one_edge_insertion (e, false);
}
if (++i >= n_basic_blocks)
break;
bb = BASIC_BLOCK (i);
}
}
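FOR_BB_BETWEEN generalizes the walk to an arbitrary half-open range, which is what lets commit_edge_insertions start at ENTRY_BLOCK_PTR (whose successor edges may carry pending insns too) without the old i = -1 bookkeeping. One plausible definition:

/* Walk from FROM up to but not including TO, following the DIR
   (next_bb or prev_bb) links.  */
#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
  for (BB = (FROM); BB != (TO); BB = BB->DIR)

With TO == NULL the walk runs off the end of the chain, so the verify_flow_info loop over ENTRY_BLOCK_PTR->next_bb .. NULL visits EXIT_BLOCK_PTR as well.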
@ -1513,8 +1504,7 @@ commit_edge_insertions_watch_calls ()
#endif
i = -1;
bb = ENTRY_BLOCK_PTR;
while (1)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
edge e, next;
@ -1524,10 +1514,6 @@ commit_edge_insertions_watch_calls ()
if (e->insns)
commit_one_edge_insertion (e, true);
}
if (++i >= n_basic_blocks)
break;
bb = BASIC_BLOCK (i);
}
}
@ -1543,7 +1529,7 @@ dump_bb (bb, outf)
edge e;
fprintf (outf, ";; Basic block %d, loop depth %d, count ",
bb->index, bb->loop_depth);
bb->sindex, bb->loop_depth);
fprintf (outf, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count);
putc ('\n', outf);
@ -1598,7 +1584,6 @@ print_rtl_with_bb (outf, rtx_first)
fprintf (outf, "(nil)\n");
else
{
int i;
enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
int max_uid = get_max_uid ();
basic_block *start
@ -1607,10 +1592,10 @@ print_rtl_with_bb (outf, rtx_first)
= (basic_block *) xcalloc (max_uid, sizeof (basic_block));
enum bb_state *in_bb_p
= (enum bb_state *) xcalloc (max_uid, sizeof (enum bb_state));
basic_block bb;
for (i = n_basic_blocks - 1; i >= 0; i--)
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx x;
start[INSN_UID (bb->head)] = bb;
@ -1631,12 +1616,11 @@ print_rtl_with_bb (outf, rtx_first)
for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
{
int did_output;
basic_block bb;
if ((bb = start[INSN_UID (tmp_rtx)]) != NULL)
{
fprintf (outf, ";; Start of basic block %d, registers live:",
bb->index);
bb->sindex);
dump_regset (bb->global_live_at_start, outf);
putc ('\n', outf);
}
@ -1653,7 +1637,7 @@ print_rtl_with_bb (outf, rtx_first)
if ((bb = end[INSN_UID (tmp_rtx)]) != NULL)
{
fprintf (outf, ";; End of basic block %d, registers live:\n",
bb->index);
bb->sindex);
dump_regset (bb->global_live_at_end, outf);
putc ('\n', outf);
}
@ -1718,16 +1702,37 @@ verify_flow_info ()
basic_block *bb_info, *last_visited;
size_t *edge_checksum;
rtx x;
int i, last_bb_num_seen, num_bb_notes, err = 0;
int num_bb_notes, err = 0;
basic_block bb, last_bb_seen;
bb_info = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
last_visited = (basic_block *) xcalloc (n_basic_blocks + 2,
last_visited = (basic_block *) xcalloc (last_basic_block + 2,
sizeof (basic_block));
edge_checksum = (size_t *) xcalloc (n_basic_blocks + 2, sizeof (size_t));
edge_checksum = (size_t *) xcalloc (last_basic_block + 2, sizeof (size_t));
for (i = n_basic_blocks - 1; i >= 0; i--)
/* Check bb chain & numbers. */
last_bb_seen = ENTRY_BLOCK_PTR;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
{
if (bb != EXIT_BLOCK_PTR
&& bb != BASIC_BLOCK (bb->sindex))
{
error ("bb %d on wrong place", bb->sindex);
err = 1;
}
if (bb->prev_bb != last_bb_seen)
{
error ("prev_bb of %d should be %d, not %d",
bb->sindex, last_bb_seen->sindex, bb->prev_bb->sindex);
err = 1;
}
last_bb_seen = bb;
}
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx head = bb->head;
rtx end = bb->end;
@ -1739,7 +1744,7 @@ verify_flow_info ()
if (!x)
{
error ("end insn %d for block %d not found in the insn stream",
INSN_UID (end), bb->index);
INSN_UID (end), bb->sindex);
err = 1;
}
@ -1753,7 +1758,7 @@ verify_flow_info ()
if (bb_info[INSN_UID (x)] != NULL)
{
error ("insn %d is in multiple basic blocks (%d and %d)",
INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
INSN_UID (x), bb->sindex, bb_info[INSN_UID (x)]->sindex);
err = 1;
}
@ -1765,7 +1770,7 @@ verify_flow_info ()
if (!x)
{
error ("head insn %d for block %d not found in the insn stream",
INSN_UID (head), bb->index);
INSN_UID (head), bb->sindex);
err = 1;
}
@ -1773,9 +1778,8 @@ verify_flow_info ()
}
/* Now check the basic blocks (boundaries etc.) */
for (i = n_basic_blocks - 1; i >= 0; i--)
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (i);
int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0;
edge e;
rtx note;
@ -1795,37 +1799,37 @@ verify_flow_info ()
if (bb->count < 0)
{
error ("verify_flow_info: Wrong count of block %i %i",
bb->index, (int)bb->count);
bb->sindex, (int)bb->count);
err = 1;
}
if (bb->frequency < 0)
{
error ("verify_flow_info: Wrong frequency of block %i %i",
bb->index, bb->frequency);
bb->sindex, bb->frequency);
err = 1;
}
for (e = bb->succ; e; e = e->succ_next)
{
if (last_visited [e->dest->index + 2] == bb)
if (last_visited [e->dest->sindex + 2] == bb)
{
error ("verify_flow_info: Duplicate edge %i->%i",
e->src->index, e->dest->index);
e->src->sindex, e->dest->sindex);
err = 1;
}
if (e->probability < 0 || e->probability > REG_BR_PROB_BASE)
{
error ("verify_flow_info: Wrong probability of edge %i->%i %i",
e->src->index, e->dest->index, e->probability);
e->src->sindex, e->dest->sindex, e->probability);
err = 1;
}
if (e->count < 0)
{
error ("verify_flow_info: Wrong count of edge %i->%i %i",
e->src->index, e->dest->index, (int)e->count);
e->src->sindex, e->dest->sindex, (int)e->count);
err = 1;
}
last_visited [e->dest->index + 2] = bb;
last_visited [e->dest->sindex + 2] = bb;
if (e->flags & EDGE_FALLTHRU)
n_fallthru++;
@ -1847,11 +1851,11 @@ verify_flow_info ()
{
rtx insn;
if (e->src->index + 1 != e->dest->index)
if (e->src->next_bb != e->dest)
{
error
("verify_flow_info: Incorrect blocks for fallthru %i->%i",
e->src->index, e->dest->index);
e->src->sindex, e->dest->sindex);
err = 1;
}
else
@ -1866,7 +1870,7 @@ verify_flow_info ()
)
{
error ("verify_flow_info: Incorrect fallthru %i->%i",
e->src->index, e->dest->index);
e->src->sindex, e->dest->sindex);
fatal_insn ("wrong insn in the fallthru edge", insn);
err = 1;
}
@ -1875,7 +1879,7 @@ verify_flow_info ()
if (e->src != bb)
{
error ("verify_flow_info: Basic block %d succ edge is corrupted",
bb->index);
bb->sindex);
fprintf (stderr, "Predecessor: ");
dump_edge_info (stderr, e, 0);
fprintf (stderr, "\nSuccessor: ");
@ -1884,13 +1888,13 @@ verify_flow_info ()
err = 1;
}
edge_checksum[e->dest->index + 2] += (size_t) e;
edge_checksum[e->dest->sindex + 2] += (size_t) e;
}
if (n_eh && GET_CODE (PATTERN (bb->end)) != RESX
&& !find_reg_note (bb->end, REG_EH_REGION, NULL_RTX))
{
error ("Missing REG_EH_REGION note in the end of bb %i", bb->index);
error ("Missing REG_EH_REGION note in the end of bb %i", bb->sindex);
err = 1;
}
if (n_branch
@ -1898,28 +1902,28 @@ verify_flow_info ()
|| (n_branch > 1 && (any_uncondjump_p (bb->end)
|| any_condjump_p (bb->end)))))
{
error ("Too many outgoing branch edges from bb %i", bb->index);
error ("Too many outgoing branch edges from bb %i", bb->sindex);
err = 1;
}
if (n_fallthru && any_uncondjump_p (bb->end))
{
error ("Fallthru edge after unconditional jump %i", bb->index);
error ("Fallthru edge after unconditional jump %i", bb->sindex);
err = 1;
}
if (n_branch != 1 && any_uncondjump_p (bb->end))
{
error ("Wrong amount of branch edges after unconditional jump %i", bb->index);
error ("Wrong amount of branch edges after unconditional jump %i", bb->sindex);
err = 1;
}
if (n_branch != 1 && any_condjump_p (bb->end)
&& JUMP_LABEL (bb->end) != BASIC_BLOCK (bb->index + 1)->head)
&& JUMP_LABEL (bb->end) != bb->next_bb->head)
{
error ("Wrong amount of branch edges after conditional jump %i", bb->index);
error ("Wrong amount of branch edges after conditional jump %i", bb->sindex);
err = 1;
}
if (n_call && GET_CODE (bb->end) != CALL_INSN)
{
error ("Call edges for non-call insn in bb %i", bb->index);
error ("Call edges for non-call insn in bb %i", bb->sindex);
err = 1;
}
if (n_abnormal
@ -1928,7 +1932,7 @@ verify_flow_info ()
|| any_condjump_p (bb->end)
|| any_uncondjump_p (bb->end)))
{
error ("Abnormal edges for no purpose in bb %i", bb->index);
error ("Abnormal edges for no purpose in bb %i", bb->sindex);
err = 1;
}
@ -1943,7 +1947,7 @@ verify_flow_info ()
|| (GET_CODE (insn) == NOTE
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK))
{
error ("missing barrier after block %i", bb->index);
error ("missing barrier after block %i", bb->sindex);
err = 1;
break;
}
@ -1953,7 +1957,7 @@ verify_flow_info ()
{
if (e->dest != bb)
{
error ("basic block %d pred edge is corrupted", bb->index);
error ("basic block %d pred edge is corrupted", bb->sindex);
fputs ("Predecessor: ", stderr);
dump_edge_info (stderr, e, 0);
fputs ("\nSuccessor: ", stderr);
@ -1961,7 +1965,7 @@ verify_flow_info ()
fputc ('\n', stderr);
err = 1;
}
edge_checksum[e->dest->index + 2] -= (size_t) e;
edge_checksum[e->dest->sindex + 2] -= (size_t) e;
}
for (x = bb->head; x != NEXT_INSN (bb->end); x = NEXT_INSN (x))
@ -1971,11 +1975,11 @@ verify_flow_info ()
if (! BLOCK_FOR_INSN (x))
error
("insn %d inside basic block %d but block_for_insn is NULL",
INSN_UID (x), bb->index);
INSN_UID (x), bb->sindex);
else
error
("insn %d inside basic block %d but block_for_insn is %i",
INSN_UID (x), bb->index, BLOCK_FOR_INSN (x)->index);
INSN_UID (x), bb->sindex, BLOCK_FOR_INSN (x)->sindex);
err = 1;
}
@ -1989,7 +1993,7 @@ verify_flow_info ()
if (bb->end == x)
{
error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
bb->index);
bb->sindex);
err = 1;
}
@ -1999,7 +2003,7 @@ verify_flow_info ()
if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
{
error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
bb->index);
bb->sindex);
err = 1;
}
@ -2012,7 +2016,7 @@ verify_flow_info ()
if (NOTE_INSN_BASIC_BLOCK_P (x))
{
error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
INSN_UID (x), bb->index);
INSN_UID (x), bb->sindex);
err = 1;
}
@ -2023,7 +2027,7 @@ verify_flow_info ()
|| GET_CODE (x) == CODE_LABEL
|| GET_CODE (x) == BARRIER)
{
error ("in basic block %d:", bb->index);
error ("in basic block %d:", bb->sindex);
fatal_insn ("flow control insn inside a basic block", x);
}
}
@ -2034,32 +2038,33 @@ verify_flow_info ()
edge e;
for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next)
edge_checksum[e->dest->index + 2] += (size_t) e;
edge_checksum[e->dest->sindex + 2] += (size_t) e;
for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
edge_checksum[e->dest->index + 2] -= (size_t) e;
edge_checksum[e->dest->sindex + 2] -= (size_t) e;
}
for (i = -2; i < n_basic_blocks; ++i)
if (edge_checksum[i + 2])
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
if (edge_checksum[bb->sindex + 2])
{
error ("basic block %i edge lists are corrupted", i);
error ("basic block %i edge lists are corrupted", bb->sindex);
err = 1;
}
last_bb_num_seen = -1;
num_bb_notes = 0;
last_bb_seen = ENTRY_BLOCK_PTR;
for (x = rtx_first; x; x = NEXT_INSN (x))
{
if (NOTE_INSN_BASIC_BLOCK_P (x))
{
basic_block bb = NOTE_BASIC_BLOCK (x);
bb = NOTE_BASIC_BLOCK (x);
num_bb_notes++;
if (bb->index != last_bb_num_seen + 1)
if (bb != last_bb_seen->next_bb)
internal_error ("basic blocks not numbered consecutively");
last_bb_num_seen = bb->index;
last_bb_seen = bb;
}
if (!bb_info[INSN_UID (x)])
@ -2093,10 +2098,10 @@ verify_flow_info ()
fatal_insn ("return not followed by barrier", x);
}
if (num_bb_notes != n_basic_blocks)
if (num_bb_notes != num_basic_blocks)
internal_error
("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
num_bb_notes, n_basic_blocks);
("number of bb notes in insn chain (%d) != num_basic_blocks (%d)",
num_bb_notes, num_basic_blocks);
if (err)
internal_error ("verify_flow_info failed");
@ -2215,7 +2220,7 @@ purge_dead_edges (bb)
return purged;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Purged edges from bb %i\n", bb->index);
fprintf (rtl_dump_file, "Purged edges from bb %i\n", bb->sindex);
if (!optimize)
return purged;
@ -2274,7 +2279,7 @@ purge_dead_edges (bb)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Purged non-fallthru edges from bb %i\n",
bb->index);
bb->sindex);
return purged;
}
@ -2285,22 +2290,23 @@ bool
purge_all_dead_edges (update_life_p)
int update_life_p;
{
int i, purged = false;
int purged = false;
sbitmap blocks = 0;
basic_block bb;
if (update_life_p)
{
blocks = sbitmap_alloc (n_basic_blocks);
blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
}
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
bool purged_here = purge_dead_edges (BASIC_BLOCK (i));
bool purged_here = purge_dead_edges (bb);
purged |= purged_here;
if (purged_here && update_life_p)
SET_BIT (blocks, i);
SET_BIT (blocks, bb->sindex);
}
if (update_life_p && purged)


@ -192,8 +192,8 @@ static HARD_REG_SET newpat_used_regs;
static rtx added_links_insn;
/* Basic block number of the block in which we are performing combines. */
static int this_basic_block;
/* Basic block in which we are performing combines. */
static basic_block this_basic_block;
/* A bitmap indicating which blocks had registers go dead at entry.
After combine, we'll need to re-do global life analysis with
@ -578,7 +578,7 @@ combine_instructions (f, nregs)
setup_incoming_promotions ();
refresh_blocks = sbitmap_alloc (n_basic_blocks);
refresh_blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (refresh_blocks);
need_refresh = 0;
@ -610,139 +610,138 @@ combine_instructions (f, nregs)
/* Now scan all the insns in forward order. */
this_basic_block = -1;
label_tick = 1;
last_call_cuid = 0;
mem_last_set = 0;
init_reg_last_arrays ();
setup_incoming_promotions ();
for (insn = f; insn; insn = next ? next : NEXT_INSN (insn))
FOR_ALL_BB (this_basic_block)
{
next = 0;
/* If INSN starts a new basic block, update our basic block number. */
if (this_basic_block + 1 < n_basic_blocks
&& BLOCK_HEAD (this_basic_block + 1) == insn)
this_basic_block++;
if (GET_CODE (insn) == CODE_LABEL)
label_tick++;
else if (INSN_P (insn))
for (insn = this_basic_block->head;
insn != NEXT_INSN (this_basic_block->end);
insn = next ? next : NEXT_INSN (insn))
{
/* See if we know about function return values before this
insn based upon SUBREG flags. */
check_promoted_subreg (insn, PATTERN (insn));
next = 0;
/* Try this insn with each insn it links back to. */
if (GET_CODE (insn) == CODE_LABEL)
label_tick++;
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
if ((next = try_combine (insn, XEXP (links, 0),
NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
/* Try each sequence of three linked insns ending with this one. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
else if (INSN_P (insn))
{
rtx link = XEXP (links, 0);
/* See if we know about function return values before this
insn based upon SUBREG flags. */
check_promoted_subreg (insn, PATTERN (insn));
/* If the linked insn has been replaced by a note, then there
is no point in pursuing this chain any further. */
if (GET_CODE (link) == NOTE)
continue;
/* Try this insn with each insn it links back to. */
for (nextlinks = LOG_LINKS (link);
nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, link,
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
if ((next = try_combine (insn, XEXP (links, 0),
NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
}
/* Try each sequence of three linked insns ending with this one. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
{
rtx link = XEXP (links, 0);
/* If the linked insn has been replaced by a note, then there
is no point in pursuing this chain any further. */
if (GET_CODE (link) == NOTE)
continue;
for (nextlinks = LOG_LINKS (link);
nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, link,
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
goto retry;
}
#ifdef HAVE_cc0
/* Try to combine a jump insn that uses CC0
with a preceding insn that sets CC0, and maybe with its
logical predecessor as well.
This is how we make decrement-and-branch insns.
We need this special code because data flow connections
via CC0 do not get entered in LOG_LINKS. */
/* Try to combine a jump insn that uses CC0
with a preceding insn that sets CC0, and maybe with its
logical predecessor as well.
This is how we make decrement-and-branch insns.
We need this special code because data flow connections
via CC0 do not get entered in LOG_LINKS. */
if (GET_CODE (insn) == JUMP_INSN
&& (prev = prev_nonnote_insn (insn)) != 0
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev)))
{
if ((next = try_combine (insn, prev,
NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
if (GET_CODE (insn) == JUMP_INSN
&& (prev = prev_nonnote_insn (insn)) != 0
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev)))
{
if ((next = try_combine (insn, prev,
NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
for (nextlinks = LOG_LINKS (prev); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, prev,
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
for (nextlinks = LOG_LINKS (prev); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, prev,
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
goto retry;
}
/* Do the same for an insn that explicitly references CC0. */
if (GET_CODE (insn) == INSN
&& (prev = prev_nonnote_insn (insn)) != 0
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev))
&& GET_CODE (PATTERN (insn)) == SET
&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
{
if ((next = try_combine (insn, prev,
NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
for (nextlinks = LOG_LINKS (prev); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, prev,
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
goto retry;
}
/* Finally, see if any of the insns that this insn links to
explicitly references CC0. If so, try this insn, that insn,
and its predecessor if it sets CC0. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
if (GET_CODE (XEXP (links, 0)) == INSN
&& GET_CODE (PATTERN (XEXP (links, 0))) == SET
&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
&& (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev))
&& (next = try_combine (insn, XEXP (links, 0),
prev, &new_direct_jump_p)) != 0)
goto retry;
}
/* Do the same for an insn that explicitly references CC0. */
if (GET_CODE (insn) == INSN
&& (prev = prev_nonnote_insn (insn)) != 0
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev))
&& GET_CODE (PATTERN (insn)) == SET
&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
{
if ((next = try_combine (insn, prev,
NULL_RTX, &new_direct_jump_p)) != 0)
goto retry;
for (nextlinks = LOG_LINKS (prev); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, prev,
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
goto retry;
}
/* Finally, see if any of the insns that this insn links to
explicitly references CC0. If so, try this insn, that insn,
and its predecessor if it sets CC0. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
if (GET_CODE (XEXP (links, 0)) == INSN
&& GET_CODE (PATTERN (XEXP (links, 0))) == SET
&& reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
&& (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
&& GET_CODE (prev) == INSN
&& sets_cc0_p (PATTERN (prev))
&& (next = try_combine (insn, XEXP (links, 0),
prev, &new_direct_jump_p)) != 0)
goto retry;
#endif
/* Try combining an insn with two different insns whose results it
uses. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
for (nextlinks = XEXP (links, 1); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, XEXP (links, 0),
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
goto retry;
/* Try combining an insn with two different insns whose results it
uses. */
for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
for (nextlinks = XEXP (links, 1); nextlinks;
nextlinks = XEXP (nextlinks, 1))
if ((next = try_combine (insn, XEXP (links, 0),
XEXP (nextlinks, 0),
&new_direct_jump_p)) != 0)
goto retry;
if (GET_CODE (insn) != NOTE)
record_dead_and_set_regs (insn);
if (GET_CODE (insn) != NOTE)
record_dead_and_set_regs (insn);
retry:
;
retry:
;
}
}
}
clear_bb_flags ();
EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, this_basic_block,
BASIC_BLOCK (this_basic_block)->flags |= BB_DIRTY);
EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, i,
BASIC_BLOCK (i)->flags |= BB_DIRTY);
new_direct_jump_p |= purge_all_dead_edges (0);
delete_noop_moves (f);
@ -860,7 +859,7 @@ set_nonzero_bits_and_sign_copies (x, set, data)
&& REGNO (x) >= FIRST_PSEUDO_REGISTER
/* If this register is undefined at the start of the file, we can't
say what its contents were. */
&& ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start, REGNO (x))
&& ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, REGNO (x))
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
{
if (set == 0 || GET_CODE (set) == CLOBBER)
@ -2388,8 +2387,8 @@ try_combine (i3, i2, i1, new_direct_jump_p)
which we know will be a NOTE. */
for (insn = NEXT_INSN (i3);
insn && (this_basic_block == n_basic_blocks - 1
|| insn != BLOCK_HEAD (this_basic_block + 1));
insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
|| insn != this_basic_block->next_bb->head);
insn = NEXT_INSN (insn))
{
if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
@ -2606,8 +2605,8 @@ try_combine (i3, i2, i1, new_direct_jump_p)
&& ! find_reg_note (i2, REG_UNUSED,
SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
for (temp = NEXT_INSN (i2);
temp && (this_basic_block == n_basic_blocks - 1
|| BLOCK_HEAD (this_basic_block) != temp);
temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
|| this_basic_block->head != temp);
temp = NEXT_INSN (temp))
if (temp != i3 && INSN_P (temp))
for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
@ -8068,7 +8067,7 @@ nonzero_bits (x, mode)
&& (reg_last_set_label[REGNO (x)] == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
&& ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
REGNO (x))))
&& INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
return reg_last_set_nonzero_bits[REGNO (x)] & nonzero;
@ -8483,7 +8482,7 @@ num_sign_bit_copies (x, mode)
&& (reg_last_set_label[REGNO (x)] == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
&& ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
&& ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
REGNO (x))))
&& INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
return reg_last_set_sign_bit_copies[REGNO (x)];
@ -11492,7 +11491,7 @@ get_last_value_validate (loc, insn, tick, replace)
|| (! (regno >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (regno) == 1
&& (! REGNO_REG_SET_P
(BASIC_BLOCK (0)->global_live_at_start, regno)))
(ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))
&& reg_last_set_label[j] > tick))
{
if (replace)
@ -11566,7 +11565,7 @@ get_last_value (x)
&& (regno < FIRST_PSEUDO_REGISTER
|| REG_N_SETS (regno) != 1
|| (REGNO_REG_SET_P
(BASIC_BLOCK (0)->global_live_at_start, regno)))))
(ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))))
return 0;
/* If the value was set in a later insn than the ones we are processing,
@ -11685,7 +11684,7 @@ reg_dead_at_p (reg, insn)
rtx reg;
rtx insn;
{
int block;
basic_block block;
unsigned int i;
/* Set variables for reg_dead_at_p_1. */
@ -11720,19 +11719,19 @@ reg_dead_at_p (reg, insn)
/* Get the basic block number that we were in. */
if (insn == 0)
block = 0;
block = ENTRY_BLOCK_PTR->next_bb;
else
{
for (block = 0; block < n_basic_blocks; block++)
if (insn == BLOCK_HEAD (block))
FOR_ALL_BB (block)
if (insn == block->head)
break;
if (block == n_basic_blocks)
if (block == EXIT_BLOCK_PTR)
return 0;
}
for (i = reg_dead_regno; i < reg_dead_endregno; i++)
if (REGNO_REG_SET_P (BASIC_BLOCK (block)->global_live_at_start, i))
if (REGNO_REG_SET_P (block->global_live_at_start, i))
return 0;
return 1;
@ -12375,7 +12374,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (place == 0)
{
basic_block bb = BASIC_BLOCK (this_basic_block);
basic_block bb = this_basic_block;
for (tem = PREV_INSN (i3); place == 0; tem = PREV_INSN (tem))
{
@ -12519,7 +12518,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
&& REGNO_REG_SET_P (bb->global_live_at_start,
REGNO (XEXP (note, 0))))
{
SET_BIT (refresh_blocks, this_basic_block);
SET_BIT (refresh_blocks, this_basic_block->sindex);
need_refresh = 1;
}
}
@ -12539,7 +12538,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
after we remove them in delete_noop_moves. */
if (noop_move_p (place))
{
SET_BIT (refresh_blocks, this_basic_block);
SET_BIT (refresh_blocks, this_basic_block->sindex);
need_refresh = 1;
}
@ -12589,7 +12588,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
i += HARD_REGNO_NREGS (i, reg_raw_mode[i]))
{
rtx piece = gen_rtx_REG (reg_raw_mode[i], i);
basic_block bb = BASIC_BLOCK (this_basic_block);
basic_block bb = this_basic_block;
if (! dead_or_set_p (place, piece)
&& ! reg_bitfield_target_p (piece,
@ -12612,7 +12611,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (tem == bb->head)
{
SET_BIT (refresh_blocks,
this_basic_block);
this_basic_block->sindex);
need_refresh = 1;
break;
}
@ -12717,8 +12716,8 @@ distribute_links (links)
since most links don't point very far away. */
for (insn = NEXT_INSN (XEXP (link, 0));
(insn && (this_basic_block == n_basic_blocks - 1
|| BLOCK_HEAD (this_basic_block + 1) != insn));
(insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
|| this_basic_block->next_bb->head != insn));
insn = NEXT_INSN (insn))
if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
{

View File

@ -447,19 +447,18 @@ conflict_graph_compute (regs, p)
regset regs;
partition p;
{
int b;
conflict_graph graph = conflict_graph_new (max_reg_num ());
regset_head live_head;
regset live = &live_head;
regset_head born_head;
regset born = &born_head;
basic_block bb;
INIT_REG_SET (live);
INIT_REG_SET (born);
for (b = n_basic_blocks; --b >= 0; )
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (b);
rtx insn;
rtx head;

gcc/df.c

@ -171,12 +171,6 @@ Perhaps there should be a bitmap argument to df_analyse to specify
#include "df.h"
#include "fibheap.h"
#define FOR_ALL_BBS(BB, CODE) \
do { \
int node_; \
for (node_ = 0; node_ < n_basic_blocks; node_++) \
{(BB) = BASIC_BLOCK (node_); CODE;};} while (0)
#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
do { \
unsigned int node_; \
@ -406,8 +400,8 @@ df_bitmaps_alloc (df, flags)
struct df *df;
int flags;
{
unsigned int i;
int dflags = 0;
basic_block bb;
/* Free the bitmaps if they need resizing. */
if ((flags & DF_LR) && df->n_regs < (unsigned int)max_reg_num ())
@ -423,9 +417,8 @@ df_bitmaps_alloc (df, flags)
df->n_defs = df->def_id;
df->n_uses = df->use_id;
for (i = 0; i < df->n_bbs; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (flags & DF_RD && ! bb_info->rd_in)
@ -474,11 +467,10 @@ df_bitmaps_free (df, flags)
struct df *df ATTRIBUTE_UNUSED;
int flags;
{
unsigned int i;
basic_block bb;
for (i = 0; i < df->n_bbs; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (!bb_info)
@ -534,7 +526,7 @@ df_alloc (df, n_regs)
int n_regs;
{
int n_insns;
int i;
basic_block bb;
gcc_obstack_init (&df_ref_obstack);
@ -555,7 +547,7 @@ df_alloc (df, n_regs)
df->uses = xmalloc (df->use_size * sizeof (*df->uses));
df->n_regs = n_regs;
df->n_bbs = n_basic_blocks;
df->n_bbs = last_basic_block;
/* Allocate temporary working array used during local dataflow analysis. */
df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));
@ -569,11 +561,11 @@ df_alloc (df, n_regs)
df->flags = 0;
df->bbs = xcalloc (df->n_bbs, sizeof (struct bb_info));
df->bbs = xcalloc (last_basic_block, sizeof (struct bb_info));
df->all_blocks = BITMAP_XMALLOC ();
for (i = 0; i < n_basic_blocks; i++)
bitmap_set_bit (df->all_blocks, i);
FOR_ALL_BB (bb)
bitmap_set_bit (df->all_blocks, bb->sindex);
}
@ -1946,8 +1938,10 @@ df_analyse_1 (df, blocks, flags, update)
int aflags;
int dflags;
int i;
basic_block bb;
dflags = 0;
aflags = flags;
aflags = flags;
if (flags & DF_UD_CHAIN)
aflags |= DF_RD | DF_RD_CHAIN;
@ -2009,16 +2003,16 @@ df_analyse_1 (df, blocks, flags, update)
df_reg_use_chain_create (df, blocks);
}
df->dfs_order = xmalloc (sizeof(int) * n_basic_blocks);
df->rc_order = xmalloc (sizeof(int) * n_basic_blocks);
df->rts_order = xmalloc (sizeof(int) * n_basic_blocks);
df->inverse_dfs_map = xmalloc (sizeof(int) * n_basic_blocks);
df->inverse_rc_map = xmalloc (sizeof(int) * n_basic_blocks);
df->inverse_rts_map = xmalloc (sizeof(int) * n_basic_blocks);
df->dfs_order = xmalloc (sizeof(int) * num_basic_blocks);
df->rc_order = xmalloc (sizeof(int) * num_basic_blocks);
df->rts_order = xmalloc (sizeof(int) * num_basic_blocks);
df->inverse_dfs_map = xmalloc (sizeof(int) * last_basic_block);
df->inverse_rc_map = xmalloc (sizeof(int) * last_basic_block);
df->inverse_rts_map = xmalloc (sizeof(int) * last_basic_block);
flow_depth_first_order_compute (df->dfs_order, df->rc_order);
flow_reverse_top_sort_order_compute (df->rts_order);
for (i = 0; i < n_basic_blocks; i ++)
for (i = 0; i < num_basic_blocks; i ++)
{
df->inverse_dfs_map[df->dfs_order[i]] = i;
df->inverse_rc_map[df->rc_order[i]] = i;
@ -2029,17 +2023,16 @@ df_analyse_1 (df, blocks, flags, update)
/* Compute the sets of gens and kills for the defs of each bb. */
df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
{
int i;
bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
for (i = 0; i < n_basic_blocks; i ++)
bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
FOR_ALL_BB (bb)
{
in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_in;
out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_out;
gen[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_gen;
kill[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_kill;
in[bb->sindex] = DF_BB_INFO (df, bb)->rd_in;
out[bb->sindex] = DF_BB_INFO (df, bb)->rd_out;
gen[bb->sindex] = DF_BB_INFO (df, bb)->rd_gen;
kill[bb->sindex] = DF_BB_INFO (df, bb)->rd_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
FORWARD, UNION, df_rd_transfer_function,
@ -2066,17 +2059,16 @@ df_analyse_1 (df, blocks, flags, update)
uses in each bb. */
df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
{
int i;
bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
for (i = 0; i < n_basic_blocks; i ++)
bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
FOR_ALL_BB (bb)
{
in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_in;
out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_out;
gen[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_gen;
kill[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_kill;
in[bb->sindex] = DF_BB_INFO (df, bb)->ru_in;
out[bb->sindex] = DF_BB_INFO (df, bb)->ru_out;
gen[bb->sindex] = DF_BB_INFO (df, bb)->ru_gen;
kill[bb->sindex] = DF_BB_INFO (df, bb)->ru_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
BACKWARD, UNION, df_ru_transfer_function,
@ -2106,17 +2098,16 @@ df_analyse_1 (df, blocks, flags, update)
/* Compute the sets of defs and uses of live variables. */
df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
{
int i;
bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *use = xmalloc (sizeof (bitmap) * n_basic_blocks);
bitmap *def = xmalloc (sizeof (bitmap) * n_basic_blocks);
for (i = 0; i < n_basic_blocks; i ++)
bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *use = xmalloc (sizeof (bitmap) * last_basic_block);
bitmap *def = xmalloc (sizeof (bitmap) * last_basic_block);
FOR_ALL_BB (bb)
{
in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_in;
out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_out;
use[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_use;
def[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_def;
in[bb->sindex] = DF_BB_INFO (df, bb)->lr_in;
out[bb->sindex] = DF_BB_INFO (df, bb)->lr_out;
use[bb->sindex] = DF_BB_INFO (df, bb)->lr_use;
def[bb->sindex] = DF_BB_INFO (df, bb)->lr_def;
}
iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
BACKWARD, UNION, df_lr_transfer_function,
@ -2270,12 +2261,15 @@ df_modified_p (df, blocks)
struct df *df;
bitmap blocks;
{
unsigned int j;
int update = 0;
basic_block bb;
for (j = 0; j < df->n_bbs; j++)
if (bitmap_bit_p (df->bbs_modified, j)
&& (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, j)))
if (!df->n_bbs)
return 0;
FOR_ALL_BB (bb)
if (bitmap_bit_p (df->bbs_modified, bb->sindex)
&& (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, bb->sindex)))
{
update = 1;
break;
@ -2298,7 +2292,7 @@ df_analyse (df, blocks, flags)
/* We could deal with additional basic blocks being created by
rescanning everything again. */
if (df->n_bbs && df->n_bbs != (unsigned int)n_basic_blocks)
if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block)
abort ();
update = df_modified_p (df, blocks);
@ -2408,10 +2402,8 @@ df_refs_unlink (df, blocks)
}
else
{
FOR_ALL_BBS (bb,
{
FOR_ALL_BB (bb)
df_bb_refs_unlink (df, bb);
});
}
}
#endif
@ -2459,7 +2451,7 @@ df_insn_modify (df, bb, insn)
if (uid >= df->insn_size)
df_insn_table_realloc (df, 0);
bitmap_set_bit (df->bbs_modified, bb->index);
bitmap_set_bit (df->bbs_modified, bb->sindex);
bitmap_set_bit (df->insns_modified, uid);
/* For incremental updating on the fly, perhaps we could make a copy
@ -3274,7 +3266,6 @@ df_dump (df, flags, file)
int flags;
FILE *file;
{
unsigned int i;
unsigned int j;
if (! df || ! file)
@ -3286,22 +3277,23 @@ df_dump (df, flags, file)
if (flags & DF_RD)
{
basic_block bb;
fprintf (file, "Reaching defs:\n");
for (i = 0; i < df->n_bbs; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->rd_in)
continue;
fprintf (file, "bb %d in \t", i);
fprintf (file, "bb %d in \t", bb->sindex);
dump_bitmap (file, bb_info->rd_in);
fprintf (file, "bb %d gen \t", i);
fprintf (file, "bb %d gen \t", bb->sindex);
dump_bitmap (file, bb_info->rd_gen);
fprintf (file, "bb %d kill\t", i);
fprintf (file, "bb %d kill\t", bb->sindex);
dump_bitmap (file, bb_info->rd_kill);
fprintf (file, "bb %d out \t", i);
fprintf (file, "bb %d out \t", bb->sindex);
dump_bitmap (file, bb_info->rd_out);
}
}
@ -3328,22 +3320,23 @@ df_dump (df, flags, file)
if (flags & DF_RU)
{
basic_block bb;
fprintf (file, "Reaching uses:\n");
for (i = 0; i < df->n_bbs; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->ru_in)
continue;
fprintf (file, "bb %d in \t", i);
fprintf (file, "bb %d in \t", bb->sindex);
dump_bitmap (file, bb_info->ru_in);
fprintf (file, "bb %d gen \t", i);
fprintf (file, "bb %d gen \t", bb->sindex);
dump_bitmap (file, bb_info->ru_gen);
fprintf (file, "bb %d kill\t", i);
fprintf (file, "bb %d kill\t", bb->sindex);
dump_bitmap (file, bb_info->ru_kill);
fprintf (file, "bb %d out \t", i);
fprintf (file, "bb %d out \t", bb->sindex);
dump_bitmap (file, bb_info->ru_out);
}
}
@ -3370,22 +3363,23 @@ df_dump (df, flags, file)
if (flags & DF_LR)
{
basic_block bb;
fprintf (file, "Live regs:\n");
for (i = 0; i < df->n_bbs; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->lr_in)
continue;
fprintf (file, "bb %d in \t", i);
fprintf (file, "bb %d in \t", bb->sindex);
dump_bitmap (file, bb_info->lr_in);
fprintf (file, "bb %d use \t", i);
fprintf (file, "bb %d use \t", bb->sindex);
dump_bitmap (file, bb_info->lr_use);
fprintf (file, "bb %d def \t", i);
fprintf (file, "bb %d def \t", bb->sindex);
dump_bitmap (file, bb_info->lr_def);
fprintf (file, "bb %d out \t", i);
fprintf (file, "bb %d out \t", bb->sindex);
dump_bitmap (file, bb_info->lr_out);
}
}
@ -3408,7 +3402,7 @@ df_dump (df, flags, file)
basic_block bb = df_regno_bb (df, j);
if (bb)
fprintf (file, " bb %d", bb->index);
fprintf (file, " bb %d", bb->sindex);
else
fprintf (file, " bb ?");
}
@ -3609,11 +3603,11 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
void *data;
{
int changed;
int i = block->index;
int i = block->sindex;
edge e;
basic_block bb= block;
SET_BIT (visited, block->index);
if (TEST_BIT (pending, block->index))
basic_block bb = block;
SET_BIT (visited, block->sindex);
if (TEST_BIT (pending, block->sindex))
{
if (dir == FORWARD)
{
@ -3626,10 +3620,10 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
bitmap_a_or_b (in[i], in[i], out[e->src->index]);
bitmap_a_or_b (in[i], in[i], out[e->src->sindex]);
break;
case INTERSECTION:
bitmap_a_and_b (in[i], in[i], out[e->src->index]);
bitmap_a_and_b (in[i], in[i], out[e->src->sindex]);
break;
}
}
@ -3645,10 +3639,10 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
bitmap_a_or_b (out[i], out[i], in[e->dest->sindex]);
break;
case INTERSECTION:
bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
bitmap_a_and_b (out[i], out[i], in[e->dest->sindex]);
break;
}
}
@ -3662,18 +3656,18 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
continue;
SET_BIT (pending, e->dest->index);
SET_BIT (pending, e->dest->sindex);
}
}
else
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR || e->dest->index == i)
if (e->src == ENTRY_BLOCK_PTR || e->dest == block)
continue;
SET_BIT (pending, e->src->index);
SET_BIT (pending, e->src->sindex);
}
}
}
@ -3682,11 +3676,11 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
continue;
if (!TEST_BIT (visited, e->dest->index))
hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
if (!TEST_BIT (visited, e->dest->sindex))
hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
data);
}
}
@ -3694,9 +3688,9 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
if (e->src == ENTRY_BLOCK_PTR || e->src == block)
continue;
if (!TEST_BIT (visited, e->src->index))
if (!TEST_BIT (visited, e->src->sindex))
hybrid_search_bitmap (e->src, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
data);
@ -3720,11 +3714,11 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
void *data;
{
int changed;
int i = block->index;
int i = block->sindex;
edge e;
basic_block bb= block;
SET_BIT (visited, block->index);
if (TEST_BIT (pending, block->index))
basic_block bb = block;
SET_BIT (visited, block->sindex);
if (TEST_BIT (pending, block->sindex))
{
if (dir == FORWARD)
{
@ -3737,10 +3731,10 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
sbitmap_a_or_b (in[i], in[i], out[e->src->sindex]);
break;
case INTERSECTION:
sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
sbitmap_a_and_b (in[i], in[i], out[e->src->sindex]);
break;
}
}
@ -3756,10 +3750,10 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
sbitmap_a_or_b (out[i], out[i], in[e->dest->sindex]);
break;
case INTERSECTION:
sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
sbitmap_a_and_b (out[i], out[i], in[e->dest->sindex]);
break;
}
}
@ -3773,18 +3767,18 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
continue;
SET_BIT (pending, e->dest->index);
SET_BIT (pending, e->dest->sindex);
}
}
else
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR || e->dest->index == i)
if (e->src == ENTRY_BLOCK_PTR || e->dest == block)
continue;
SET_BIT (pending, e->src->index);
SET_BIT (pending, e->src->sindex);
}
}
}
@ -3793,9 +3787,9 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
continue;
if (!TEST_BIT (visited, e->dest->index))
if (!TEST_BIT (visited, e->dest->sindex))
hybrid_search_sbitmap (e->dest, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
data);
@ -3805,9 +3799,9 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
if (e->src == ENTRY_BLOCK_PTR || e->src == block)
continue;
if (!TEST_BIT (visited, e->src->index))
if (!TEST_BIT (visited, e->src->sindex))
hybrid_search_sbitmap (e->src, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
data);
@ -3853,8 +3847,8 @@ iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
pending = sbitmap_alloc (n_basic_blocks);
visited = sbitmap_alloc (n_basic_blocks);
pending = sbitmap_alloc (last_basic_block);
visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
@ -3873,7 +3867,7 @@ iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
{
i = (size_t) fibheap_extract_min (worklist);
bb = BASIC_BLOCK (i);
if (!TEST_BIT (visited, bb->index))
if (!TEST_BIT (visited, bb->sindex))
hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
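
Between this driver and its bitmap twin below, the control structure is identical: seed a fibonacci-heap worklist with every requested block, then let the recursive hybrid search sweep any block still unvisited when it reaches the front of the queue. A rough sketch of that skeleton, assuming the block set is a bitmap walked with EXECUTE_IF_SET_IN_BITMAP (the priority array `order' is a hypothetical stand-in for whatever ordering the caller supplies):

/* Hedged sketch of the iterative_dataflow_* skeleton; `order' is a
   hypothetical priority map, not the real parameter name.  */
fibheap_t worklist = fibheap_new ();
EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
  {
    fibheap_insert (worklist, order[i], (void *) (size_t) i);
  });
while (!fibheap_empty (worklist))
  {
    i = (size_t) fibheap_extract_min (worklist);
    bb = BASIC_BLOCK (i);
    if (!TEST_BIT (visited, bb->sindex))
      hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
                             conf_op, transfun, visited, pending, data);
  }
fibheap_delete (worklist);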
@ -3912,8 +3906,8 @@ iterative_dataflow_bitmap (in, out, gen, kill, blocks,
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
pending = sbitmap_alloc (n_basic_blocks);
visited = sbitmap_alloc (n_basic_blocks);
pending = sbitmap_alloc (last_basic_block);
visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
@ -3932,7 +3926,7 @@ iterative_dataflow_bitmap (in, out, gen, kill, blocks,
{
i = (size_t) fibheap_extract_min (worklist);
bb = BASIC_BLOCK (i);
if (!TEST_BIT (visited, bb->index))
if (!TEST_BIT (visited, bb->sindex))
hybrid_search_bitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
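
All of the index-free loops in this file lean on the new prev_bb/next_bb chain; the traversal macros themselves are plain pointer walks between the ENTRY and EXIT sentinels. A self-contained sketch of the pattern, with toy stand-ins rather than the real declarations:

/* Toy model of the chain walk; `struct blk' stands in for basic_block
   and the two statics for ENTRY_BLOCK_PTR/EXIT_BLOCK_PTR.  */
struct blk { int sindex; struct blk *prev_bb, *next_bb; };
static struct blk entry_sentinel, exit_sentinel;

#define SKETCH_FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
  for ((BB) = (FROM); (BB) != (TO); (BB) = (BB)->DIR)
#define SKETCH_FOR_ALL_BB(BB) \
  SKETCH_FOR_BB_BETWEEN (BB, entry_sentinel.next_bb, &exit_sentinel, next_bb)
#define SKETCH_FOR_ALL_BB_REVERSE(BB) \
  SKETCH_FOR_BB_BETWEEN (BB, exit_sentinel.prev_bb, &entry_sentinel, prev_bb)

Unlinking a block is then pure pointer surgery, so surviving blocks keep their sindex values; the numbering merely grows holes.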


@ -158,7 +158,7 @@ struct df_map
};
#define DF_BB_INFO(REFS, BB) (&REFS->bbs[(BB)->index])
#define DF_BB_INFO(REFS, BB) (&REFS->bbs[(BB)->sindex])
/* Macros to access the elements within the ref structure. */
@ -175,7 +175,7 @@ struct df_map
#define DF_REF_LOC(REF) ((REF)->loc)
#endif
#define DF_REF_BB(REF) (BLOCK_FOR_INSN ((REF)->insn))
#define DF_REF_BBNO(REF) (BLOCK_FOR_INSN ((REF)->insn)->index)
#define DF_REF_BBNO(REF) (BLOCK_FOR_INSN ((REF)->insn)->sindex)
#define DF_REF_INSN(REF) ((REF)->insn)
#define DF_REF_INSN_UID(REF) (INSN_UID ((REF)->insn))
#define DF_REF_TYPE(REF) ((REF)->type)
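
DF_BB_INFO and DF_REF_BBNO now key on sindex, and with the compaction pass gone those keys may be sparse. That is why every per-block allocation in this patch switches from n_basic_blocks to last_basic_block: one slot for every index ever issued, not one per live block. A hedged illustration with made-up numbers:

/* After expunging the blocks with sindex 1, 3 and 4, three blocks are
   live but the largest sindex is still 5.  */
num_basic_blocks = 3;              /* how many blocks remain */
last_basic_block = 6;              /* one past the largest sindex */

struct bb_info *bbs
  = (struct bb_info *) xcalloc (last_basic_block, sizeof (struct bb_info));
/* bbs[bb->sindex] is valid for every live block; slots 1, 3, 4 sit idle.  */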


@ -45,7 +45,7 @@
number of the corresponding basic block. Please note that we include the
artificial ENTRY_BLOCK (or EXIT_BLOCK in the post-dom case) in our lists to
support multiple entry points. As it has no real basic block index we use
'n_basic_blocks' for that. Its dfs number is of course 1. */
'last_basic_block' for that. Its dfs number is of course 1. */
/* Type of Basic Block aka. TBB */
typedef unsigned int TBB;
@ -140,9 +140,9 @@ static void
init_dom_info (di)
struct dom_info *di;
{
/* We need memory for n_basic_blocks nodes and the ENTRY_BLOCK or
/* We need memory for num_basic_blocks nodes and the ENTRY_BLOCK or
EXIT_BLOCK. */
unsigned int num = n_basic_blocks + 1 + 1;
unsigned int num = num_basic_blocks + 2;
init_ar (di->dfs_parent, TBB, num, 0);
init_ar (di->path_min, TBB, num, i);
init_ar (di->key, TBB, num, i);
@ -155,7 +155,7 @@ init_dom_info (di)
init_ar (di->set_size, unsigned int, num, 1);
init_ar (di->set_child, TBB, num, 0);
init_ar (di->dfs_order, TBB, (unsigned int) n_basic_blocks + 1, 0);
init_ar (di->dfs_order, TBB, (unsigned int) last_basic_block + 1, 0);
init_ar (di->dfs_to_bb, basic_block, num, 0);
di->dfsnum = 1;
@ -207,7 +207,7 @@ calc_dfs_tree_nonrec (di, bb, reverse)
/* Ending block. */
basic_block ex_block;
stack = (edge *) xmalloc ((n_basic_blocks + 3) * sizeof (edge));
stack = (edge *) xmalloc ((num_basic_blocks + 3) * sizeof (edge));
sp = 0;
/* Initialize our border blocks, and the first edge. */
@ -244,7 +244,7 @@ calc_dfs_tree_nonrec (di, bb, reverse)
/* If the next node BN is either already visited or a border
block, the current edge is useless, and simply overwritten
with the next edge out of the current node. */
if (bn == ex_block || di->dfs_order[bn->index])
if (bn == ex_block || di->dfs_order[bn->sindex])
{
e = e->pred_next;
continue;
@ -255,7 +255,7 @@ calc_dfs_tree_nonrec (di, bb, reverse)
else
{
bn = e->dest;
if (bn == ex_block || di->dfs_order[bn->index])
if (bn == ex_block || di->dfs_order[bn->sindex])
{
e = e->succ_next;
continue;
@ -269,10 +269,10 @@ calc_dfs_tree_nonrec (di, bb, reverse)
/* Fill in the DFS tree info calculable _before_ recursing. */
if (bb != en_block)
my_i = di->dfs_order[bb->index];
my_i = di->dfs_order[bb->sindex];
else
my_i = di->dfs_order[n_basic_blocks];
child_i = di->dfs_order[bn->index] = di->dfsnum++;
my_i = di->dfs_order[last_basic_block];
child_i = di->dfs_order[bn->sindex] = di->dfsnum++;
di->dfs_to_bb[child_i] = bn;
di->dfs_parent[child_i] = my_i;
@ -314,7 +314,7 @@ calc_dfs_tree (di, reverse)
{
/* The first block is the ENTRY_BLOCK (or EXIT_BLOCK if REVERSE). */
basic_block begin = reverse ? EXIT_BLOCK_PTR : ENTRY_BLOCK_PTR;
di->dfs_order[n_basic_blocks] = di->dfsnum;
di->dfs_order[last_basic_block] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = begin;
di->dfsnum++;
@ -326,13 +326,12 @@ calc_dfs_tree (di, reverse)
They are reverse-unreachable. In the dom-case we disallow such
nodes, but in post-dom we have to deal with them, so we simply
include them in the DFS tree which actually becomes a forest. */
int i;
for (i = n_basic_blocks - 1; i >= 0; i--)
basic_block b;
FOR_ALL_BB_REVERSE (b)
{
basic_block b = BASIC_BLOCK (i);
if (di->dfs_order[b->index])
if (di->dfs_order[b->sindex])
continue;
di->dfs_order[b->index] = di->dfsnum;
di->dfs_order[b->sindex] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = b;
di->dfsnum++;
calc_dfs_tree_nonrec (di, b, reverse);
@ -342,7 +341,7 @@ calc_dfs_tree (di, reverse)
di->nodes = di->dfsnum - 1;
/* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all. */
if (di->nodes != (unsigned int) n_basic_blocks + 1)
if (di->nodes != (unsigned int) num_basic_blocks + 1)
abort ();
}
@ -494,9 +493,9 @@ calc_idoms (di, reverse)
e_next = e->pred_next;
}
if (b == en_block)
k1 = di->dfs_order[n_basic_blocks];
k1 = di->dfs_order[last_basic_block];
else
k1 = di->dfs_order[b->index];
k1 = di->dfs_order[b->sindex];
/* Call eval() only if really needed. If k1 is above V in DFS tree,
then we know, that eval(k1) == k1 and key[k1] == k1. */
@ -542,20 +541,20 @@ idoms_to_doms (di, dominators)
{
TBB i, e_index;
int bb, bb_idom;
sbitmap_vector_zero (dominators, n_basic_blocks);
sbitmap_vector_zero (dominators, last_basic_block);
/* We have to be careful not to include the ENTRY_BLOCK or EXIT_BLOCK
in the list of (post)-doms, so remember that in e_index. */
e_index = di->dfs_order[n_basic_blocks];
e_index = di->dfs_order[last_basic_block];
for (i = 1; i <= di->nodes; i++)
{
if (i == e_index)
continue;
bb = di->dfs_to_bb[i]->index;
bb = di->dfs_to_bb[i]->sindex;
if (di->dom[i] && (di->dom[i] != e_index))
{
bb_idom = di->dfs_to_bb[di->dom[i]]->index;
bb_idom = di->dfs_to_bb[di->dom[i]]->sindex;
sbitmap_copy (dominators[bb], dominators[bb_idom]);
}
else
@ -577,8 +576,8 @@ idoms_to_doms (di, dominators)
}
/* The main entry point into this module. IDOM is an integer array with room
for n_basic_blocks integers, DOMS is a preallocated sbitmap array having
room for n_basic_blocks^2 bits, and POST is true if the caller wants to
for last_basic_block integers, DOMS is a preallocated sbitmap array having
room for last_basic_block^2 bits, and POST is true if the caller wants to
know post-dominators.
On return IDOM[i] will be the BB->index of the immediate (post) dominator
@ -604,17 +603,17 @@ calculate_dominance_info (idom, doms, reverse)
if (idom)
{
int i;
for (i = 0; i < n_basic_blocks; i++)
basic_block b;
FOR_ALL_BB (b)
{
basic_block b = BASIC_BLOCK (i);
TBB d = di.dom[di.dfs_order[b->index]];
TBB d = di.dom[di.dfs_order[b->sindex]];
/* The old code didn't modify array elements of nodes having only
itself as dominator (d==0) or only ENTRY_BLOCK (resp. EXIT_BLOCK)
(d==1). */
if (d > 1)
idom[i] = di.dfs_to_bb[d]->index;
idom[b->sindex] = di.dfs_to_bb[d]->sindex;
}
}
if (doms)

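The dominator code above needs one extra array slot because the artificial ENTRY (or EXIT, in the post-dom case) block owns no sindex of its own; its DFS number is parked at index last_basic_block, which is why dfs_order is allocated with last_basic_block + 1 elements. Schematically (a sketch, not the literal init_dom_info/calc_dfs_tree code):

TBB *dfs_order = (TBB *) xcalloc (last_basic_block + 1, sizeof (TBB));
dfs_order[last_basic_block] = 1;      /* the artificial ENTRY/EXIT block */
/* Every reachable real block then takes the next DFS number:  */
dfs_order[bn->sindex] = di->dfsnum++;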

@ -928,8 +928,8 @@ insn_current_reference_address (branch)
void
compute_alignments ()
{
int i;
int log, max_skip, max_log;
basic_block bb;
if (label_align)
{
@ -946,9 +946,8 @@ compute_alignments ()
if (! optimize || optimize_size)
return;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx label = bb->head;
int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0;
edge e;
@ -978,8 +977,8 @@ compute_alignments ()
if (!has_fallthru
&& (branch_frequency > BB_FREQ_MAX / 10
|| (bb->frequency > BASIC_BLOCK (i - 1)->frequency * 10
&& (BASIC_BLOCK (i - 1)->frequency
|| (bb->frequency > bb->prev_bb->frequency * 10
&& (bb->prev_bb->frequency
<= ENTRY_BLOCK_PTR->frequency / 2))))
{
log = JUMP_ALIGN (label);
@ -2019,7 +2018,7 @@ final_scan_insn (insn, file, optimize, prescan, nopeepholes)
#endif
if (flag_debug_asm)
fprintf (asm_out_file, "\t%s basic block %d\n",
ASM_COMMENT_START, NOTE_BASIC_BLOCK (insn)->index);
ASM_COMMENT_START, NOTE_BASIC_BLOCK (insn)->sindex);
break;
case NOTE_INSN_EH_REGION_BEG:

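The compute_alignments hunk above shows the second idiom this patch introduces: BASIC_BLOCK (i - 1) presumed dense numbering, while bb->prev_bb names the physical predecessor no matter how many indices have been freed. A hedged toy of the failure mode being avoided:

/* Hypothetical CFG in which the block with sindex 3 has been expunged.  */
basic_block bb = BASIC_BLOCK (4);
basic_block by_index = BASIC_BLOCK (bb->sindex - 1); /* slot 3: stale or NULL */
basic_block by_chain = bb->prev_bb;                  /* the live neighbour, sindex 2 */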

@ -575,7 +575,7 @@ verify_local_live_at_start (new_live_at_start, bb)
{
fprintf (rtl_dump_file,
"live_at_start mismatch in bb %d, aborting\nNew:\n",
bb->index);
bb->sindex);
debug_bitmap_file (rtl_dump_file, new_live_at_start);
fputs ("Old:\n", rtl_dump_file);
dump_bb (bb, rtl_dump_file);
@ -656,6 +656,7 @@ update_life_info (blocks, extent, prop_flags)
for ( ; ; )
{
int changed = 0;
basic_block bb;
calculate_global_regs_live (blocks, blocks,
prop_flags & (PROP_SCAN_DEAD_CODE
@ -667,9 +668,8 @@ update_life_info (blocks, extent, prop_flags)
/* Removing dead code may allow the CFG to be simplified which
in turn may allow for further dead code detection / removal. */
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (i);
COPY_REG_SET (tmp, bb->global_live_at_end);
changed |= propagate_block (bb, tmp, NULL, NULL,
@ -718,10 +718,10 @@ update_life_info (blocks, extent, prop_flags)
}
else
{
for (i = n_basic_blocks - 1; i >= 0; --i)
{
basic_block bb = BASIC_BLOCK (i);
basic_block bb;
FOR_ALL_BB_REVERSE (bb)
{
COPY_REG_SET (tmp, bb->global_live_at_end);
propagate_block (bb, tmp, NULL, NULL, stabilized_prop_flags);
@ -775,16 +775,16 @@ update_life_info_in_dirty_blocks (extent, prop_flags)
enum update_life_extent extent;
int prop_flags;
{
sbitmap update_life_blocks = sbitmap_alloc (n_basic_blocks);
int block_num;
sbitmap update_life_blocks = sbitmap_alloc (last_basic_block);
int n = 0;
basic_block bb;
int retval = 0;
sbitmap_zero (update_life_blocks);
for (block_num = 0; block_num < n_basic_blocks; block_num++)
if (BASIC_BLOCK (block_num)->flags & BB_DIRTY)
FOR_ALL_BB (bb)
if (bb->flags & BB_DIRTY)
{
SET_BIT (update_life_blocks, block_num);
SET_BIT (update_life_blocks, bb->sindex);
n++;
}
@ -810,7 +810,8 @@ free_basic_block_vars (keep_head_end_p)
clear_edges ();
VARRAY_FREE (basic_block_info);
}
n_basic_blocks = 0;
num_basic_blocks = 0;
last_basic_block = 0;
ENTRY_BLOCK_PTR->aux = NULL;
ENTRY_BLOCK_PTR->global_live_at_end = NULL;
@ -825,14 +826,12 @@ int
delete_noop_moves (f)
rtx f ATTRIBUTE_UNUSED;
{
int i;
rtx insn, next;
basic_block bb;
int nnoops = 0;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
bb = BASIC_BLOCK (i);
for (insn = bb->head; insn != NEXT_INSN (bb->end); insn = next)
{
next = NEXT_INSN (insn);
@ -1079,7 +1078,7 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
sbitmap blocks_in, blocks_out;
int flags;
{
basic_block *queue, *qhead, *qtail, *qend;
basic_block *queue, *qhead, *qtail, *qend, bb;
regset tmp, new_live_at_end, call_used;
regset_head tmp_head, call_used_head;
regset_head new_live_at_end_head;
@ -1088,10 +1087,8 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
/* Some passes used to forget to clear the aux field of basic blocks,
causing sick behaviour here. */
#ifdef ENABLE_CHECKING
if (ENTRY_BLOCK_PTR->aux || EXIT_BLOCK_PTR->aux)
abort ();
for (i = 0; i < n_basic_blocks; i++)
if (BASIC_BLOCK (i)->aux)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
if (bb->aux)
abort ();
#endif
@ -1107,9 +1104,9 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
/* Create a worklist. Allocate an extra slot for ENTRY_BLOCK, and one
because the `head == tail' style test for an empty queue doesn't
work with a full queue. */
queue = (basic_block *) xmalloc ((n_basic_blocks + 2) * sizeof (*queue));
queue = (basic_block *) xmalloc ((num_basic_blocks + 2) * sizeof (*queue));
qtail = queue;
qhead = qend = queue + n_basic_blocks + 2;
qhead = qend = queue + num_basic_blocks + 2;
/* Queue the blocks set in the initial mask. Do this in reverse block
number order so that we are more likely for the first round to do
@ -1117,21 +1114,20 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
if (blocks_in)
{
/* Clear out the garbage that might be hanging out in bb->aux. */
for (i = n_basic_blocks - 1; i >= 0; --i)
BASIC_BLOCK (i)->aux = NULL;
FOR_ALL_BB (bb)
bb->aux = NULL;
EXECUTE_IF_SET_IN_SBITMAP (blocks_in, 0, i,
{
basic_block bb = BASIC_BLOCK (i);
bb = BASIC_BLOCK (i);
*--qhead = bb;
bb->aux = bb;
});
}
else
{
for (i = 0; i < n_basic_blocks; ++i)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
*--qhead = bb;
bb->aux = bb;
}
@ -1307,7 +1303,7 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
/* Let our caller know that BB changed enough to require its
death notes updated. */
if (blocks_out)
SET_BIT (blocks_out, bb->index);
SET_BIT (blocks_out, bb->sindex);
if (! rescan)
{
@ -1363,16 +1359,15 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
{
EXECUTE_IF_SET_IN_SBITMAP (blocks_out, 0, i,
{
basic_block bb = BASIC_BLOCK (i);
bb = BASIC_BLOCK (i);
FREE_REG_SET (bb->local_set);
FREE_REG_SET (bb->cond_local_set);
});
}
else
{
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
FREE_REG_SET (bb->local_set);
FREE_REG_SET (bb->cond_local_set);
}
@ -1498,12 +1493,10 @@ initialize_uninitialized_subregs ()
void
allocate_bb_life_data ()
{
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
}
@ -2342,14 +2335,14 @@ int
regno_uninitialized (regno)
unsigned int regno;
{
if (n_basic_blocks == 0
if (num_basic_blocks == 0
|| (regno < FIRST_PSEUDO_REGISTER
&& (global_regs[regno]
|| fixed_regs[regno]
|| FUNCTION_ARG_REGNO_P (regno))))
return 0;
return REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start, regno);
return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno);
}
/* 1 if register REGNO was alive at a place where `setjmp' was called
@ -2360,11 +2353,11 @@ int
regno_clobbered_at_setjmp (regno)
int regno;
{
if (n_basic_blocks == 0)
if (num_basic_blocks == 0)
return 0;
return ((REG_N_SETS (regno) > 1
|| REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start, regno))
|| REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno))
&& REGNO_REG_SET_P (regs_live_at_setjmp, regno));
}
@ -2719,7 +2712,7 @@ mark_set_1 (pbi, code, reg, cond, insn, flags)
| PROP_DEATH_NOTES | PROP_AUTOINC))
{
rtx y;
int blocknum = pbi->bb->index;
int blocknum = pbi->bb->sindex;
y = NULL_RTX;
if (flags & (PROP_LOG_LINKS | PROP_AUTOINC))
@ -3576,7 +3569,7 @@ mark_used_reg (pbi, reg, cond, insn)
{
/* Keep track of which basic block each reg appears in. */
int blocknum = pbi->bb->index;
int blocknum = pbi->bb->sindex;
if (REG_BASIC_BLOCK (regno_first) == REG_BLOCK_UNKNOWN)
REG_BASIC_BLOCK (regno_first) = blocknum;
else if (REG_BASIC_BLOCK (regno_first) != blocknum)
@ -4246,18 +4239,16 @@ count_or_remove_death_notes (blocks, kill)
sbitmap blocks;
int kill;
{
int i, count = 0;
int count = 0;
basic_block bb;
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb;
rtx insn;
if (blocks && ! TEST_BIT (blocks, i))
if (blocks && ! TEST_BIT (blocks, bb->sindex))
continue;
bb = BASIC_BLOCK (i);
for (insn = bb->head;; insn = NEXT_INSN (insn))
{
if (INSN_P (insn))

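regno_uninitialized and regno_clobbered_at_setjmp above, like the prologue scan below, show the companion rule: BASIC_BLOCK (0) is no longer guaranteed to exist, but the first real block is always one hop from the ENTRY sentinel, and the last one hop from EXIT. Sketched:

/* Hedged sketch; guarded by num_basic_blocks because with no real
   blocks the two sentinels are adjacent.  */
if (num_basic_blocks != 0)
  {
    basic_block first = ENTRY_BLOCK_PTR->next_bb; /* was BASIC_BLOCK (0) */
    basic_block last = EXIT_BLOCK_PTR->prev_bb;   /* was BASIC_BLOCK (n_basic_blocks - 1) */
    /* e.g. REGNO_REG_SET_P (first->global_live_at_start, regno)  */
  }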

@ -7817,7 +7817,7 @@ epilogue_done:
}
/* Find the last line number note in the first block. */
for (insn = BASIC_BLOCK (0)->end;
for (insn = ENTRY_BLOCK_PTR->next_bb->end;
insn != prologue_end && insn;
insn = PREV_INSN (insn))
if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)

File diff suppressed because it is too large.


@ -583,7 +583,7 @@ global_alloc (file)
#if 0 /* We need to eliminate regs even if there is no rtl code,
for the sake of debugging information. */
if (n_basic_blocks > 0)
if (num_basic_blocks > 0)
#endif
{
build_insn_chain (get_insns ());
@ -636,7 +636,8 @@ allocno_compare (v1p, v2p)
static void
global_conflicts ()
{
int b, i;
int i;
basic_block b;
rtx insn;
int *block_start_allocnos;
@ -645,7 +646,7 @@ global_conflicts ()
block_start_allocnos = (int *) xmalloc (max_allocno * sizeof (int));
for (b = 0; b < n_basic_blocks; b++)
FOR_ALL_BB (b)
{
memset ((char *) allocnos_live, 0, allocno_row_words * sizeof (INT_TYPE));
@ -664,7 +665,7 @@ global_conflicts ()
are explicitly marked in basic_block_live_at_start. */
{
regset old = BASIC_BLOCK (b)->global_live_at_start;
regset old = b->global_live_at_start;
int ax = 0;
REG_SET_TO_HARD_REG_SET (hard_regs_live, old);
@ -713,7 +714,7 @@ global_conflicts ()
that is reached by an abnormal edge. */
edge e;
for (e = BASIC_BLOCK (b)->pred; e ; e = e->pred_next)
for (e = b->pred; e ; e = e->pred_next)
if (e->flags & EDGE_ABNORMAL)
break;
if (e != NULL)
@ -723,7 +724,7 @@ global_conflicts ()
#endif
}
insn = BLOCK_HEAD (b);
insn = b->head;
/* Scan the code of this basic block, noting which allocnos
and hard regs are born or die. When one is born,
@ -823,7 +824,7 @@ global_conflicts ()
}
}
if (insn == BLOCK_END (b))
if (insn == b->end)
break;
insn = NEXT_INSN (insn);
}
@ -1708,11 +1709,11 @@ void
mark_elimination (from, to)
int from, to;
{
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
regset r = BASIC_BLOCK (i)->global_live_at_start;
regset r = bb->global_live_at_start;
if (REGNO_REG_SET_P (r, from))
{
CLEAR_REGNO_REG_SET (r, from);
@ -1794,7 +1795,7 @@ build_insn_chain (first)
{
struct insn_chain **p = &reload_insn_chain;
struct insn_chain *prev = 0;
int b = 0;
basic_block b = ENTRY_BLOCK_PTR->next_bb;
regset_head live_relevant_regs_head;
live_relevant_regs = INITIALIZE_REG_SET (live_relevant_regs_head);
@ -1803,14 +1804,14 @@ build_insn_chain (first)
{
struct insn_chain *c;
if (first == BLOCK_HEAD (b))
if (first == b->head)
{
int i;
CLEAR_REG_SET (live_relevant_regs);
EXECUTE_IF_SET_IN_BITMAP
(BASIC_BLOCK (b)->global_live_at_start, 0, i,
(b->global_live_at_start, 0, i,
{
if (i < FIRST_PSEUDO_REGISTER
? ! TEST_HARD_REG_BIT (eliminable_regset, i)
@ -1827,7 +1828,7 @@ build_insn_chain (first)
*p = c;
p = &c->next;
c->insn = first;
c->block = b;
c->block = b->sindex;
if (INSN_P (first))
{
@ -1865,8 +1866,8 @@ build_insn_chain (first)
}
}
if (first == BLOCK_END (b))
b++;
if (first == b->end)
b = b->next_bb;
/* Stop after we pass the end of the last basic block. Verify that
no real insns are after the end of the last basic block.
@ -1874,7 +1875,7 @@ build_insn_chain (first)
We may want to reorganize the loop somewhat since this test should
always be the right exit test. Allow an ADDR_VEC or ADDR_DIF_VEC if
the previous real insn is a JUMP_INSN. */
if (b == n_basic_blocks)
if (b == EXIT_BLOCK_PTR)
{
for (first = NEXT_INSN (first) ; first; first = NEXT_INSN (first))
if (INSN_P (first)

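build_insn_chain above now carries a basic_block cursor instead of an integer counter, advancing it whenever the scan passes a block's final insn; the old `b == n_basic_blocks' termination test becomes pointer equality with EXIT_BLOCK_PTR. Reduced to a hedged skeleton (not the full function):

basic_block b = ENTRY_BLOCK_PTR->next_bb;
rtx insn;
for (insn = first; insn; insn = NEXT_INSN (insn))
  {
    if (b != EXIT_BLOCK_PTR && insn == b->head)
      ; /* entering a new block: reset the live register set */
    if (b != EXIT_BLOCK_PTR && insn == b->end)
      b = b->next_bb;  /* stepped past this block's last insn */
    if (b == EXIT_BLOCK_PTR)
      break;           /* only notes and jump tables may follow */
  }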

@ -258,7 +258,6 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
fprintf (fp, "(nil)\n");
else
{
int i;
enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
int max_uid = get_max_uid ();
int *start = (int *) xmalloc (max_uid * sizeof (int));
@ -266,19 +265,19 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
enum bb_state *in_bb_p = (enum bb_state *)
xmalloc (max_uid * sizeof (enum bb_state));
basic_block bb;
int j;
for (i = 0; i < max_uid; ++i)
for (j = 0; j < max_uid; ++j)
{
start[i] = end[i] = -1;
in_bb_p[i] = NOT_IN_BB;
start[j] = end[j] = -1;
in_bb_p[j] = NOT_IN_BB;
}
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB_REVERSE (bb)
{
rtx x;
bb = BASIC_BLOCK (i);
start[INSN_UID (bb->head)] = i;
end[INSN_UID (bb->end)] = i;
start[INSN_UID (bb->head)] = bb->sindex;
end[INSN_UID (bb->end)] = bb->sindex;
for (x = bb->head; x != NULL_RTX; x = NEXT_INSN (x))
{
in_bb_p[INSN_UID (x)]
@ -310,12 +309,12 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
continue;
}
if ((i = start[INSN_UID (tmp_rtx)]) >= 0)
if ((j = start[INSN_UID (tmp_rtx)]) >= 0)
{
/* We start a subgraph for each basic block. */
start_bb (fp, i);
start_bb (fp, j);
if (i == 0)
if (j == 0)
draw_edge (fp, 0, INSN_UID (tmp_rtx), 1, 0);
}
@ -323,11 +322,11 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
node_data (fp, tmp_rtx);
next_insn = next_nonnote_insn (tmp_rtx);
if ((i = end[INSN_UID (tmp_rtx)]) >= 0)
if ((j = end[INSN_UID (tmp_rtx)]) >= 0)
{
edge e;
bb = BASIC_BLOCK (i);
bb = BASIC_BLOCK (j);
/* End of the basic block. */
end_bb (fp);


@ -2303,7 +2303,8 @@ void
sched_init (dump_file)
FILE *dump_file;
{
int luid, b;
int luid;
basic_block b;
rtx insn;
int i;
@ -2356,8 +2357,8 @@ sched_init (dump_file)
h_i_d[0].luid = 0;
luid = 1;
for (b = 0; b < n_basic_blocks; b++)
for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
FOR_ALL_BB (b)
for (insn = b->head;; insn = NEXT_INSN (insn))
{
INSN_LUID (insn) = luid;
@ -2369,7 +2370,7 @@ sched_init (dump_file)
if (GET_CODE (insn) != NOTE)
++luid;
if (insn == BLOCK_END (b))
if (insn == b->end)
break;
}
@ -2383,7 +2384,7 @@ sched_init (dump_file)
{
rtx line;
line_note_head = (rtx *) xcalloc (n_basic_blocks, sizeof (rtx));
line_note_head = (rtx *) xcalloc (last_basic_block, sizeof (rtx));
/* Save-line-note-head:
Determine the line-number at the start of each basic block.
@ -2391,22 +2392,22 @@ sched_init (dump_file)
predecessor has been scheduled, it is impossible to accurately
determine the correct line number for the first insn of the block. */
for (b = 0; b < n_basic_blocks; b++)
FOR_ALL_BB (b)
{
for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
for (line = b->head; line; line = PREV_INSN (line))
if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
{
line_note_head[b] = line;
line_note_head[b->sindex] = line;
break;
}
/* Do a forward search as well, since we won't get to see the first
notes in a basic block. */
for (line = BLOCK_HEAD (b); line; line = NEXT_INSN (line))
for (line = b->head; line; line = NEXT_INSN (line))
{
if (INSN_P (line))
break;
if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
line_note_head[b] = line;
line_note_head[b->sindex] = line;
}
}
}
@ -2420,22 +2421,22 @@ sched_init (dump_file)
/* ??? Add a NOTE after the last insn of the last basic block. It is not
known why this is done. */
insn = BLOCK_END (n_basic_blocks - 1);
insn = EXIT_BLOCK_PTR->prev_bb->end;
if (NEXT_INSN (insn) == 0
|| (GET_CODE (insn) != NOTE
&& GET_CODE (insn) != CODE_LABEL
/* Don't emit a NOTE if it would end up before a BARRIER. */
&& GET_CODE (NEXT_INSN (insn)) != BARRIER))
{
emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks - 1));
emit_note_after (NOTE_INSN_DELETED, EXIT_BLOCK_PTR->prev_bb->end);
/* Make insn to appear outside BB. */
BLOCK_END (n_basic_blocks - 1) = PREV_INSN (BLOCK_END (n_basic_blocks - 1));
EXIT_BLOCK_PTR->prev_bb->end = PREV_INSN (EXIT_BLOCK_PTR->prev_bb->end);
}
/* Compute INSN_REG_WEIGHT for all blocks. We must do this before
removing death notes. */
for (b = n_basic_blocks - 1; b >= 0; b--)
find_insn_reg_weight (b);
FOR_ALL_BB_REVERSE (b)
find_insn_reg_weight (b->sindex);
}
/* Free global data used during insn scheduling. */


@ -110,14 +110,6 @@ static int find_memory PARAMS ((rtx *, void *));
static int dead_or_predicable PARAMS ((basic_block, basic_block,
basic_block, basic_block, int));
static void noce_emit_move_insn PARAMS ((rtx, rtx));
/* Abuse the basic_block AUX field to store the original block index,
as well as a flag indicating that the block should be rescanned for
life analysis. */
#define SET_ORIG_INDEX(BB,I) ((BB)->aux = (void *)((size_t)(I)))
#define ORIG_INDEX(BB) ((size_t)(BB)->aux)
/* Count the number of non-jump active insns in BB. */
@ -1973,7 +1965,7 @@ find_if_block (test_bb, then_edge, else_edge)
basic_block join_bb = NULL_BLOCK;
edge then_succ = then_bb->succ;
edge else_succ = else_bb->succ;
int next_index;
basic_block next;
/* The THEN block of an IF-THEN combo must have exactly one predecessor. */
if (then_bb->pred->pred_next != NULL_EDGE)
@ -2043,12 +2035,12 @@ find_if_block (test_bb, then_edge, else_edge)
if (else_bb)
fprintf (rtl_dump_file,
"\nIF-THEN-ELSE block found, start %d, then %d, else %d, join %d\n",
test_bb->index, then_bb->index, else_bb->index,
join_bb->index);
test_bb->sindex, then_bb->sindex, else_bb->sindex,
join_bb->sindex);
else
fprintf (rtl_dump_file,
"\nIF-THEN block found, start %d, then %d, join %d\n",
test_bb->index, then_bb->index, join_bb->index);
test_bb->sindex, then_bb->sindex, join_bb->sindex);
}
/* Make sure IF, THEN, and ELSE, blocks are adjacent. Actually, we
@ -2057,10 +2049,10 @@ find_if_block (test_bb, then_edge, else_edge)
/* ??? As an enhancement, move the ELSE block. Have to deal with
BLOCK notes, if by no other means than aborting the merge if they
exist. Sticky enough I don't want to think about it now. */
next_index = then_bb->index;
if (else_bb && ++next_index != else_bb->index)
next = then_bb;
if (else_bb && (next = next->next_bb) != else_bb)
return FALSE;
if (++next_index != join_bb->index && join_bb->index != EXIT_BLOCK)
if ((next = next->next_bb) != join_bb && join_bb != EXIT_BLOCK_PTR)
{
if (else_bb)
join_bb = NULL;
@ -2100,7 +2092,7 @@ find_cond_trap (test_bb, then_edge, else_edge)
if (rtl_dump_file)
{
fprintf (rtl_dump_file, "\nTRAP-IF block found, start %d, trap %d\n",
test_bb->index, trap_bb->index);
test_bb->sindex, trap_bb->sindex);
}
/* If this is not a standard conditional jump, we can't parse it. */
@ -2146,7 +2138,7 @@ find_cond_trap (test_bb, then_edge, else_edge)
/* If the non-trap block and the test are now adjacent, merge them.
Otherwise we must insert a direct branch. */
if (test_bb->index + 1 == other_bb->index)
if (test_bb->next_bb == other_bb)
{
delete_insn (jump);
merge_if_block (test_bb, NULL, NULL, other_bb);
@ -2300,7 +2292,7 @@ find_if_case_1 (test_bb, then_edge, else_edge)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"\nIF-CASE-1 found, start %d, then %d\n",
test_bb->index, then_bb->index);
test_bb->sindex, then_bb->sindex);
/* THEN is small. */
if (count_bb_insns (then_bb) > BRANCH_COST)
@ -2321,8 +2313,6 @@ find_if_case_1 (test_bb, then_edge, else_edge)
new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb), else_bb);
/* Make rest of code believe that the newly created block is the THEN_BB
block we are going to remove. */
if (new_bb)
new_bb->aux = then_bb->aux;
flow_delete_block (then_bb);
/* We've possibly created jump to next insn, cleanup_cfg will solve that
later. */
@ -2358,16 +2348,16 @@ find_if_case_2 (test_bb, then_edge, else_edge)
return FALSE;
/* THEN is not EXIT. */
if (then_bb->index < 0)
if (then_bb == EXIT_BLOCK_PTR)
return FALSE;
/* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
note = find_reg_note (test_bb->end, REG_BR_PROB, NULL_RTX);
if (note && INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2)
;
else if (else_succ->dest->index < 0
|| TEST_BIT (post_dominators[ORIG_INDEX (then_bb)],
ORIG_INDEX (else_succ->dest)))
else if (else_succ->dest == EXIT_BLOCK_PTR
|| TEST_BIT (post_dominators[then_bb->sindex],
else_succ->dest->sindex))
;
else
return FALSE;
@ -2376,7 +2366,7 @@ find_if_case_2 (test_bb, then_edge, else_edge)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"\nIF-CASE-2 found, start %d, else %d\n",
test_bb->index, else_bb->index);
test_bb->sindex, else_bb->sindex);
/* ELSE is small. */
if (count_bb_insns (then_bb) > BRANCH_COST)
@ -2685,7 +2675,7 @@ void
if_convert (x_life_data_ok)
int x_life_data_ok;
{
int block_num;
basic_block bb;
num_possible_if_blocks = 0;
num_updated_if_blocks = 0;
@ -2700,25 +2690,17 @@ if_convert (x_life_data_ok)
post_dominators = NULL;
if (HAVE_conditional_execution || life_data_ok)
{
post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
}
if (life_data_ok)
clear_bb_flags ();
/* Record initial block numbers. */
for (block_num = 0; block_num < n_basic_blocks; block_num++)
SET_ORIG_INDEX (BASIC_BLOCK (block_num), block_num);
/* Go through each of the basic blocks looking for things to convert. */
for (block_num = 0; block_num < n_basic_blocks; )
{
basic_block bb = BASIC_BLOCK (block_num);
if (find_if_header (bb))
block_num = bb->index;
else
block_num++;
}
FOR_ALL_BB (bb)
while (find_if_header (bb))
{
}
if (post_dominators)
sbitmap_vector_free (post_dominators);
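
Dropping SET_ORIG_INDEX/ORIG_INDEX is what the stable numbering buys: post_dominators can be indexed by sindex directly, and the driver loses its manual block_num bookkeeping. The retry loop works because a successful find_if_header merges the converted blocks into BB, which may leave a fresh if-header sitting at the very same block; sketched:

/* Hedged sketch of the new driver shape.  */
basic_block bb;
FOR_ALL_BB (bb)
  while (find_if_header (bb))
    continue; /* converted: the merged BB may start another if-block */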

gcc/lcm.c
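compute_antinout_edge and compute_available in this file share one circular worklist idiom: qin and qout chase each other through a buffer of num_basic_blocks slots, and a block's aux pointer doubles as its `already queued' mark, so the queue can never overflow. The idiom in isolation (a hedged sketch, not the literal code):

basic_block *worklist
  = (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks);
basic_block *qin = worklist, *qout = worklist;
basic_block *qend = worklist + num_basic_blocks;
unsigned int qlen = 0;

/* Enqueue BB only when its mark is clear.  */
if (!bb->aux)
  {
    *qin++ = bb, bb->aux = bb, qlen++;
    if (qin >= qend)
      qin = worklist;
  }

/* Dequeue.  */
bb = *qout++, bb->aux = NULL, qlen--;
if (qout >= qend)
  qout = worklist;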

@ -106,7 +106,7 @@ compute_antinout_edge (antloc, transp, antin, antout)
sbitmap *antin;
sbitmap *antout;
{
int bb;
basic_block bb;
edge e;
basic_block *worklist, *qin, *qout, *qend;
unsigned int qlen;
@ -115,23 +115,23 @@ compute_antinout_edge (antloc, transp, antin, antout)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
= (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
= (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks);
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
sbitmap_vector_ones (antin, n_basic_blocks);
sbitmap_vector_ones (antin, last_basic_block);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of ANTIN above. */
for (bb = n_basic_blocks - 1; bb >= 0; bb--)
FOR_ALL_BB_REVERSE (bb)
{
*qin++ = BASIC_BLOCK (bb);
BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
*qin++ = bb;
bb->aux = bb;
}
qin = worklist;
qend = &worklist[n_basic_blocks];
qlen = n_basic_blocks;
qend = &worklist[num_basic_blocks];
qlen = num_basic_blocks;
/* Mark blocks which are predecessors of the exit block so that we
can easily identify them below. */
@ -142,32 +142,31 @@ compute_antinout_edge (antloc, transp, antin, antout)
while (qlen)
{
/* Take the first entry off the worklist. */
basic_block b = *qout++;
bb = b->index;
basic_block bb = *qout++;
qlen--;
if (qout >= qend)
qout = worklist;
if (b->aux == EXIT_BLOCK_PTR)
if (bb->aux == EXIT_BLOCK_PTR)
/* Do not clear the aux field for blocks which are predecessors of
the EXIT block. That way we never add them to the worklist
again. */
sbitmap_zero (antout[bb]);
sbitmap_zero (antout[bb->sindex]);
else
{
/* Clear the aux field of this block so that it can be added to
the worklist again if necessary. */
b->aux = NULL;
sbitmap_intersection_of_succs (antout[bb], antin, bb);
bb->aux = NULL;
sbitmap_intersection_of_succs (antout[bb->sindex], antin, bb->sindex);
}
if (sbitmap_a_or_b_and_c_cg (antin[bb], antloc[bb],
transp[bb], antout[bb]))
if (sbitmap_a_or_b_and_c_cg (antin[bb->sindex], antloc[bb->sindex],
transp[bb->sindex], antout[bb->sindex]))
/* If the in state of this block changed, then we need
to add the predecessors of this block to the worklist
if they are not already on the worklist. */
for (e = b->pred; e; e = e->pred_next)
for (e = bb->pred; e; e = e->pred_next)
if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
{
*qin++ = e->src;
@ -205,22 +204,22 @@ compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest)
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
if (pred == ENTRY_BLOCK_PTR)
sbitmap_copy (earliest[x], antin[succ->index]);
sbitmap_copy (earliest[x], antin[succ->sindex]);
else
{
/* We refer to the EXIT_BLOCK index, instead of testing for
EXIT_BLOCK_PTR, so that EXIT_BLOCK_PTR's index can be
changed so as to pretend it's a regular block, so that
its antin can be taken into account. */
if (succ->index == EXIT_BLOCK)
if (succ->sindex == EXIT_BLOCK)
sbitmap_zero (earliest[x]);
else
{
sbitmap_difference (difference, antin[succ->index],
avout[pred->index]);
sbitmap_not (temp_bitmap, antout[pred->index]);
sbitmap_difference (difference, antin[succ->sindex],
avout[pred->sindex]);
sbitmap_not (temp_bitmap, antout[pred->sindex]);
sbitmap_a_and_b_or_c (earliest[x], difference,
kill[pred->index], temp_bitmap);
kill[pred->sindex], temp_bitmap);
}
}
}
@ -263,9 +262,9 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
struct edge_list *edge_list;
sbitmap *earliest, *antloc, *later, *laterin;
{
int bb, num_edges, i;
int num_edges, i;
edge e;
basic_block *worklist, *qin, *qout, *qend;
basic_block *worklist, *qin, *qout, *qend, bb;
unsigned int qlen;
num_edges = NUM_EDGES (edge_list);
@ -274,7 +273,7 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
= (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
= (basic_block *) xmalloc (sizeof (basic_block) * (num_basic_blocks + 1));
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
@ -301,41 +300,39 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
/* Add all the blocks to the worklist. This prevents an early exit from
the loop given our optimistic initialization of LATER above. */
for (bb = 0; bb < n_basic_blocks; bb++)
FOR_ALL_BB (bb)
{
basic_block b = BASIC_BLOCK (bb);
*qin++ = b;
b->aux = b;
*qin++ = bb;
bb->aux = bb;
}
qin = worklist;
/* Note that we do not use the last allocated element for our queue,
as EXIT_BLOCK is never inserted into it. In fact the above allocation
of n_basic_blocks + 1 elements is not necessary. */
qend = &worklist[n_basic_blocks];
qlen = n_basic_blocks;
of num_basic_blocks + 1 elements is not necessary. */
qend = &worklist[num_basic_blocks];
qlen = num_basic_blocks;
/* Iterate until the worklist is empty. */
while (qlen)
{
/* Take the first entry off the worklist. */
basic_block b = *qout++;
b->aux = NULL;
bb = *qout++;
bb->aux = NULL;
qlen--;
if (qout >= qend)
qout = worklist;
/* Compute the intersection of LATERIN for each incoming edge to B. */
bb = b->index;
sbitmap_ones (laterin[bb]);
for (e = b->pred; e != NULL; e = e->pred_next)
sbitmap_a_and_b (laterin[bb], laterin[bb], later[(size_t)e->aux]);
sbitmap_ones (laterin[bb->sindex]);
for (e = bb->pred; e != NULL; e = e->pred_next)
sbitmap_a_and_b (laterin[bb->sindex], laterin[bb->sindex], later[(size_t)e->aux]);
/* Calculate LATER for all outgoing edges. */
for (e = b->succ; e != NULL; e = e->succ_next)
for (e = bb->succ; e != NULL; e = e->succ_next)
if (sbitmap_union_of_diff_cg (later[(size_t) e->aux],
earliest[(size_t) e->aux],
laterin[e->src->index],
antloc[e->src->index])
earliest[(size_t) e->aux],
laterin[e->src->sindex],
antloc[e->src->sindex])
/* If LATER for an outgoing edge was changed, then we need
to add the target of the outgoing edge to the worklist. */
&& e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0)
@ -351,10 +348,10 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
/* Computation of insertion and deletion points requires computing LATERIN
for the EXIT block. We allocated an extra entry in the LATERIN array
for just this purpose. */
sbitmap_ones (laterin[n_basic_blocks]);
sbitmap_ones (laterin[last_basic_block]);
for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next)
sbitmap_a_and_b (laterin[n_basic_blocks],
laterin[n_basic_blocks],
sbitmap_a_and_b (laterin[last_basic_block],
laterin[last_basic_block],
later[(size_t) e->aux]);
clear_aux_for_edges ();
@ -370,18 +367,19 @@ compute_insert_delete (edge_list, antloc, later, laterin,
sbitmap *antloc, *later, *laterin, *insert, *delete;
{
int x;
basic_block bb;
for (x = 0; x < n_basic_blocks; x++)
sbitmap_difference (delete[x], antloc[x], laterin[x]);
FOR_ALL_BB (bb)
sbitmap_difference (delete[bb->sindex], antloc[bb->sindex], laterin[bb->sindex]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
if (b == EXIT_BLOCK_PTR)
sbitmap_difference (insert[x], later[x], laterin[n_basic_blocks]);
sbitmap_difference (insert[x], later[x], laterin[last_basic_block]);
else
sbitmap_difference (insert[x], later[x], laterin[b->index]);
sbitmap_difference (insert[x], later[x], laterin[b->sindex]);
}
}
@ -415,29 +413,29 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
fprintf (file, "Edge List:\n");
verify_edge_list (file, edge_list);
print_edge_list (file, edge_list);
dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
dump_sbitmap_vector (file, "antloc", "", antloc, n_basic_blocks);
dump_sbitmap_vector (file, "avloc", "", avloc, n_basic_blocks);
dump_sbitmap_vector (file, "kill", "", kill, n_basic_blocks);
dump_sbitmap_vector (file, "transp", "", transp, last_basic_block);
dump_sbitmap_vector (file, "antloc", "", antloc, last_basic_block);
dump_sbitmap_vector (file, "avloc", "", avloc, last_basic_block);
dump_sbitmap_vector (file, "kill", "", kill, last_basic_block);
}
#endif
/* Compute global availability. */
avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_available (avloc, kill, avout, avin);
sbitmap_vector_free (avin);
/* Compute global anticipatability. */
antin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
antout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_antinout_edge (antloc, transp, antin, antout);
#ifdef LCM_DEBUG_INFO
if (file)
{
dump_sbitmap_vector (file, "antin", "", antin, n_basic_blocks);
dump_sbitmap_vector (file, "antout", "", antout, n_basic_blocks);
dump_sbitmap_vector (file, "antin", "", antin, last_basic_block);
dump_sbitmap_vector (file, "antout", "", antout, last_basic_block);
}
#endif
@ -457,13 +455,13 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
later = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the exit block in the laterin vector. */
laterin = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
compute_laterin (edge_list, earliest, antloc, later, laterin);
#ifdef LCM_DEBUG_INFO
if (file)
{
dump_sbitmap_vector (file, "laterin", "", laterin, n_basic_blocks + 1);
dump_sbitmap_vector (file, "laterin", "", laterin, last_basic_block + 1);
dump_sbitmap_vector (file, "later", "", later, num_edges);
}
#endif
@ -471,7 +469,7 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
sbitmap_vector_free (earliest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
*delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
*delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);
sbitmap_vector_free (laterin);
@ -482,7 +480,7 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
{
dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
n_basic_blocks);
last_basic_block);
}
#endif
@ -496,31 +494,30 @@ void
compute_available (avloc, kill, avout, avin)
sbitmap *avloc, *kill, *avout, *avin;
{
int bb;
edge e;
basic_block *worklist, *qin, *qout, *qend;
basic_block *worklist, *qin, *qout, *qend, bb;
unsigned int qlen;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
= (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
= (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks);
/* We want a maximal solution. */
sbitmap_vector_ones (avout, n_basic_blocks);
sbitmap_vector_ones (avout, last_basic_block);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of AVOUT above. */
for (bb = 0; bb < n_basic_blocks; bb++)
FOR_ALL_BB (bb)
{
*qin++ = BASIC_BLOCK (bb);
BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
*qin++ = bb;
bb->aux = bb;
}
qin = worklist;
qend = &worklist[n_basic_blocks];
qlen = n_basic_blocks;
qend = &worklist[num_basic_blocks];
qlen = num_basic_blocks;
/* Mark blocks which are successors of the entry block so that we
can easily identify them below. */
@ -531,8 +528,7 @@ compute_available (avloc, kill, avout, avin)
while (qlen)
{
/* Take the first entry off the worklist. */
basic_block b = *qout++;
bb = b->index;
basic_block bb = *qout++;
qlen--;
if (qout >= qend)
@ -541,23 +537,24 @@ compute_available (avloc, kill, avout, avin)
/* If one of the predecessor blocks is the ENTRY block, then the
intersection of avouts is the null set. We can identify such blocks
by the special value in the AUX field in the block structure. */
if (b->aux == ENTRY_BLOCK_PTR)
if (bb->aux == ENTRY_BLOCK_PTR)
/* Do not clear the aux field for blocks which are successors of the
ENTRY block. That way we never add them to the worklist again. */
sbitmap_zero (avin[bb]);
sbitmap_zero (avin[bb->sindex]);
else
{
/* Clear the aux field of this block so that it can be added to
the worklist again if necessary. */
b->aux = NULL;
sbitmap_intersection_of_preds (avin[bb], avout, bb);
bb->aux = NULL;
sbitmap_intersection_of_preds (avin[bb->sindex], avout, bb->sindex);
}
if (sbitmap_union_of_diff_cg (avout[bb], avloc[bb], avin[bb], kill[bb]))
if (sbitmap_union_of_diff_cg (avout[bb->sindex], avloc[bb->sindex],
avin[bb->sindex], kill[bb->sindex]))
/* If the out state of this block changed, then we need
to add the successors of this block to the worklist
if they are not already on the worklist. */
for (e = b->succ; e; e = e->succ_next)
for (e = bb->succ; e; e = e->succ_next)
if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
{
*qin++ = e->dest;
@ -597,18 +594,18 @@ compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
if (succ == EXIT_BLOCK_PTR)
sbitmap_copy (farthest[x], st_avout[pred->index]);
sbitmap_copy (farthest[x], st_avout[pred->sindex]);
else
{
if (pred == ENTRY_BLOCK_PTR)
sbitmap_zero (farthest[x]);
else
{
sbitmap_difference (difference, st_avout[pred->index],
st_antin[succ->index]);
sbitmap_not (temp_bitmap, st_avin[succ->index]);
sbitmap_difference (difference, st_avout[pred->sindex],
st_antin[succ->sindex]);
sbitmap_not (temp_bitmap, st_avin[succ->sindex]);
sbitmap_a_and_b_or_c (farthest[x], difference,
kill[succ->index], temp_bitmap);
kill[succ->sindex], temp_bitmap);
}
}
}
@ -627,9 +624,9 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
struct edge_list *edge_list;
sbitmap *farthest, *st_avloc, *nearer, *nearerout;
{
int bb, num_edges, i;
int num_edges, i;
edge e;
basic_block *worklist, *tos;
basic_block *worklist, *tos, bb;
num_edges = NUM_EDGES (edge_list);
@ -637,7 +634,7 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
tos = worklist
= (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
= (basic_block *) xmalloc (sizeof (basic_block) * (num_basic_blocks + 1));
/* Initialize NEARER for each edge and build a mapping from an edge to
its index. */
@ -656,33 +653,31 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
/* Add all the blocks to the worklist. This prevents an early exit
from the loop given our optimistic initialization of NEARER. */
for (bb = 0; bb < n_basic_blocks; bb++)
FOR_ALL_BB (bb)
{
basic_block b = BASIC_BLOCK (bb);
*tos++ = b;
b->aux = b;
*tos++ = bb;
bb->aux = bb;
}
/* Iterate until the worklist is empty. */
while (tos != worklist)
{
/* Take the first entry off the worklist. */
basic_block b = *--tos;
b->aux = NULL;
bb = *--tos;
bb->aux = NULL;
/* Compute the intersection of NEARER for each outgoing edge from B. */
bb = b->index;
sbitmap_ones (nearerout[bb]);
for (e = b->succ; e != NULL; e = e->succ_next)
sbitmap_a_and_b (nearerout[bb], nearerout[bb],
sbitmap_ones (nearerout[bb->sindex]);
for (e = bb->succ; e != NULL; e = e->succ_next)
sbitmap_a_and_b (nearerout[bb->sindex], nearerout[bb->sindex],
nearer[(size_t) e->aux]);
/* Calculate NEARER for all incoming edges. */
for (e = b->pred; e != NULL; e = e->pred_next)
for (e = bb->pred; e != NULL; e = e->pred_next)
if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux],
farthest[(size_t) e->aux],
nearerout[e->dest->index],
st_avloc[e->dest->index])
farthest[(size_t) e->aux],
nearerout[e->dest->sindex],
st_avloc[e->dest->sindex])
/* If NEARER for an incoming edge was changed, then we need
to add the source of the incoming edge to the worklist. */
&& e->src != ENTRY_BLOCK_PTR && e->src->aux == 0)
@ -695,10 +690,10 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
/* Computation of insertion and deletion points requires computing NEAREROUT
for the ENTRY block. We allocated an extra entry in the NEAREROUT array
for just this purpose. */
sbitmap_ones (nearerout[n_basic_blocks]);
sbitmap_ones (nearerout[last_basic_block]);
for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next)
sbitmap_a_and_b (nearerout[n_basic_blocks],
nearerout[n_basic_blocks],
sbitmap_a_and_b (nearerout[last_basic_block],
nearerout[last_basic_block],
nearer[(size_t) e->aux]);
clear_aux_for_edges ();
@ -714,17 +709,19 @@ compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
sbitmap *st_avloc, *nearer, *nearerout, *insert, *delete;
{
int x;
basic_block bb;
for (x = 0; x < n_basic_blocks; x++)
sbitmap_difference (delete[x], st_avloc[x], nearerout[x]);
FOR_ALL_BB (bb)
sbitmap_difference (delete[bb->sindex], st_avloc[bb->sindex],
nearerout[bb->sindex]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
if (b == ENTRY_BLOCK_PTR)
sbitmap_difference (insert[x], nearer[x], nearerout[n_basic_blocks]);
sbitmap_difference (insert[x], nearer[x], nearerout[last_basic_block]);
else
sbitmap_difference (insert[x], nearer[x], nearerout[b->index]);
sbitmap_difference (insert[x], nearer[x], nearerout[b->sindex]);
}
}
@ -754,15 +751,15 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
edge_list = create_edge_list ();
num_edges = NUM_EDGES (edge_list);
st_antin = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
st_antout = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
sbitmap_vector_zero (st_antin, n_basic_blocks);
sbitmap_vector_zero (st_antout, n_basic_blocks);
st_antin = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
st_antout = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
sbitmap_vector_zero (st_antin, last_basic_block);
sbitmap_vector_zero (st_antout, last_basic_block);
compute_antinout_edge (st_antloc, transp, st_antin, st_antout);
/* Compute global anticipatability. */
st_avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
st_avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
st_avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
st_avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_available (st_avloc, kill, st_avout, st_avin);
#ifdef LCM_DEBUG_INFO
@ -771,20 +768,20 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
fprintf (file, "Edge List:\n");
verify_edge_list (file, edge_list);
print_edge_list (file, edge_list);
dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
dump_sbitmap_vector (file, "st_avloc", "", st_avloc, n_basic_blocks);
dump_sbitmap_vector (file, "st_antloc", "", st_antloc, n_basic_blocks);
dump_sbitmap_vector (file, "st_antin", "", st_antin, n_basic_blocks);
dump_sbitmap_vector (file, "st_antout", "", st_antout, n_basic_blocks);
dump_sbitmap_vector (file, "st_kill", "", kill, n_basic_blocks);
dump_sbitmap_vector (file, "transp", "", transp, last_basic_block);
dump_sbitmap_vector (file, "st_avloc", "", st_avloc, last_basic_block);
dump_sbitmap_vector (file, "st_antloc", "", st_antloc, last_basic_block);
dump_sbitmap_vector (file, "st_antin", "", st_antin, last_basic_block);
dump_sbitmap_vector (file, "st_antout", "", st_antout, last_basic_block);
dump_sbitmap_vector (file, "st_kill", "", kill, last_basic_block);
}
#endif
#ifdef LCM_DEBUG_INFO
if (file)
{
dump_sbitmap_vector (file, "st_avout", "", st_avout, n_basic_blocks);
dump_sbitmap_vector (file, "st_avin", "", st_avin, n_basic_blocks);
dump_sbitmap_vector (file, "st_avout", "", st_avout, last_basic_block);
dump_sbitmap_vector (file, "st_avin", "", st_avin, last_basic_block);
}
#endif
@ -807,14 +804,14 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
nearer = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the entry block. */
nearerout = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
nearerout = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout);
#ifdef LCM_DEBUG_INFO
if (file)
{
dump_sbitmap_vector (file, "nearerout", "", nearerout,
n_basic_blocks + 1);
last_basic_block + 1);
dump_sbitmap_vector (file, "nearer", "", nearer, num_edges);
}
#endif
@ -822,7 +819,7 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
sbitmap_vector_free (farthest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
*delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
*delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
*insert, *delete);
@ -834,7 +831,7 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
{
dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
n_basic_blocks);
last_basic_block);
}
#endif
return edge_list;
@ -960,10 +957,10 @@ make_preds_opaque (b, j)
{
basic_block pb = e->src;
if (e->aux || ! TEST_BIT (transp[pb->index], j))
if (e->aux || ! TEST_BIT (transp[pb->sindex], j))
continue;
RESET_BIT (transp[pb->index], j);
RESET_BIT (transp[pb->sindex], j);
make_preds_opaque (pb, j);
}
}
@ -1019,7 +1016,8 @@ optimize_mode_switching (file)
FILE *file;
{
rtx insn;
int bb, e;
int e;
basic_block bb;
int need_commit = 0;
sbitmap *kill;
struct edge_list *edge_list;
@ -1034,8 +1032,8 @@ optimize_mode_switching (file)
clear_bb_flags ();
#ifdef NORMAL_MODE
/* Increment n_basic_blocks before allocating bb_info. */
n_basic_blocks++;
/* Increment last_basic_block before allocating bb_info. */
last_basic_block++;
#endif
for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
@ -1043,7 +1041,7 @@ optimize_mode_switching (file)
{
/* Create the list of segments within each basic block. */
bb_info[n_entities]
= (struct bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
= (struct bb_info *) xcalloc (last_basic_block, sizeof **bb_info);
entity_map[n_entities++] = e;
if (num_modes[e] > max_num_modes)
max_num_modes = num_modes[e];
@ -1051,7 +1049,7 @@ optimize_mode_switching (file)
#ifdef NORMAL_MODE
/* Decrement it back in case we return below. */
n_basic_blocks--;
last_basic_block--;
#endif
if (! n_entities)
@ -1063,20 +1061,20 @@ optimize_mode_switching (file)
EXIT_BLOCK isn't optimized away. We do this by incrementing the
basic block count, growing the VARRAY of basic_block_info and
appending the EXIT_BLOCK_PTR to it. */
n_basic_blocks++;
if (VARRAY_SIZE (basic_block_info) < n_basic_blocks)
VARRAY_GROW (basic_block_info, n_basic_blocks);
BASIC_BLOCK (n_basic_blocks - 1) = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->index = n_basic_blocks - 1;
last_basic_block++;
if (VARRAY_SIZE (basic_block_info) < last_basic_block)
VARRAY_GROW (basic_block_info, last_basic_block);
BASIC_BLOCK (last_basic_block - 1) = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->sindex = last_basic_block - 1;
#endif
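/* Not text from the patch: plausible definitions of the new traversal
   macros, reconstructed from how every hunk below uses them.  A NULL
   TO bound walks off the end of the chain, which is why
   FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) visits the
   entry and exit blocks as well.  */

#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
  for (BB = (FROM); BB != (TO); BB = BB->DIR)

#define FOR_ALL_BB(BB) \
  FOR_BB_BETWEEN (BB, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)

#define FOR_ALL_BB_REVERSE(BB) \
  FOR_BB_BETWEEN (BB, EXIT_BLOCK_PTR->prev_bb, ENTRY_BLOCK_PTR, prev_bb)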
/* Create the bitmap vectors. */
antic = sbitmap_vector_alloc (n_basic_blocks, n_entities);
transp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
comp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
antic = sbitmap_vector_alloc (last_basic_block, n_entities);
transp = sbitmap_vector_alloc (last_basic_block, n_entities);
comp = sbitmap_vector_alloc (last_basic_block, n_entities);
sbitmap_vector_ones (transp, n_basic_blocks);
sbitmap_vector_ones (transp, last_basic_block);
for (j = n_entities - 1; j >= 0; j--)
{
@ -1087,16 +1085,16 @@ optimize_mode_switching (file)
/* Determine what mode the first use (if any) of entity E needs.
This will be the mode that is anticipatable for this block.
Also compute the initial transparency settings. */
for (bb = 0 ; bb < n_basic_blocks; bb++)
FOR_ALL_BB (bb)
{
struct seginfo *ptr;
int last_mode = no_mode;
HARD_REG_SET live_now;
REG_SET_TO_HARD_REG_SET (live_now,
BASIC_BLOCK (bb)->global_live_at_start);
for (insn = BLOCK_HEAD (bb);
insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
bb->global_live_at_start);
for (insn = bb->head;
insn != NULL && insn != NEXT_INSN (bb->end);
insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
@ -1107,9 +1105,9 @@ optimize_mode_switching (file)
if (mode != no_mode && mode != last_mode)
{
last_mode = mode;
ptr = new_seginfo (mode, insn, bb, live_now);
add_seginfo (info + bb, ptr);
RESET_BIT (transp[bb], j);
ptr = new_seginfo (mode, insn, bb->sindex, live_now);
add_seginfo (info + bb->sindex, ptr);
RESET_BIT (transp[bb->sindex], j);
}
/* Update LIVE_NOW. */
@ -1124,12 +1122,12 @@ optimize_mode_switching (file)
}
}
info[bb].computing = last_mode;
info[bb->sindex].computing = last_mode;
/* Check for blocks without ANY mode requirements. */
if (last_mode == no_mode)
{
ptr = new_seginfo (no_mode, insn, bb, live_now);
add_seginfo (info + bb, ptr);
ptr = new_seginfo (no_mode, insn, bb->sindex, live_now);
add_seginfo (info + bb->sindex, ptr);
}
}
#ifdef NORMAL_MODE
@ -1142,65 +1140,65 @@ optimize_mode_switching (file)
for (eg = ENTRY_BLOCK_PTR->succ; eg; eg = eg->succ_next)
{
bb = eg->dest->index;
bb = eg->dest;
/* By always making this nontransparent, we save
an extra check in make_preds_opaque. We also
need this to avoid confusing pre_edge_lcm when
antic is cleared but transp and comp are set. */
RESET_BIT (transp[bb], j);
RESET_BIT (transp[bb->sindex], j);
/* If the block already has MODE, pretend it
has none (because we don't need to set it),
but retain whatever mode it computes. */
if (info[bb].seginfo->mode == mode)
info[bb].seginfo->mode = no_mode;
if (info[bb->sindex].seginfo->mode == mode)
info[bb->sindex].seginfo->mode = no_mode;
/* Insert a fake computing definition of MODE into entry
blocks which compute no mode. This represents the mode on
entry. */
else if (info[bb].computing == no_mode)
else if (info[bb->sindex].computing == no_mode)
{
info[bb].computing = mode;
info[bb].seginfo->mode = no_mode;
info[bb->sindex].computing = mode;
info[bb->sindex].seginfo->mode = no_mode;
}
}
bb = n_basic_blocks - 1;
info[bb].seginfo->mode = mode;
bb = EXIT_BLOCK_PTR;
info[bb->sindex].seginfo->mode = mode;
}
}
#endif /* NORMAL_MODE */
}
kill = sbitmap_vector_alloc (n_basic_blocks, n_entities);
kill = sbitmap_vector_alloc (last_basic_block, n_entities);
for (i = 0; i < max_num_modes; i++)
{
int current_mode[N_ENTITIES];
/* Set the anticipatable and computing arrays. */
sbitmap_vector_zero (antic, n_basic_blocks);
sbitmap_vector_zero (comp, n_basic_blocks);
sbitmap_vector_zero (antic, last_basic_block);
sbitmap_vector_zero (comp, last_basic_block);
for (j = n_entities - 1; j >= 0; j--)
{
int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
struct bb_info *info = bb_info[j];
for (bb = 0 ; bb < n_basic_blocks; bb++)
FOR_ALL_BB (bb)
{
if (info[bb].seginfo->mode == m)
SET_BIT (antic[bb], j);
if (info[bb->sindex].seginfo->mode == m)
SET_BIT (antic[bb->sindex], j);
if (info[bb].computing == m)
SET_BIT (comp[bb], j);
if (info[bb->sindex].computing == m)
SET_BIT (comp[bb->sindex], j);
}
}
/* Calculate the optimal locations for placing mode
switches to modes with priority I. */
for (bb = n_basic_blocks - 1; bb >= 0; bb--)
sbitmap_not (kill[bb], transp[bb]);
FOR_ALL_BB_REVERSE (bb)
sbitmap_not (kill[bb->sindex], transp[bb->sindex]);
edge_list = pre_edge_lcm (file, 1, transp, comp, antic,
kill, &insert, &delete);
@ -1269,8 +1267,8 @@ optimize_mode_switching (file)
emit_insn_after (mode_set, src_bb->end);
else
abort ();
bb_info[j][src_bb->index].computing = mode;
RESET_BIT (transp[src_bb->index], j);
bb_info[j][src_bb->sindex].computing = mode;
RESET_BIT (transp[src_bb->sindex], j);
}
else
{
@ -1279,12 +1277,12 @@ optimize_mode_switching (file)
}
}
for (bb = n_basic_blocks - 1; bb >= 0; bb--)
if (TEST_BIT (delete[bb], j))
FOR_ALL_BB_REVERSE (bb)
if (TEST_BIT (delete[bb->sindex], j))
{
make_preds_opaque (BASIC_BLOCK (bb), j);
make_preds_opaque (bb, j);
/* Cancel the 'deleted' mode set. */
bb_info[j][bb].seginfo->mode = no_mode;
bb_info[j][bb->sindex].seginfo->mode = no_mode;
}
}
@ -1294,9 +1292,9 @@ optimize_mode_switching (file)
#ifdef NORMAL_MODE
/* Restore the special status of EXIT_BLOCK. */
n_basic_blocks--;
last_basic_block--;
VARRAY_POP (basic_block_info);
EXIT_BLOCK_PTR->index = EXIT_BLOCK;
EXIT_BLOCK_PTR->sindex = EXIT_BLOCK;
#endif
/* Now output the remaining mode sets in all the segments. */
@ -1305,16 +1303,16 @@ optimize_mode_switching (file)
int no_mode = num_modes[entity_map[j]];
#ifdef NORMAL_MODE
if (bb_info[j][n_basic_blocks].seginfo->mode != no_mode)
if (bb_info[j][last_basic_block].seginfo->mode != no_mode)
{
edge eg;
struct seginfo *ptr = bb_info[j][n_basic_blocks].seginfo;
struct seginfo *ptr = bb_info[j][last_basic_block].seginfo;
for (eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next)
{
rtx mode_set;
if (bb_info[j][eg->src->index].computing == ptr->mode)
if (bb_info[j][eg->src->sindex].computing == ptr->mode)
continue;
start_sequence ();
@ -1349,10 +1347,10 @@ optimize_mode_switching (file)
}
#endif
for (bb = n_basic_blocks - 1; bb >= 0; bb--)
FOR_ALL_BB_REVERSE (bb)
{
struct seginfo *ptr, *next;
for (ptr = bb_info[j][bb].seginfo; ptr; ptr = next)
for (ptr = bb_info[j][bb->sindex].seginfo; ptr; ptr = next)
{
next = ptr->next;
if (ptr->mode != no_mode)

View File

@ -336,8 +336,9 @@ alloc_qty (regno, mode, size, birth)
int
local_alloc ()
{
int b, i;
int i;
int max_qty;
basic_block b;
/* We need to keep track of whether or not we recorded a LABEL_REF so
that we know if the jump optimizer needs to be rerun. */
@ -394,7 +395,7 @@ local_alloc ()
/* Allocate each block's local registers, block by block. */
for (b = 0; b < n_basic_blocks; b++)
FOR_ALL_BB (b)
{
/* NEXT_QTY indicates which elements of the `qty_...'
vectors might need to be initialized because they were used
@ -426,7 +427,7 @@ local_alloc ()
next_qty = 0;
block_alloc (b);
block_alloc (b->sindex);
}
free (qty);
@ -815,7 +816,7 @@ static void
update_equiv_regs ()
{
rtx insn;
int block;
basic_block bb;
int loop_depth;
regset_head cleared_regs;
int clear_regnos = 0;
@ -828,9 +829,8 @@ update_equiv_regs ()
/* Scan the insns and find which registers have equivalences. Do this
in a separate scan of the insns because (due to -fcse-follow-jumps)
a register can be set below its use. */
for (block = 0; block < n_basic_blocks; block++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (block);
loop_depth = bb->loop_depth;
for (insn = bb->head; insn != NEXT_INSN (bb->end); insn = NEXT_INSN (insn))
@ -1044,10 +1044,8 @@ update_equiv_regs ()
within the same loop (or in an inner loop), then move the register
initialization just before the use, so that they are in the same
basic block. */
for (block = n_basic_blocks - 1; block >= 0; block--)
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (block);
loop_depth = bb->loop_depth;
for (insn = bb->end; insn != PREV_INSN (bb->head); insn = PREV_INSN (insn))
{
@ -1139,12 +1137,12 @@ update_equiv_regs ()
XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
REG_BASIC_BLOCK (regno) = block >= 0 ? block : 0;
REG_BASIC_BLOCK (regno) = bb->sindex;
REG_N_CALLS_CROSSED (regno) = 0;
REG_LIVE_LENGTH (regno) = 2;
if (block >= 0 && insn == BLOCK_HEAD (block))
BLOCK_HEAD (block) = PREV_INSN (insn);
if (insn == bb->head)
bb->head = PREV_INSN (insn);
/* Remember to clear REGNO from all basic blocks' live
info. */
@ -1159,24 +1157,22 @@ update_equiv_regs ()
/* Clear all dead REGNOs from all basic blocks' live info. */
if (clear_regnos)
{
int j, l;
int j;
if (clear_regnos > 8)
{
for (l = 0; l < n_basic_blocks; l++)
FOR_ALL_BB (bb)
{
AND_COMPL_REG_SET (BASIC_BLOCK (l)->global_live_at_start,
&cleared_regs);
AND_COMPL_REG_SET (BASIC_BLOCK (l)->global_live_at_end,
&cleared_regs);
AND_COMPL_REG_SET (bb->global_live_at_start, &cleared_regs);
AND_COMPL_REG_SET (bb->global_live_at_end, &cleared_regs);
}
}
else
EXECUTE_IF_SET_IN_REG_SET (&cleared_regs, 0, j,
{
for (l = 0; l < n_basic_blocks; l++)
FOR_ALL_BB (bb)
{
CLEAR_REGNO_REG_SET (BASIC_BLOCK (l)->global_live_at_start, j);
CLEAR_REGNO_REG_SET (BASIC_BLOCK (l)->global_live_at_end, j);
CLEAR_REGNO_REG_SET (bb->global_live_at_start, j);
CLEAR_REGNO_REG_SET (bb->global_live_at_end, j);
}
});
}

View File

@ -10746,7 +10746,7 @@ loop_dump_aux (loop, file, verbose)
/* This can happen when a marked loop appears as two nested loops,
say from while (a || b) {}. The inner loop won't match
the loop markers but the outer one will. */
if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->sindex)
fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
}
}

View File

@ -319,7 +319,7 @@ combine_predictions_for_insn (insn, bb)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
bb->index);
bb->sindex);
/* We implement "first match" heuristics and use the probability guessed
by the predictor with the smallest index. In the future we will use better
@ -409,10 +409,11 @@ estimate_probability (loops_info)
struct loops *loops_info;
{
sbitmap *dominators, *post_dominators;
basic_block bb;
int i;
dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
@ -420,15 +421,14 @@ estimate_probability (loops_info)
natural loop. */
for (i = 0; i < loops_info->num; i++)
{
int j;
int exits;
struct loop *loop = &loops_info->array[i];
flow_loop_scan (loops_info, loop, LOOP_EXIT_EDGES);
exits = loop->num_exits;
for (j = loop->first->index; j <= loop->last->index; ++j)
if (TEST_BIT (loop->nodes, j))
FOR_BB_BETWEEN (bb, loop->first, loop->last->next_bb, next_bb)
if (TEST_BIT (loop->nodes, bb->sindex))
{
int header_found = 0;
edge e;
@ -437,12 +437,12 @@ estimate_probability (loops_info)
statements construct loops via "non-loop" constructs
in the source language and are better handled
separately. */
if (predicted_by_p (BASIC_BLOCK (j), PRED_CONTINUE))
if (predicted_by_p (bb, PRED_CONTINUE))
continue;
/* Loop branch heuristics - predict an edge back to a
loop's head as taken. */
for (e = BASIC_BLOCK(j)->succ; e; e = e->succ_next)
for (e = bb->succ; e; e = e->succ_next)
if (e->dest == loop->header
&& e->src == loop->latch)
{
@ -453,9 +453,9 @@ estimate_probability (loops_info)
/* Loop exit heuristics - predict an edge exiting the loop as not
taken if the conditional has no successor that is the loop header. */
if (!header_found)
for (e = BASIC_BLOCK(j)->succ; e; e = e->succ_next)
if (e->dest->index < 0
|| !TEST_BIT (loop->nodes, e->dest->index))
for (e = bb->succ; e; e = e->succ_next)
if (e->dest->sindex < 0
|| !TEST_BIT (loop->nodes, e->dest->sindex))
predict_edge
(e, PRED_LOOP_EXIT,
(REG_BR_PROB_BASE
@ -465,9 +465,8 @@ estimate_probability (loops_info)
}
/* Attempt to predict conditional jumps using a number of heuristics. */
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx last_insn = bb->end;
rtx cond, earliest;
edge e;
@ -492,8 +491,8 @@ estimate_probability (loops_info)
/* Look for block we are guarding (ie we dominate it,
but it doesn't postdominate us). */
if (e->dest != EXIT_BLOCK_PTR && e->dest != bb
&& TEST_BIT (dominators[e->dest->index], e->src->index)
&& !TEST_BIT (post_dominators[e->src->index], e->dest->index))
&& TEST_BIT (dominators[e->dest->sindex], e->src->sindex)
&& !TEST_BIT (post_dominators[e->src->sindex], e->dest->sindex))
{
rtx insn;
@ -604,11 +603,11 @@ estimate_probability (loops_info)
}
/* Attach the combined probability to each conditional jump. */
for (i = 0; i < n_basic_blocks; i++)
if (GET_CODE (BLOCK_END (i)) == JUMP_INSN
&& any_condjump_p (BLOCK_END (i))
&& BASIC_BLOCK (i)->succ->succ_next != NULL)
combine_predictions_for_insn (BLOCK_END (i), BASIC_BLOCK (i));
FOR_ALL_BB (bb)
if (GET_CODE (bb->end) == JUMP_INSN
&& any_condjump_p (bb->end)
&& bb->succ->succ_next != NULL)
combine_predictions_for_insn (bb->end, bb);
sbitmap_vector_free (post_dominators);
sbitmap_vector_free (dominators);
@ -695,13 +694,16 @@ static bool
last_basic_block_p (bb)
basic_block bb;
{
return (bb->index == n_basic_blocks - 1
|| (bb->index == n_basic_blocks - 2
if (bb == EXIT_BLOCK_PTR)
return false;
return (bb->next_bb == EXIT_BLOCK_PTR
|| (bb->next_bb->next_bb == EXIT_BLOCK_PTR
&& bb->succ && !bb->succ->succ_next
&& bb->succ->dest->index == n_basic_blocks - 1));
&& bb->succ->dest->next_bb == EXIT_BLOCK_PTR));
}
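/* A worked example, not from the patch: with a hypothetical chain
   ... -> A -> B -> EXIT, the predicate holds for B, and for A exactly
   when A's sole successor is B; "last" is now a property of the
   prev_bb/next_bb chain instead of index arithmetic.  */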
/* Sets branch probabilities according to PREDiction and FLAGS. HEADS[bb->index]
/* Sets branch probabilities according to PREDiction and FLAGS. HEADS[bb->sindex]
should be the index of the basic block in which we need to alter branch predictions
(i.e. the first of our dominators such that we do not post-dominate it)
(but we fill this information on demand, so -1 may be there in case this
@ -722,43 +724,43 @@ process_note_prediction (bb, heads, dominators, post_dominators, pred, flags)
taken = flags & IS_TAKEN;
if (heads[bb->index] < 0)
if (heads[bb->sindex] < 0)
{
/* This is the first time we need this field in the heads array;
find the first dominator that we do not post-dominate (using
the already known members of the heads array). */
int ai = bb->index;
int next_ai = dominators[bb->index];
int ai = bb->sindex;
int next_ai = dominators[bb->sindex];
int head;
while (heads[next_ai] < 0)
{
if (!TEST_BIT (post_dominators[next_ai], bb->index))
if (!TEST_BIT (post_dominators[next_ai], bb->sindex))
break;
heads[next_ai] = ai;
ai = next_ai;
next_ai = dominators[next_ai];
}
if (!TEST_BIT (post_dominators[next_ai], bb->index))
if (!TEST_BIT (post_dominators[next_ai], bb->sindex))
head = next_ai;
else
head = heads[next_ai];
while (next_ai != bb->index)
while (next_ai != bb->sindex)
{
next_ai = ai;
ai = heads[ai];
heads[next_ai] = head;
}
}
y = heads[bb->index];
y = heads[bb->sindex];
/* Now find the edge that leads to our branch and apply the prediction. */
if (y == n_basic_blocks)
if (y == last_basic_block)
return;
for (e = BASIC_BLOCK (y)->succ; e; e = e->succ_next)
if (e->dest->index >= 0
&& TEST_BIT (post_dominators[e->dest->index], bb->index))
if (e->dest->sindex >= 0
&& TEST_BIT (post_dominators[e->dest->sindex], bb->sindex))
predict_edge_def (e, pred, taken);
}
@ -831,7 +833,7 @@ process_note_predictions (bb, heads, dominators, post_dominators)
void
note_prediction_to_br_prob ()
{
int i;
basic_block bb;
sbitmap *post_dominators;
int *dominators, *heads;
@ -839,23 +841,20 @@ note_prediction_to_br_prob ()
add_noreturn_fake_exit_edges ();
connect_infinite_loops_to_exit ();
dominators = xmalloc (sizeof (int) * n_basic_blocks);
memset (dominators, -1, sizeof (int) * n_basic_blocks);
post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
dominators = xmalloc (sizeof (int) * last_basic_block);
memset (dominators, -1, sizeof (int) * last_basic_block);
post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
calculate_dominance_info (dominators, NULL, CDI_DOMINATORS);
heads = xmalloc (sizeof (int) * n_basic_blocks);
memset (heads, -1, sizeof (int) * n_basic_blocks);
heads[0] = n_basic_blocks;
heads = xmalloc (sizeof (int) * last_basic_block);
memset (heads, -1, sizeof (int) * last_basic_block);
heads[ENTRY_BLOCK_PTR->next_bb->sindex] = last_basic_block;
/* Process all prediction notes. */
for (i = 0; i < n_basic_blocks; ++i)
{
basic_block bb = BASIC_BLOCK (i);
process_note_predictions (bb, heads, dominators, post_dominators);
}
FOR_ALL_BB (bb)
process_note_predictions (bb, heads, dominators, post_dominators);
sbitmap_vector_free (post_dominators);
free (dominators);
@ -903,17 +902,15 @@ static void
propagate_freq (head)
basic_block head;
{
basic_block bb = head;
basic_block last = bb;
basic_block bb;
basic_block last;
edge e;
basic_block nextbb;
int n;
/* For each basic block we need to visit, count the number of its
predecessors that we need to visit first. */
for (n = 0; n < n_basic_blocks; n++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (n);
if (BLOCK_INFO (bb)->tovisit)
{
int count = 0;
@ -925,13 +922,14 @@ propagate_freq (head)
&& rtl_dump_file && !EDGE_INFO (e)->back_edge)
fprintf (rtl_dump_file,
"Irreducible region hit, ignoring edge to %i->%i\n",
e->src->index, bb->index);
e->src->sindex, bb->sindex);
BLOCK_INFO (bb)->npredecessors = count;
}
}
memcpy (&BLOCK_INFO (head)->frequency, &real_one, sizeof (real_one));
for (; bb; bb = nextbb)
last = head;
for (bb = head; bb; bb = nextbb)
{
REAL_VALUE_TYPE cyclic_probability, frequency;
@ -1074,24 +1072,13 @@ static void
counts_to_freqs ()
{
HOST_WIDEST_INT count_max = 1;
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks; i++)
count_max = MAX (BASIC_BLOCK (i)->count, count_max);
FOR_ALL_BB (bb)
count_max = MAX (bb->count, count_max);
for (i = -2; i < n_basic_blocks; i++)
{
basic_block bb;
if (i == -2)
bb = ENTRY_BLOCK_PTR;
else if (i == -1)
bb = EXIT_BLOCK_PTR;
else
bb = BASIC_BLOCK (i);
bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
}
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
}
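/* A worked example, not from the patch: the division rounds to
   nearest.  Assuming BB_FREQ_MAX is 10000 (an assumption here), a
   block with count 3 when count_max is 8 gets frequency
   (3 * 10000 + 4) / 8 == 3750.  */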
/* Return true if function is likely to be expensive, so there is no point to
@ -1104,7 +1091,7 @@ expensive_function_p (threshold)
int threshold;
{
unsigned int sum = 0;
int i;
basic_block bb;
unsigned int limit;
/* We can not compute accurately for large thresholds due to scaled
@ -1120,9 +1107,8 @@ expensive_function_p (threshold)
/* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
limit = ENTRY_BLOCK_PTR->frequency * threshold;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx insn;
for (insn = bb->head; insn != NEXT_INSN (bb->end);
@ -1144,7 +1130,7 @@ static void
estimate_bb_frequencies (loops)
struct loops *loops;
{
int i;
basic_block bb;
REAL_VALUE_TYPE freq_max;
enum machine_mode double_mode = TYPE_MODE (double_type_node);
@ -1166,13 +1152,13 @@ estimate_bb_frequencies (loops)
mark_dfs_back_edges ();
/* Fill in the probability values in flowgraph based on the REG_BR_PROB
notes. */
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
rtx last_insn = BLOCK_END (i);
rtx last_insn = bb->end;
if (GET_CODE (last_insn) != JUMP_INSN || !any_condjump_p (last_insn)
/* Avoid handling of conditional jumps jumping to fallthru edge. */
|| BASIC_BLOCK (i)->succ->succ_next == NULL)
|| bb->succ->succ_next == NULL)
{
/* We can predict only conditional jumps at the moment.
Expect each edge to be equally probable.
@ -1180,14 +1166,14 @@ estimate_bb_frequencies (loops)
int nedges = 0;
edge e;
for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
for (e = bb->succ; e; e = e->succ_next)
{
nedges++;
if (e->probability != 0)
break;
}
if (!e)
for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
for (e = bb->succ; e; e = e->succ_next)
e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
}
}
@ -1197,17 +1183,10 @@ estimate_bb_frequencies (loops)
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
alloc_aux_for_edges (sizeof (struct edge_info_def));
for (i = -2; i < n_basic_blocks; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
edge e;
basic_block bb;
if (i == -2)
bb = ENTRY_BLOCK_PTR;
else if (i == -1)
bb = EXIT_BLOCK_PTR;
else
bb = BASIC_BLOCK (i);
BLOCK_INFO (bb)->tovisit = 0;
for (e = bb->succ; e; e = e->succ_next)
@ -1226,32 +1205,22 @@ estimate_bb_frequencies (loops)
estimate_loops_at_level (loops->tree_root);
/* Now fake loop around whole function to finalize probabilities. */
for (i = 0; i < n_basic_blocks; i++)
BLOCK_INFO (BASIC_BLOCK (i))->tovisit = 1;
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
BLOCK_INFO (bb)->tovisit = 1;
BLOCK_INFO (ENTRY_BLOCK_PTR)->tovisit = 1;
BLOCK_INFO (EXIT_BLOCK_PTR)->tovisit = 1;
propagate_freq (ENTRY_BLOCK_PTR);
memcpy (&freq_max, &real_zero, sizeof (real_zero));
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
if (REAL_VALUES_LESS
(freq_max, BLOCK_INFO (BASIC_BLOCK (i))->frequency))
memcpy (&freq_max, &BLOCK_INFO (BASIC_BLOCK (i))->frequency,
(freq_max, BLOCK_INFO (bb)->frequency))
memcpy (&freq_max, &BLOCK_INFO (bb)->frequency,
sizeof (freq_max));
for (i = -2; i < n_basic_blocks; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
basic_block bb;
REAL_VALUE_TYPE tmp;
if (i == -2)
bb = ENTRY_BLOCK_PTR;
else if (i == -1)
bb = EXIT_BLOCK_PTR;
else
bb = BASIC_BLOCK (i);
REAL_ARITHMETIC (tmp, MULT_EXPR, BLOCK_INFO (bb)->frequency,
real_bb_freq_max);
REAL_ARITHMETIC (tmp, RDIV_EXPR, tmp, freq_max);
@ -1271,14 +1240,14 @@ estimate_bb_frequencies (loops)
static void
compute_function_frequency ()
{
int i;
basic_block bb;
if (!profile_info.count_profiles_merged
|| !flag_branch_probabilities)
return;
cfun->function_frequency = FUNCTION_FREQUENCY_UNLIKELY_EXECUTED;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
if (maybe_hot_bb_p (bb))
{
cfun->function_frequency = FUNCTION_FREQUENCY_HOT;

View File

@ -265,7 +265,7 @@ print_rtx (in_rtx)
{
basic_block bb = NOTE_BASIC_BLOCK (in_rtx);
if (bb != 0)
fprintf (outfile, " [bb %d]", bb->index);
fprintf (outfile, " [bb %d]", bb->sindex);
break;
}

View File

@ -73,11 +73,11 @@ struct bb_info
/* Keep all basic block indexes nonnegative in the gcov output. Index 0
is used for the entry block, the last index for the exit block. */
#define GCOV_INDEX_TO_BB(i) ((i) == 0 ? ENTRY_BLOCK_PTR \
: (((i) == n_basic_blocks + 1) \
: (((i) == last_basic_block + 1) \
? EXIT_BLOCK_PTR : BASIC_BLOCK ((i)-1)))
#define BB_TO_GCOV_INDEX(bb) ((bb) == ENTRY_BLOCK_PTR ? 0 \
: ((bb) == EXIT_BLOCK_PTR \
? n_basic_blocks + 1 : (bb)->index + 1))
? last_basic_block + 1 : (bb)->sindex + 1))
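/* A worked example, not from the patch: in a hypothetical function
   with last_basic_block == 4, BB_TO_GCOV_INDEX maps ENTRY_BLOCK_PTR
   to 0, a block with sindex i to i + 1, and EXIT_BLOCK_PTR to
   last_basic_block + 1 == 5.  Because sindexes may now be sparse,
   some gcov indexes between 1 and last_basic_block can go unused.  */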
/* Instantiate the profile info structure. */
@ -137,14 +137,13 @@ static void
instrument_edges (el)
struct edge_list *el;
{
int i;
int num_instr_edges = 0;
int num_edges = NUM_EDGES (el);
basic_block bb;
remove_fake_edges ();
for (i = 0; i < n_basic_blocks + 2; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
basic_block bb = GCOV_INDEX_TO_BB (i);
edge e = bb->succ;
while (e)
{
@ -155,7 +154,7 @@ instrument_edges (el)
abort ();
if (rtl_dump_file)
fprintf (rtl_dump_file, "Edge %d to %d instrumented%s\n",
e->src->index, e->dest->index,
e->src->sindex, e->dest->sindex,
EDGE_CRITICAL_P (e) ? " (and split)" : "");
need_func_profiler = 1;
insert_insn_on_edge (
@ -216,8 +215,8 @@ static gcov_type *
get_exec_counts ()
{
int num_edges = 0;
int i;
int okay = 1;
basic_block bb;
int okay = 1, j;
int mismatch = 0;
gcov_type *profile;
char *function_name_buffer;
@ -233,15 +232,12 @@ get_exec_counts ()
/* Count the edges to be (possibly) instrumented. */
for (i = 0; i < n_basic_blocks + 2; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
{
num_edges++;
}
num_edges++;
}
/* now read and combine all matching profiles. */
@ -251,8 +247,8 @@ get_exec_counts ()
function_name_buffer_len = strlen (current_function_name) + 1;
function_name_buffer = xmalloc (function_name_buffer_len + 1);
for (i = 0; i < num_edges; i++)
profile[i] = 0;
for (j = 0; j < num_edges; j++)
profile[j] = 0;
while (1)
{
@ -376,8 +372,8 @@ get_exec_counts ()
static void
compute_branch_probabilities ()
{
int i;
int num_edges = 0;
basic_block bb;
int num_edges = 0, i;
int changes;
int passes;
int hist_br_prob[20];
@ -389,9 +385,8 @@ compute_branch_probabilities ()
/* Attach extra info block to each bb. */
alloc_aux_for_blocks (sizeof (struct bb_info));
for (i = 0; i < n_basic_blocks + 2; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
@ -412,9 +407,8 @@ compute_branch_probabilities ()
/* The first count in the .da file is the number of times that the function
was entered. This is the exec_count for block zero. */
for (i = 0; i < n_basic_blocks + 2; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
@ -433,7 +427,7 @@ compute_branch_probabilities ()
if (rtl_dump_file)
{
fprintf (rtl_dump_file, "\nRead edge from %i to %i, count:",
bb->index, e->dest->index);
bb->sindex, e->dest->sindex);
fprintf (rtl_dump_file, HOST_WIDEST_INT_PRINT_DEC,
(HOST_WIDEST_INT) e->count);
}
@ -466,9 +460,8 @@ compute_branch_probabilities ()
{
passes++;
changes = 0;
for (i = n_basic_blocks + 1; i >= 0; i--)
FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb)
{
basic_block bb = GCOV_INDEX_TO_BB (i);
struct bb_info *bi = BB_INFO (bb);
if (! bi->count_valid)
{
@ -563,9 +556,8 @@ compute_branch_probabilities ()
/* If the graph has been correctly solved, every block will have a
succ and pred count of zero. */
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
if (BB_INFO (bb)->succ_count || BB_INFO (bb)->pred_count)
abort ();
}
@ -578,9 +570,8 @@ compute_branch_probabilities ()
num_never_executed = 0;
num_branches = 0;
for (i = 0; i <= n_basic_blocks + 1; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
gcov_type total;
rtx note;
@ -594,11 +585,11 @@ compute_branch_probabilities ()
if (e->probability < 0 || e->probability > REG_BR_PROB_BASE)
{
error ("corrupted profile info: prob for %d-%d thought to be %d",
e->src->index, e->dest->index, e->probability);
e->src->sindex, e->dest->sindex, e->probability);
e->probability = REG_BR_PROB_BASE / 2;
}
}
if (bb->index >= 0
if (bb->sindex >= 0
&& any_condjump_p (bb->end)
&& bb->succ->succ_next)
{
@ -655,7 +646,7 @@ compute_branch_probabilities ()
for (e = bb->succ; e; e = e->succ_next)
e->probability = REG_BR_PROB_BASE / total;
}
if (bb->index >= 0
if (bb->sindex >= 0
&& any_condjump_p (bb->end)
&& bb->succ->succ_next)
num_branches++, num_never_executed++;
@ -696,12 +687,10 @@ static long
compute_checksum ()
{
long chsum = 0;
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks ; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
@ -734,6 +723,7 @@ compute_checksum ()
void
branch_prob ()
{
basic_block bb;
int i;
int num_edges, ignored_edges;
struct edge_list *el;
@ -762,11 +752,10 @@ branch_prob ()
We also add fake exit edges for each call and asm statement in the
basic block, since it may not return. */
for (i = 0; i < n_basic_blocks ; i++)
FOR_ALL_BB (bb)
{
int need_exit_edge = 0, need_entry_edge = 0;
int have_exit_edge = 0, have_entry_edge = 0;
basic_block bb = BASIC_BLOCK (i);
rtx insn;
edge e;
@ -791,7 +780,7 @@ branch_prob ()
{
/* We should not abort here, as the call to setjmp should not
be the very first instruction of the function. */
if (!i)
if (bb == ENTRY_BLOCK_PTR)
abort ();
make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
}
@ -819,14 +808,14 @@ branch_prob ()
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Adding fake exit edge to bb %i\n",
bb->index);
bb->sindex);
make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
}
if (need_entry_edge && !have_entry_edge)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Adding fake entry edge to bb %i\n",
bb->index);
bb->sindex);
make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
}
}
@ -858,10 +847,10 @@ branch_prob ()
GCOV utility. */
if (flag_test_coverage)
{
int i = 0;
for (i = 0 ; i < n_basic_blocks; i++)
basic_block bb;
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx insn = bb->head;
static int ignore_next_note = 0;
@ -939,9 +928,9 @@ branch_prob ()
}
}
total_num_blocks += n_basic_blocks + 2;
total_num_blocks += num_basic_blocks + 2;
if (rtl_dump_file)
fprintf (rtl_dump_file, "%d basic blocks\n", n_basic_blocks);
fprintf (rtl_dump_file, "%d basic blocks\n", num_basic_blocks);
total_num_edges += num_edges;
if (rtl_dump_file)
@ -967,12 +956,11 @@ branch_prob ()
__write_long (profile_info.current_function_cfg_checksum, bbg_file, 4);
/* The plus 2 stands for entry and exit block. */
__write_long (n_basic_blocks + 2, bbg_file, 4);
__write_long (num_basic_blocks + 2, bbg_file, 4);
__write_long (num_edges - ignored_edges + 1, bbg_file, 4);
for (i = 0; i < n_basic_blocks + 1; i++)
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
{
basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
long count = 0;
@ -1081,13 +1069,14 @@ find_spanning_tree (el)
struct edge_list *el;
{
int i;
basic_block bb;
int num_edges = NUM_EDGES (el);
/* We use aux field for standard union-find algorithm. */
EXIT_BLOCK_PTR->aux = EXIT_BLOCK_PTR;
ENTRY_BLOCK_PTR->aux = ENTRY_BLOCK_PTR;
for (i = 0; i < n_basic_blocks; i++)
BASIC_BLOCK (i)->aux = BASIC_BLOCK (i);
FOR_ALL_BB (bb)
bb->aux = bb;
/* Add a fake exit-to-entry edge, which we can't instrument. */
union_groups (EXIT_BLOCK_PTR, ENTRY_BLOCK_PTR);
@ -1106,7 +1095,7 @@ find_spanning_tree (el)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Abnormal edge %d to %d put to tree\n",
e->src->index, e->dest->index);
e->src->sindex, e->dest->sindex);
EDGE_INFO (e)->on_tree = 1;
union_groups (e->src, e->dest);
}
@ -1122,7 +1111,7 @@ find_spanning_tree (el)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Critical edge %d to %d put to tree\n",
e->src->index, e->dest->index);
e->src->sindex, e->dest->sindex);
EDGE_INFO (e)->on_tree = 1;
union_groups (e->src, e->dest);
}
@ -1137,7 +1126,7 @@ find_spanning_tree (el)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Normal edge %d to %d put to tree\n",
e->src->index, e->dest->index);
e->src->sindex, e->dest->sindex);
EDGE_INFO (e)->on_tree = 1;
union_groups (e->src, e->dest);
}
@ -1145,8 +1134,8 @@ find_spanning_tree (el)
EXIT_BLOCK_PTR->aux = NULL;
ENTRY_BLOCK_PTR->aux = NULL;
for (i = 0; i < n_basic_blocks; i++)
BASIC_BLOCK (i)->aux = NULL;
FOR_ALL_BB (bb)
bb->aux = NULL;
}
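/* find_group and union_groups are used above but defined outside
   these hunks; this is a minimal sketch of the aux-field union-find
   they presumably implement, not the file's actual code.  */

static basic_block
find_group (bb)
     basic_block bb;
{
  basic_block group = bb, bb1;

  /* Chase aux links to the representative of BB's group.  */
  while ((basic_block) group->aux != group)
    group = group->aux;

  /* Compress the path so later lookups are cheap.  */
  while ((basic_block) bb->aux != group)
    {
      bb1 = bb->aux;
      bb->aux = (void *) group;
      bb = bb1;
    }

  return group;
}

static void
union_groups (bb1, bb2)
     basic_block bb1, bb2;
{
  basic_block bb1g = find_group (bb1);
  basic_block bb2g = find_group (bb2);

  /* Callers should never try to union a group with itself.  */
  if (bb1g == bb2g)
    abort ();

  bb1g->aux = bb2g;
}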
/* Perform file-level initialization for branch-prob processing. */

View File

@ -2727,15 +2727,14 @@ split_all_insns (upd_life)
{
sbitmap blocks;
int changed;
int i;
basic_block bb;
blocks = sbitmap_alloc (n_basic_blocks);
blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
changed = 0;
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx insn, next;
bool finish = false;
@ -2756,7 +2755,7 @@ split_all_insns (upd_life)
while (GET_CODE (last) == BARRIER)
last = PREV_INSN (last);
SET_BIT (blocks, i);
SET_BIT (blocks, bb->sindex);
changed = 1;
insn = last;
}
@ -2999,7 +2998,8 @@ peephole2_optimize (dump_file)
regset_head rs_heads[MAX_INSNS_PER_PEEP2 + 2];
rtx insn, prev;
regset live;
int i, b;
int i;
basic_block bb;
#ifdef HAVE_conditional_execution
sbitmap blocks;
bool changed;
@ -3013,16 +3013,15 @@ peephole2_optimize (dump_file)
live = INITIALIZE_REG_SET (rs_heads[i]);
#ifdef HAVE_conditional_execution
blocks = sbitmap_alloc (n_basic_blocks);
blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
changed = false;
#else
count_or_remove_death_notes (NULL, 1);
#endif
for (b = n_basic_blocks - 1; b >= 0; --b)
FOR_ALL_BB_REVERSE (bb)
{
basic_block bb = BASIC_BLOCK (b);
struct propagate_block_info *pbi;
/* Indicate that all slots except the last hold invalid data. */

View File

@ -418,8 +418,8 @@ reg_to_stack (first, file)
rtx first;
FILE *file;
{
int i;
int max_uid;
basic_block bb;
int max_uid, i;
/* Clean up previous run. */
if (stack_regs_mentioned_data)
@ -451,10 +451,9 @@ reg_to_stack (first, file)
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB_REVERSE (bb)
{
edge e;
basic_block bb = BASIC_BLOCK (i);
for (e = bb->pred; e; e=e->pred_next)
if (!(e->flags & EDGE_DFS_BACK)
&& e->src != ENTRY_BLOCK_PTR)
@ -2382,12 +2381,12 @@ print_stack (file, s)
static int
convert_regs_entry ()
{
int inserted = 0, i;
int inserted = 0;
edge e;
basic_block block;
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB_REVERSE (block)
{
basic_block block = BASIC_BLOCK (i);
block_info bi = BLOCK_INFO (block);
int reg;
@ -2491,7 +2490,7 @@ compensate_edge (e, file)
current_block = block;
regstack = bi->stack_out;
if (file)
fprintf (file, "Edge %d->%d: ", block->index, target->index);
fprintf (file, "Edge %d->%d: ", block->sindex, target->sindex);
if (target_stack->top == -2)
{
@ -2651,7 +2650,7 @@ convert_regs_1 (file, block)
if (EDGE_CRITICAL_P (e))
beste = e;
}
else if (e->src->index < beste->src->index)
else if (e->src->sindex < beste->src->sindex)
beste = e;
}
@ -2665,7 +2664,7 @@ convert_regs_1 (file, block)
if (file)
{
fprintf (file, "\nBasic block %d\nInput stack: ", block->index);
fprintf (file, "\nBasic block %d\nInput stack: ", block->sindex);
print_stack (file, &bi->stack_in);
}
@ -2780,7 +2779,7 @@ convert_regs_2 (file, block)
basic_block *stack, *sp;
int inserted;
stack = (basic_block *) xmalloc (sizeof (*stack) * n_basic_blocks);
stack = (basic_block *) xmalloc (sizeof (*stack) * num_basic_blocks);
sp = stack;
*sp++ = block;
@ -2815,7 +2814,8 @@ static int
convert_regs (file)
FILE *file;
{
int inserted, i;
int inserted;
basic_block b;
edge e;
/* Initialize uninitialized registers on function entry. */
@ -2835,9 +2835,8 @@ convert_regs (file)
/* ??? Process all unreachable blocks. Though there's no excuse
for keeping these even when not optimizing. */
for (i = 0; i < n_basic_blocks; ++i)
FOR_ALL_BB (b)
{
basic_block b = BASIC_BLOCK (i);
block_info bi = BLOCK_INFO (b);
if (! bi->done)

View File

@ -1127,10 +1127,10 @@ scan_one_insn (insn, pass)
INSN could not be at the beginning of that block. */
if (previnsn == 0 || GET_CODE (previnsn) == JUMP_INSN)
{
int b;
for (b = 0; b < n_basic_blocks; b++)
if (insn == BLOCK_HEAD (b))
BLOCK_HEAD (b) = newinsn;
basic_block b;
FOR_ALL_BB (b)
if (insn == b->head)
b->head = newinsn;
}
/* This makes one more setting of new insns's dest. */
@ -1255,7 +1255,7 @@ regclass (f, nregs, dump)
for (pass = 0; pass <= flag_expensive_optimizations; pass++)
{
int index;
basic_block bb;
if (dump)
fprintf (dump, "\n\nPass %i\n\n",pass);
@ -1277,9 +1277,8 @@ regclass (f, nregs, dump)
insn = scan_one_insn (insn, pass);
}
else
for (index = 0; index < n_basic_blocks; index++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (index);
/* Show that an insn inside a loop is likely to be executed three
times more than insns outside a loop. This is much more

View File

@ -223,7 +223,7 @@ mark_flags_life_zones (flags)
{
int flags_regno;
int flags_nregs;
int block;
basic_block block;
#ifdef HAVE_cc0
/* If we found a flags register on a cc0 host, bail. */
@ -254,13 +254,13 @@ mark_flags_life_zones (flags)
flags_set_1_rtx = flags;
/* Process each basic block. */
for (block = n_basic_blocks - 1; block >= 0; block--)
FOR_ALL_BB_REVERSE (block)
{
rtx insn, end;
int live;
insn = BLOCK_HEAD (block);
end = BLOCK_END (block);
insn = block->head;
end = block->end;
/* Look out for the (unlikely) case of flags being live across
basic block boundaries. */
@ -269,7 +269,7 @@ mark_flags_life_zones (flags)
{
int i;
for (i = 0; i < flags_nregs; ++i)
live |= REGNO_REG_SET_P (BASIC_BLOCK (block)->global_live_at_start,
live |= REGNO_REG_SET_P (block->global_live_at_start,
flags_regno + i);
}
#endif
@ -1061,6 +1061,7 @@ regmove_optimize (f, nregs, regmove_dump_file)
int pass;
int i;
rtx copy_src, copy_dst;
basic_block bb;
/* ??? Hack. Regmove doesn't examine the CFG, and gets mightily
confused by non-call exceptions ending blocks. */
@ -1076,8 +1077,8 @@ regmove_optimize (f, nregs, regmove_dump_file)
regmove_bb_head = (int *) xmalloc (sizeof (int) * (old_max_uid + 1));
for (i = old_max_uid; i >= 0; i--) regmove_bb_head[i] = -1;
for (i = 0; i < n_basic_blocks; i++)
regmove_bb_head[INSN_UID (BLOCK_HEAD (i))] = i;
FOR_ALL_BB (bb)
regmove_bb_head[INSN_UID (bb->head)] = bb->sindex;
/* A forward/backward pass. Replace output operands with input operands. */
@ -1504,15 +1505,15 @@ regmove_optimize (f, nregs, regmove_dump_file)
/* In fixup_match_1, some insns may have been inserted after basic block
ends. Fix that here. */
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
rtx end = BLOCK_END (i);
rtx end = bb->end;
rtx new = end;
rtx next = NEXT_INSN (new);
while (next != 0 && INSN_UID (next) >= old_max_uid
&& (i == n_basic_blocks - 1 || BLOCK_HEAD (i + 1) != next))
&& (bb->next_bb == EXIT_BLOCK_PTR || bb->next_bb->head != next))
new = next, next = NEXT_INSN (new);
BLOCK_END (i) = new;
bb->end = new;
}
done:
@ -2138,10 +2139,10 @@ static int record_stack_memrefs PARAMS ((rtx *, void *));
void
combine_stack_adjustments ()
{
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks; ++i)
combine_stack_adjustments_for_block (BASIC_BLOCK (i));
FOR_ALL_BB (bb)
combine_stack_adjustments_for_block (bb);
}
/* Recognize a MEM of the form (sp) or (plus sp const). */

View File

@ -201,7 +201,7 @@ regrename_optimize ()
{
int tick[FIRST_PSEUDO_REGISTER];
int this_tick = 0;
int b;
basic_block bb;
char *first_obj;
memset (tick, 0, sizeof tick);
@ -209,9 +209,8 @@ regrename_optimize ()
gcc_obstack_init (&rename_obstack);
first_obj = (char *) obstack_alloc (&rename_obstack, 0);
for (b = 0; b < n_basic_blocks; b++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (b);
struct du_chain *all_chains = 0;
HARD_REG_SET unavailable;
HARD_REG_SET regs_seen;
@ -219,7 +218,7 @@ regrename_optimize ()
CLEAR_HARD_REG_SET (unavailable);
if (rtl_dump_file)
fprintf (rtl_dump_file, "\nBasic block %d:\n", b);
fprintf (rtl_dump_file, "\nBasic block %d:\n", bb->sindex);
all_chains = build_def_use (bb);
@ -1726,30 +1725,30 @@ copyprop_hardreg_forward ()
{
struct value_data *all_vd;
bool need_refresh;
int b;
basic_block bb, bbp;
need_refresh = false;
all_vd = xmalloc (sizeof (struct value_data) * n_basic_blocks);
all_vd = xmalloc (sizeof (struct value_data) * last_basic_block);
for (b = 0; b < n_basic_blocks; b++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (b);
/* If a block has a single predecessor that we've already
processed, begin with the value data that was live at
the end of the predecessor block. */
/* ??? Ought to use more intelligent queueing of blocks. */
if (bb->pred)
for (bbp = bb; bbp && bbp != bb->pred->src; bbp = bbp->prev_bb);
if (bb->pred
&& ! bb->pred->pred_next
&& ! (bb->pred->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
&& bb->pred->src->index != ENTRY_BLOCK
&& bb->pred->src->index < b)
all_vd[b] = all_vd[bb->pred->src->index];
&& bb->pred->src != ENTRY_BLOCK_PTR
&& bbp)
all_vd[bb->sindex] = all_vd[bb->pred->src->sindex];
else
init_value_data (all_vd + b);
init_value_data (all_vd + bb->sindex);
if (copyprop_hardreg_forward_1 (bb, all_vd + b))
if (copyprop_hardreg_forward_1 (bb, all_vd + bb->sindex))
need_refresh = true;
}

View File

@ -676,6 +676,7 @@ reload (first, global)
int i;
rtx insn;
struct elim_table *ep;
basic_block bb;
/* The two pointers used to track the true location of the memory used
for label offsets. */
@ -1123,8 +1124,8 @@ reload (first, global)
pseudo. */
if (! frame_pointer_needed)
for (i = 0; i < n_basic_blocks; i++)
CLEAR_REGNO_REG_SET (BASIC_BLOCK (i)->global_live_at_start,
FOR_ALL_BB (bb)
CLEAR_REGNO_REG_SET (bb->global_live_at_start,
HARD_FRAME_POINTER_REGNUM);
/* Come here (with failure set nonzero) if we can't get enough spill regs
@ -8612,6 +8613,7 @@ reload_combine ()
int first_index_reg = -1;
int last_index_reg = 0;
int i;
basic_block bb;
unsigned int r;
int last_label_ruid;
int min_labelno, n_labels;
@ -8647,17 +8649,17 @@ reload_combine ()
label_live = (HARD_REG_SET *) xmalloc (n_labels * sizeof (HARD_REG_SET));
CLEAR_HARD_REG_SET (ever_live_at_start);
for (i = n_basic_blocks - 1; i >= 0; i--)
FOR_ALL_BB_REVERSE (bb)
{
insn = BLOCK_HEAD (i);
insn = bb->head;
if (GET_CODE (insn) == CODE_LABEL)
{
HARD_REG_SET live;
REG_SET_TO_HARD_REG_SET (live,
BASIC_BLOCK (i)->global_live_at_start);
bb->global_live_at_start);
compute_use_by_pseudos (&live,
BASIC_BLOCK (i)->global_live_at_start);
bb->global_live_at_start);
COPY_HARD_REG_SET (LABEL_LIVE (insn), live);
IOR_HARD_REG_SET (ever_live_at_start, live);
}
@ -9488,12 +9490,11 @@ copy_eh_notes (insn, x)
void
fixup_abnormal_edges ()
{
int i;
bool inserted = false;
basic_block bb;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
edge e;
/* Look for cases we are interested in - calls or instructions causing

View File

@ -3601,7 +3601,7 @@ dbr_schedule (first, file)
/* If the current function has no insns other than the prologue and
epilogue, then do not try to fill any delay slots. */
if (n_basic_blocks == 0)
if (num_basic_blocks == 0)
return;
/* Find the highest INSN_UID and allocate and initialize our map from

View File

@ -133,7 +133,7 @@ find_basic_block (insn, search_limit)
rtx insn;
int search_limit;
{
int i;
basic_block bb;
/* Scan backwards to the previous BARRIER. Then see if we can find a
label that starts a basic block. Return the basic block number. */
@ -156,9 +156,9 @@ find_basic_block (insn, search_limit)
insn && GET_CODE (insn) == CODE_LABEL;
insn = next_nonnote_insn (insn))
{
for (i = 0; i < n_basic_blocks; i++)
if (insn == BLOCK_HEAD (i))
return i;
FOR_ALL_BB (bb)
if (insn == bb->head)
return bb->sindex;
}
return -1;
@ -1240,7 +1240,7 @@ init_resource_info (epilogue_insn)
/* Allocate and initialize the tables used by mark_target_live_regs. */
target_hash_table = (struct target_info **)
xcalloc (TARGET_HASH_PRIME, sizeof (struct target_info *));
bb_ticks = (int *) xcalloc (n_basic_blocks, sizeof (int));
bb_ticks = (int *) xcalloc (last_basic_block, sizeof (int));
}
/* Free up the resources allocated to mark_target_live_regs (). This

View File

@ -446,7 +446,7 @@ sbitmap_intersection_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
sbitmap_copy (dst, src[e->dest->index]);
sbitmap_copy (dst, src[e->dest->sindex]);
break;
}
@ -461,7 +461,7 @@ sbitmap_intersection_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
p = src[e->dest->index]->elms;
p = src[e->dest->sindex]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ &= *p++;
@ -486,7 +486,7 @@ sbitmap_intersection_of_preds (dst, src, bb)
if (e->src == ENTRY_BLOCK_PTR)
continue;
sbitmap_copy (dst, src[e->src->index]);
sbitmap_copy (dst, src[e->src->sindex]);
break;
}
@ -501,7 +501,7 @@ sbitmap_intersection_of_preds (dst, src, bb)
if (e->src == ENTRY_BLOCK_PTR)
continue;
p = src[e->src->index]->elms;
p = src[e->src->sindex]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ &= *p++;
@ -526,7 +526,7 @@ sbitmap_union_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
sbitmap_copy (dst, src[e->dest->index]);
sbitmap_copy (dst, src[e->dest->sindex]);
break;
}
@ -541,7 +541,7 @@ sbitmap_union_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
p = src[e->dest->index]->elms;
p = src[e->dest->sindex]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ |= *p++;
@ -566,7 +566,7 @@ sbitmap_union_of_preds (dst, src, bb)
if (e->src== ENTRY_BLOCK_PTR)
continue;
sbitmap_copy (dst, src[e->src->index]);
sbitmap_copy (dst, src[e->src->sindex]);
break;
}
@ -580,8 +580,8 @@ sbitmap_union_of_preds (dst, src, bb)
if (e->src == ENTRY_BLOCK_PTR)
continue;
p = src[e->src->index]->elms;
p = src[e->src->sindex]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ |= *p++;
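/* Not from the patch: a sketch of how these confluence helpers fit a
   forward dataflow solver under the new iteration macros.  The in,
   out, gen and kill arrays are hypothetical, each indexed by
   bb->sindex and therefore sized by last_basic_block.  */

sbitmap *in, *out, *gen, *kill;  /* hypothetical problem data */
basic_block bb;
int changed = 1;

while (changed)
  {
    changed = 0;
    FOR_ALL_BB (bb)
      {
        /* Meet over predecessors, then the transfer function:
           out = gen U (in - kill); the _cg variant reports changes.  */
        sbitmap_union_of_preds (in[bb->sindex], out, bb);
        if (sbitmap_union_of_diff_cg (out[bb->sindex], gen[bb->sindex],
                                      in[bb->sindex], kill[bb->sindex]))
          changed = 1;
      }
  }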

View File

@ -1494,7 +1494,7 @@ init_dependency_caches (luid)
average number of instructions in a basic block is very high. See
the comment before the declaration of true_dependency_cache for
what we consider "very high". */
if (luid / n_basic_blocks > 100 * 5)
if (luid / num_basic_blocks > 100 * 5)
{
true_dependency_cache = sbitmap_vector_alloc (luid, luid);
sbitmap_vector_zero (true_dependency_cache, luid);

View File

@ -279,11 +279,11 @@ void
schedule_ebbs (dump_file)
FILE *dump_file;
{
int i;
basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
if (n_basic_blocks == 0)
if (num_basic_blocks == 0)
return;
scope_to_insns_initialize ();
@ -296,20 +296,19 @@ schedule_ebbs (dump_file)
compute_bb_for_insn (get_max_uid ());
/* Schedule every region in the subroutine. */
for (i = 0; i < n_basic_blocks; i++)
{
rtx head = BASIC_BLOCK (i)->head;
FOR_ALL_BB (bb)
{
rtx head = bb->head;
rtx tail;
for (;;)
{
basic_block b = BASIC_BLOCK (i);
edge e;
tail = b->end;
if (i + 1 == n_basic_blocks
|| GET_CODE (BLOCK_HEAD (i + 1)) == CODE_LABEL)
tail = bb->end;
if (bb->next_bb == EXIT_BLOCK_PTR
|| GET_CODE (bb->next_bb->head) == CODE_LABEL)
break;
for (e = b->succ; e; e = e->succ_next)
for (e = bb->succ; e; e = e->succ_next)
if ((e->flags & EDGE_FALLTHRU) != 0)
break;
if (! e)
@ -325,7 +324,7 @@ schedule_ebbs (dump_file)
}
}
i++;
bb = bb->next_bb;
}
/* Blah. We should fix the rest of the code not to get confused by

View File

@ -319,7 +319,7 @@ static void free_pending_lists PARAMS ((void));
static int
is_cfg_nonregular ()
{
int b;
basic_block b;
rtx insn;
RTX_CODE code;
@ -346,8 +346,8 @@ is_cfg_nonregular ()
/* If we have non-jumping insns which refer to labels, then we consider
the cfg not well structured. */
/* Check for labels referred to by something other than jumps. */
for (b = 0; b < n_basic_blocks; b++)
for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
FOR_ALL_BB (b)
for (insn = b->head;; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
@ -361,7 +361,7 @@ is_cfg_nonregular ()
return 1;
}
if (insn == BLOCK_END (b))
if (insn == b->end)
break;
}
@ -382,6 +382,7 @@ build_control_flow (edge_list)
struct edge_list *edge_list;
{
int i, unreachable, num_edges;
basic_block b;
/* This already accounts for entry/exit edges. */
num_edges = NUM_EDGES (edge_list);
@ -393,10 +394,8 @@ build_control_flow (edge_list)
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
unreachable = 0;
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (b)
{
basic_block b = BASIC_BLOCK (i);
if (b->pred == NULL
|| (b->pred->src == b
&& b->pred->pred_next == NULL))
@ -404,8 +403,8 @@ build_control_flow (edge_list)
}
/* ??? We can kill these soon. */
in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
@ -415,7 +414,7 @@ build_control_flow (edge_list)
if (e->dest != EXIT_BLOCK_PTR
&& e->src != ENTRY_BLOCK_PTR)
new_edge (e->src->index, e->dest->index);
new_edge (e->src->sindex, e->dest->sindex);
}
/* Increment by 1, since edge 0 is unused. */
@ -544,17 +543,19 @@ debug_regions ()
static void
find_single_block_region ()
{
int i;
basic_block bb;
for (i = 0; i < n_basic_blocks; i++)
nr_regions = 0;
FOR_ALL_BB (bb)
{
rgn_bb_table[i] = i;
RGN_NR_BLOCKS (i) = 1;
RGN_BLOCKS (i) = i;
CONTAINING_RGN (i) = i;
BLOCK_TO_BB (i) = 0;
rgn_bb_table[nr_regions] = bb->sindex;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = nr_regions;
CONTAINING_RGN (bb->sindex) = nr_regions;
BLOCK_TO_BB (bb->sindex) = 0;
nr_regions++;
}
nr_regions = n_basic_blocks;
}
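/* A worked example, not from the patch: with sparse sindexes
   {0, 2, 5} left behind by block deletions, this loop yields
   nr_regions == 3 and rgn_bb_table == {0, 2, 5}.  The old identity
   mapping (region i is block i) would have manufactured regions for
   the dead indexes 1, 3 and 4.  */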
/* Update number of blocks and the estimate for number of insns
@ -631,6 +632,7 @@ find_rgns (edge_list, dom)
int count = 0, sp, idx = 0, current_edge = out_edges[0];
int num_bbs, num_insns, unreachable;
int too_large_failure;
basic_block bb;
/* Note if an edge has been passed. */
sbitmap passed;
@ -659,26 +661,26 @@ find_rgns (edge_list, dom)
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
stack = (int *) xmalloc (nr_edges * sizeof (int));
inner = sbitmap_alloc (n_basic_blocks);
inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
header = sbitmap_alloc (n_basic_blocks);
header = sbitmap_alloc (last_basic_block);
sbitmap_zero (header);
passed = sbitmap_alloc (nr_edges);
sbitmap_zero (passed);
in_queue = sbitmap_alloc (n_basic_blocks);
in_queue = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_queue);
in_stack = sbitmap_alloc (n_basic_blocks);
in_stack = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_stack);
for (i = 0; i < n_basic_blocks; i++)
for (i = 0; i < last_basic_block; i++)
max_hdr[i] = -1;
/* DFS traversal to find inner loops in the cfg. */
@ -772,8 +774,8 @@ find_rgns (edge_list, dom)
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
for (i = 0; i < n_basic_blocks; i++)
if (dfs_nr[i] == 0)
FOR_ALL_BB (bb)
if (dfs_nr[bb->sindex] == 0)
{
unreachable = 1;
break;
@ -783,14 +785,14 @@ find_rgns (edge_list, dom)
to hold degree counts. */
degree = dfs_nr;
for (i = 0; i < n_basic_blocks; i++)
degree[i] = 0;
FOR_ALL_BB (bb)
degree[bb->sindex] = 0;
for (i = 0; i < num_edges; i++)
{
edge e = INDEX_EDGE (edge_list, i);
if (e->dest != EXIT_BLOCK_PTR)
degree[e->dest->index]++;
degree[e->dest->sindex]++;
}
/* Do not perform region scheduling if there are any unreachable
@ -805,16 +807,16 @@ find_rgns (edge_list, dom)
/* Second traversal: find reducible inner loops and topologically sort
the blocks of each region. */
queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
queue = (int *) xmalloc (num_basic_blocks * sizeof (int));
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
if (TEST_BIT (header, i) && TEST_BIT (inner, i))
if (TEST_BIT (header, bb->sindex) && TEST_BIT (inner, bb->sindex))
{
edge e;
int j;
basic_block jbb;
/* Now check that the loop is reducible. We do this separately
from finding inner loops so that we do not find a reducible
@ -827,15 +829,15 @@ find_rgns (edge_list, dom)
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
for (j = 0; j < n_basic_blocks; j++)
FOR_ALL_BB (jbb)
{
/* First identify blocks in the loop, except for the loop
entry block. */
if (i == max_hdr[j] && i != j)
if (bb->sindex == max_hdr[jbb->sindex] && bb != jbb)
{
/* Now verify that the block is dominated by the loop
header. */
if (!TEST_BIT (dom[j], i))
if (!TEST_BIT (dom[jbb->sindex], bb->sindex))
break;
}
}
@ -843,25 +845,25 @@ find_rgns (edge_list, dom)
/* If we exited the loop early, then BB is the header of
a non-reducible loop and we should quit processing it
now. */
if (j != n_basic_blocks)
if (jbb != EXIT_BLOCK_PTR)
continue;
/* BB is a header of an inner loop, or block 0 in a subroutine
with no loops at all. */
head = tail = -1;
too_large_failure = 0;
loop_head = max_hdr[i];
loop_head = max_hdr[bb->sindex];
/* Decrease degree of all BB's successors for topological
ordering. */
for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
for (e = bb->succ; e; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
--degree[e->dest->sindex];
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
num_insns = (INSN_LUID (BLOCK_END (i))
- INSN_LUID (BLOCK_HEAD (i)));
num_insns = (INSN_LUID (bb->end)
- INSN_LUID (bb->head));
/* Find all loop latches (blocks with back edges to the loop
header), or all the leaf blocks if the cfg has no loops.
@ -869,17 +871,17 @@ find_rgns (edge_list, dom)
Place those blocks into the queue. */
if (no_loops)
{
for (j = 0; j < n_basic_blocks; j++)
FOR_ALL_BB (jbb)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
if (BASIC_BLOCK (j)->succ
&& BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
&& BASIC_BLOCK (j)->succ->succ_next == NULL)
if (jbb->succ
&& jbb->succ->dest == EXIT_BLOCK_PTR
&& jbb->succ->succ_next == NULL)
{
queue[++tail] = j;
SET_BIT (in_queue, j);
queue[++tail] = jbb->sindex;
SET_BIT (in_queue, jbb->sindex);
if (too_large (j, &num_bbs, &num_insns))
if (too_large (jbb->sindex, &num_bbs, &num_insns))
{
too_large_failure = 1;
break;
@ -890,14 +892,14 @@ find_rgns (edge_list, dom)
{
edge e;
for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
for (e = bb->pred; e; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR)
continue;
node = e->src->index;
node = e->src->sindex;
if (max_hdr[node] == loop_head && node != i)
if (max_hdr[node] == loop_head && node != bb->sindex)
{
/* This is a loop latch. */
queue[++tail] = node;
@ -949,7 +951,7 @@ find_rgns (edge_list, dom)
for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
{
node = e->src->index;
node = e->src->sindex;
/* See discussion above about nodes not marked as in
this loop during the initial DFS traversal. */
@ -959,7 +961,7 @@ find_rgns (edge_list, dom)
tail = -1;
break;
}
else if (!TEST_BIT (in_queue, node) && node != i)
else if (!TEST_BIT (in_queue, node) && node != bb->sindex)
{
queue[++tail] = node;
SET_BIT (in_queue, node);
@ -976,12 +978,12 @@ find_rgns (edge_list, dom)
if (tail >= 0 && !too_large_failure)
{
/* Place the loop header into list of region blocks. */
degree[i] = -1;
rgn_bb_table[idx] = i;
degree[bb->sindex] = -1;
rgn_bb_table[idx] = bb->sindex;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
CONTAINING_RGN (i) = nr_regions;
BLOCK_TO_BB (i) = count = 0;
CONTAINING_RGN (bb->sindex) = nr_regions;
BLOCK_TO_BB (bb->sindex) = count = 0;
/* Remove blocks from queue[] when their in degree
becomes zero. Repeat until no blocks are left on the
@ -1006,7 +1008,7 @@ find_rgns (edge_list, dom)
e;
e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
--degree[e->dest->index];
--degree[e->dest->sindex];
}
else
--head;
@ -1020,14 +1022,14 @@ find_rgns (edge_list, dom)
/* Any block that did not end up in a region is placed into a region
by itself. */
for (i = 0; i < n_basic_blocks; i++)
if (degree[i] >= 0)
FOR_ALL_BB (bb)
if (degree[bb->sindex] >= 0)
{
rgn_bb_table[idx] = i;
rgn_bb_table[idx] = bb->sindex;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
CONTAINING_RGN (i) = nr_regions++;
BLOCK_TO_BB (i) = 0;
CONTAINING_RGN (bb->sindex) = nr_regions++;
BLOCK_TO_BB (bb->sindex) = 0;
}
free (max_hdr);
@ -1195,8 +1197,8 @@ compute_trg_info (trg)
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
update_blocks = (char *) alloca (n_basic_blocks);
memset (update_blocks, 0, n_basic_blocks);
update_blocks = (char *) alloca (last_basic_block);
memset (update_blocks, 0, last_basic_block);
update_idx = 0;
for (j = 0; j < el.nr_members; j++)
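This hunk shows the commit's core sizing rule: without renumbering, sindex values may be sparse, so anything indexed by block number must be sized by last_basic_block, while num_basic_blocks is only a count of live blocks. A hedged illustration:

/* Arrays and bitmaps keyed by bb->sindex need last_basic_block slots;
   num_basic_blocks would undercount once the chain has gaps.  */
sbitmap seen = sbitmap_alloc (last_basic_block);
basic_block bb;

sbitmap_zero (seen);
FOR_ALL_BB (bb)
  SET_BIT (seen, bb->sindex);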
@ -2886,14 +2888,14 @@ init_regions ()
int rgn;
nr_regions = 0;
rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
rgn_table = (region *) xmalloc ((num_basic_blocks) * sizeof (region));
rgn_bb_table = (int *) xmalloc ((num_basic_blocks) * sizeof (int));
block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
|| n_basic_blocks == 1
|| num_basic_blocks == 1
|| !flag_schedule_interblock)
{
find_single_block_region ();
@ -2910,7 +2912,7 @@ init_regions ()
sbitmap *dom;
struct edge_list *edge_list;
dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
dom = sbitmap_vector_alloc (last_basic_block, last_basic_block);
/* The scheduler runs after flow; therefore, we can't blindly call
back into find_basic_blocks since doing so could invalidate the
@ -2951,7 +2953,7 @@ init_regions ()
if (CHECK_DEAD_NOTES)
{
blocks = sbitmap_alloc (n_basic_blocks);
blocks = sbitmap_alloc (last_basic_block);
deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
@ -2983,7 +2985,7 @@ schedule_insns (dump_file)
/* Taking care of this degenerate case makes the rest of
this code simpler. */
if (n_basic_blocks == 0)
if (num_basic_blocks == 0)
return;
scope_to_insns_initialize ();
@ -3018,10 +3020,10 @@ schedule_insns (dump_file)
compute_bb_for_insn (get_max_uid ());
any_large_regions = 0;
large_region_blocks = sbitmap_alloc (n_basic_blocks);
large_region_blocks = sbitmap_alloc (last_basic_block);
sbitmap_ones (large_region_blocks);
blocks = sbitmap_alloc (n_basic_blocks);
blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
/* Update life information. For regions consisting of multiple blocks

View File

@ -583,7 +583,7 @@ optimize_sibling_and_tail_recursive_calls ()
cleanup_cfg (CLEANUP_PRE_SIBCALL | CLEANUP_PRE_LOOP);
/* If there are no basic blocks, then there is nothing to do. */
if (n_basic_blocks == 0)
if (num_basic_blocks == 0)
return;
/* If we are using sjlj exceptions, we may need to add a call to
@ -610,7 +610,7 @@ optimize_sibling_and_tail_recursive_calls ()
/* Walk forwards through the last normal block and see if it
does nothing except fall into the exit block. */
for (insn = BLOCK_HEAD (n_basic_blocks - 1);
for (insn = EXIT_BLOCK_PTR->prev_bb->head;
insn;
insn = NEXT_INSN (insn))
{
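The rewrite above replaces the index arithmetic BLOCK_HEAD (n_basic_blocks - 1) with a chain lookup. In sketch form (illustrative variable names):

/* The last real block is the exit sentinel's chain predecessor,
   whatever its sindex happens to be.  */
basic_block last_bb = EXIT_BLOCK_PTR->prev_bb;
rtx insn = last_bb->head;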

View File

@ -648,13 +648,13 @@ examine_flow_edges ()
/* If this is the first time we've simulated this block, then we
must simulate each of its insns. */
if (!TEST_BIT (executable_blocks, succ_block->index))
if (!TEST_BIT (executable_blocks, succ_block->sindex))
{
rtx currinsn;
edge succ_edge = succ_block->succ;
/* Note that we have simulated this block. */
SET_BIT (executable_blocks, succ_block->index);
SET_BIT (executable_blocks, succ_block->sindex);
/* Simulate each insn within the block. */
currinsn = succ_block->head;
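The first-visit guard the comment describes looks roughly like this; simulate_insn is a hypothetical stand-in for the simulator's per-insn work:

if (!TEST_BIT (executable_blocks, succ_block->sindex))
  {
    rtx insn;

    /* Mark the block before walking it so cycles terminate.  */
    SET_BIT (executable_blocks, succ_block->sindex);
    for (insn = succ_block->head;
         insn != NEXT_INSN (succ_block->end);
         insn = NEXT_INSN (insn))
      simulate_insn (insn);  /* hypothetical helper  */
  }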
@ -740,6 +740,7 @@ optimize_unexecutable_edges (edges, executable_edges)
sbitmap executable_edges;
{
int i;
basic_block bb;
for (i = 0; i < NUM_EDGES (edges); i++)
{
@ -761,15 +762,15 @@ optimize_unexecutable_edges (edges, executable_edges)
remove_phi_alternative (PATTERN (insn), edge->src);
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Removing alternative for bb %d of phi %d\n",
edge->src->index, SSA_NAME (PATTERN (insn)));
"Removing alternative for bb %d of phi %d\n",
edge->src->sindex, SSA_NAME (PATTERN (insn)));
insn = NEXT_INSN (insn);
}
}
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Removing unexecutable edge from %d to %d\n",
edge->src->index, edge->dest->index);
edge->src->sindex, edge->dest->sindex);
/* Since the edge was not executable, remove it from the CFG. */
remove_edge (edge);
}
@ -797,9 +798,8 @@ optimize_unexecutable_edges (edges, executable_edges)
In cases B & C we are removing uses of registers, so make sure
to note those changes for the DF analyzer. */
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
rtx insn = bb->end;
edge edge = bb->succ;
@ -929,7 +929,7 @@ ssa_ccp_substitute_constants ()
static void
ssa_ccp_df_delete_unreachable_insns ()
{
int i;
basic_block b;
/* Use the CFG to find all the reachable blocks. */
find_unreachable_blocks ();
@ -937,10 +937,8 @@ ssa_ccp_df_delete_unreachable_insns ()
/* Now we know what blocks are not reachable. Mark all the insns
in those blocks as deleted for the DF analyzer. We'll let the
normal flow code actually remove the unreachable blocks. */
for (i = n_basic_blocks - 1; i >= 0; --i)
FOR_ALL_BB_REVERSE (b)
{
basic_block b = BASIC_BLOCK (i);
if (!(b->flags & BB_REACHABLE))
{
rtx start = b->head;
@ -1018,7 +1016,7 @@ ssa_const_prop ()
ssa_edges = sbitmap_alloc (VARRAY_SIZE (ssa_definition));
sbitmap_zero (ssa_edges);
executable_blocks = sbitmap_alloc (n_basic_blocks);
executable_blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (executable_blocks);
executable_edges = sbitmap_alloc (NUM_EDGES (edges));

View File

@ -153,7 +153,7 @@ static void delete_insn_bb
/* Create a control_dependent_block_to_edge_map, given the number
NUM_BASIC_BLOCKS of non-entry, non-exit basic blocks, e.g.,
n_basic_blocks. This memory must be released using
num_basic_blocks. This memory must be released using
control_dependent_block_to_edge_map_free (). */
static control_dependent_block_to_edge_map
@ -181,10 +181,10 @@ set_control_dependent_block_to_edge_map_bit (c, bb, edge_index)
basic_block bb;
int edge_index;
{
if (bb->index - (INVALID_BLOCK+1) >= c->length)
if (bb->sindex - (INVALID_BLOCK+1) >= c->length)
abort ();
bitmap_set_bit (c->data[bb->index - (INVALID_BLOCK+1)],
bitmap_set_bit (c->data[bb->sindex - (INVALID_BLOCK+1)],
edge_index);
}
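For orientation, the map being indexed here plausibly has the following shape (a sketch; the real definition sits earlier in ssa-dce.c):

/* One bitmap of edge indices per basic block; sizing LENGTH by
   last_basic_block covers sparse sindex values.  */
typedef struct control_dependent_block_to_edge_map_s
{
  int length;
  bitmap *data;
} *control_dependent_block_to_edge_map;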
@ -247,7 +247,7 @@ find_control_dependence (el, edge_index, pdom, cdbte)
abort ();
ending_block =
(INDEX_EDGE_PRED_BB (el, edge_index) == ENTRY_BLOCK_PTR)
? BASIC_BLOCK (0)
? ENTRY_BLOCK_PTR->next_bb
: find_pdom (pdom, INDEX_EDGE_PRED_BB (el, edge_index));
for (current_block = INDEX_EDGE_SUCC_BB (el, edge_index);
@ -271,15 +271,15 @@ find_pdom (pdom, block)
{
if (!block)
abort ();
if (block->index == INVALID_BLOCK)
if (block->sindex == INVALID_BLOCK)
abort ();
if (block == ENTRY_BLOCK_PTR)
return BASIC_BLOCK (0);
else if (block == EXIT_BLOCK_PTR || pdom[block->index] == EXIT_BLOCK)
return ENTRY_BLOCK_PTR->next_bb;
else if (block == EXIT_BLOCK_PTR || pdom[block->sindex] == EXIT_BLOCK)
return EXIT_BLOCK_PTR;
else
return BASIC_BLOCK (pdom[block->index]);
return BASIC_BLOCK (pdom[block->sindex]);
}
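A reminder of the property these routines rely on, stated as a comment (standard control-dependence theory, not text from the commit):

/* B is control dependent on edge (U -> V) iff B postdominates V but
   does not postdominate U; walking from the edge's successor up to
   find_pdom (pdom, U) enumerates exactly those blocks.  */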
/* Determine if the given CURRENT_RTX uses a hard register not
@ -490,6 +490,7 @@ ssa_eliminate_dead_code ()
{
int i;
rtx insn;
basic_block bb;
/* Necessary instructions with operands to explore. */
varray_type unprocessed_instructions;
/* Map element (b,e) is nonzero if the block is control dependent on
@ -505,7 +506,7 @@ ssa_eliminate_dead_code ()
mark_all_insn_unnecessary ();
VARRAY_RTX_INIT (unprocessed_instructions, 64,
"unprocessed instructions");
cdbte = control_dependent_block_to_edge_map_create (n_basic_blocks);
cdbte = control_dependent_block_to_edge_map_create (last_basic_block);
/* Prepare for use of BLOCK_NUM (). */
connect_infinite_loops_to_exit ();
@ -513,12 +514,12 @@ ssa_eliminate_dead_code ()
compute_bb_for_insn (max_insn_uid);
/* Compute control dependence. */
pdom = (int *) xmalloc (n_basic_blocks * sizeof (int));
for (i = 0; i < n_basic_blocks; ++i)
pdom = (int *) xmalloc (last_basic_block * sizeof (int));
for (i = 0; i < last_basic_block; ++i)
pdom[i] = INVALID_BLOCK;
calculate_dominance_info (pdom, NULL, CDI_POST_DOMINATORS);
/* Assume there is a path from each node to the exit block. */
for (i = 0; i < n_basic_blocks; ++i)
for (i = 0; i < last_basic_block; ++i)
if (pdom[i] == INVALID_BLOCK)
pdom[i] = EXIT_BLOCK;
el = create_edge_list ();
@ -718,10 +719,8 @@ ssa_eliminate_dead_code ()
/* Find any blocks with no successors and ensure they are followed
by a BARRIER. delete_insn has the nasty habit of deleting barriers
when deleting insns. */
for (i = 0; i < n_basic_blocks; i++)
FOR_ALL_BB (bb)
{
basic_block bb = BASIC_BLOCK (i);
if (bb->succ == NULL)
{
rtx next = NEXT_INSN (bb->end);

gcc/ssa.c
View File

@ -430,7 +430,7 @@ remove_phi_alternative (set, block)
int num_elem = GET_NUM_ELEM (phi_vec);
int v, c;
c = block->index;
c = block->sindex;
for (v = num_elem - 2; v >= 0; v -= 2)
if (INTVAL (RTVEC_ELT (phi_vec, v + 1)) == c)
{
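The pairwise v -= 2 walk above depends on the PHI operand layout that the insert_phi_node hunk further down also assumes. Stated as a sketch:

/* A PHI's rtvec alternates values and predecessor blocks:
     (value0, block0, value1, block1, ...)
   so matching a block's sindex removes the whole pair.  */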
@ -470,18 +470,18 @@ find_evaluations (evals, nregs)
sbitmap *evals;
int nregs;
{
int bb;
basic_block bb;
sbitmap_vector_zero (evals, nregs);
fe_evals = evals;
for (bb = n_basic_blocks; --bb >= 0; )
FOR_ALL_BB_REVERSE (bb)
{
rtx p, last;
fe_current_bb = bb;
p = BLOCK_HEAD (bb);
last = BLOCK_END (bb);
fe_current_bb = bb->sindex;
p = bb->head;
last = bb->end;
while (1)
{
if (INSN_P (p))
@ -520,7 +520,7 @@ compute_dominance_frontiers_1 (frontiers, idom, bb, done)
{
basic_block b = BASIC_BLOCK (bb);
edge e;
int c;
basic_block c;
SET_BIT (done, bb);
sbitmap_zero (frontiers[bb]);
@ -528,25 +528,25 @@ compute_dominance_frontiers_1 (frontiers, idom, bb, done)
/* Do the frontier of the children first. Not all children in the
dominator tree (blocks dominated by this one) are children in the
CFG, so check all blocks. */
for (c = 0; c < n_basic_blocks; ++c)
if (idom[c] == bb && ! TEST_BIT (done, c))
compute_dominance_frontiers_1 (frontiers, idom, c, done);
FOR_ALL_BB (c)
if (idom[c->sindex] == bb && ! TEST_BIT (done, c->sindex))
compute_dominance_frontiers_1 (frontiers, idom, c->sindex, done);
/* Find blocks conforming to rule (1) above. */
for (e = b->succ; e; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR)
continue;
if (idom[e->dest->index] != bb)
SET_BIT (frontiers[bb], e->dest->index);
if (idom[e->dest->sindex] != bb)
SET_BIT (frontiers[bb], e->dest->sindex);
}
/* Find blocks conforming to rule (2). */
for (c = 0; c < n_basic_blocks; ++c)
if (idom[c] == bb)
FOR_ALL_BB (c)
if (idom[c->sindex] == bb)
{
int x;
EXECUTE_IF_SET_IN_SBITMAP (frontiers[c], 0, x,
EXECUTE_IF_SET_IN_SBITMAP (frontiers[c->sindex], 0, x,
{
if (idom[x] != bb)
SET_BIT (frontiers[bb], x);
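Rules (1) and (2) are the two halves of the classic dominance-frontier recurrence. Written out (standard theory, not the commit's text):

/* DF(b) = { s in succ(b) : idom(s) != b }                     -- rule (1)
         U { x in DF(c)   : c a dom-tree child of b,
                            idom(x) != b }                     -- rule (2)  */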
@ -559,7 +559,7 @@ compute_dominance_frontiers (frontiers, idom)
sbitmap *frontiers;
int *idom;
{
sbitmap done = sbitmap_alloc (n_basic_blocks);
sbitmap done = sbitmap_alloc (last_basic_block);
sbitmap_zero (done);
compute_dominance_frontiers_1 (frontiers, idom, 0, done);
@ -585,7 +585,7 @@ compute_iterated_dominance_frontiers (idfs, frontiers, evals, nregs)
sbitmap worklist;
int reg, passes = 0;
worklist = sbitmap_alloc (n_basic_blocks);
worklist = sbitmap_alloc (last_basic_block);
for (reg = 0; reg < nregs; ++reg)
{
@ -665,7 +665,7 @@ insert_phi_node (regno, bb)
if (e->src != ENTRY_BLOCK_PTR)
{
RTVEC_ELT (vec, i + 0) = pc_rtx;
RTVEC_ELT (vec, i + 1) = GEN_INT (e->src->index);
RTVEC_ELT (vec, i + 1) = GEN_INT (e->src->sindex);
}
phi = gen_rtx_PHI (VOIDmode, vec);
@ -975,7 +975,7 @@ rename_block (bb, idom)
edge e;
rtx insn, next, last;
struct rename_set_data *set_data = NULL;
int c;
basic_block c;
/* Step One: Walk the basic block, adding new names for sets and
replacing uses. */
@ -1078,9 +1078,9 @@ rename_block (bb, idom)
/* Step Three: Do the same to the children of this block in
dominator order. */
for (c = 0; c < n_basic_blocks; ++c)
if (idom[c] == bb)
rename_block (c, idom);
FOR_ALL_BB (c)
if (idom[c->sindex] == bb)
rename_block (c->sindex, idom);
/* Step Four: Update the sets to refer to their new register,
and restore ssa_rename_to to its previous state. */
@ -1140,6 +1140,8 @@ convert_to_ssa ()
int nregs;
basic_block bb;
/* Don't do it twice. */
if (in_ssa_form)
abort ();
@ -1148,28 +1150,27 @@ convert_to_ssa ()
dead code. We'll let the SSA optimizers do that. */
life_analysis (get_insns (), NULL, 0);
idom = (int *) alloca (n_basic_blocks * sizeof (int));
memset ((void *) idom, -1, (size_t) n_basic_blocks * sizeof (int));
idom = (int *) alloca (last_basic_block * sizeof (int));
memset ((void *) idom, -1, (size_t) last_basic_block * sizeof (int));
calculate_dominance_info (idom, NULL, CDI_DOMINATORS);
if (rtl_dump_file)
{
int i;
fputs (";; Immediate Dominators:\n", rtl_dump_file);
for (i = 0; i < n_basic_blocks; ++i)
fprintf (rtl_dump_file, ";\t%3d = %3d\n", i, idom[i]);
FOR_ALL_BB (bb)
fprintf (rtl_dump_file, ";\t%3d = %3d\n", bb->sindex, idom[bb->sindex]);
fflush (rtl_dump_file);
}
/* Compute dominance frontiers. */
dfs = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
dfs = sbitmap_vector_alloc (last_basic_block, last_basic_block);
compute_dominance_frontiers (dfs, idom);
if (rtl_dump_file)
{
dump_sbitmap_vector (rtl_dump_file, ";; Dominance Frontiers:",
"; Basic Block", dfs, n_basic_blocks);
"; Basic Block", dfs, last_basic_block);
fflush (rtl_dump_file);
}
@ -1177,12 +1178,12 @@ convert_to_ssa ()
ssa_max_reg_num = max_reg_num ();
nregs = ssa_max_reg_num;
evals = sbitmap_vector_alloc (nregs, n_basic_blocks);
evals = sbitmap_vector_alloc (nregs, last_basic_block);
find_evaluations (evals, nregs);
/* Compute the iterated dominance frontier for each register. */
idfs = sbitmap_vector_alloc (nregs, n_basic_blocks);
idfs = sbitmap_vector_alloc (nregs, last_basic_block);
compute_iterated_dominance_frontiers (idfs, dfs, evals, nregs);
if (rtl_dump_file)
@ -1383,7 +1384,7 @@ eliminate_phi (e, reg_partition)
n_nodes = 0;
for (; PHI_NODE_P (insn); insn = next_nonnote_insn (insn))
{
rtx* preg = phi_alternative (PATTERN (insn), e->src->index);
rtx* preg = phi_alternative (PATTERN (insn), e->src->sindex);
rtx tgt = SET_DEST (PATTERN (insn));
rtx reg;
@ -1445,7 +1446,7 @@ eliminate_phi (e, reg_partition)
insert_insn_on_edge (insn, e);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Emitting copy on edge (%d,%d)\n",
e->src->index, e->dest->index);
e->src->sindex, e->dest->sindex);
sbitmap_free (visited);
out:
@ -1500,7 +1501,7 @@ make_regs_equivalent_over_bad_edges (bb, reg_partition)
for (e = b->pred; e; e = e->pred_next)
if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
{
rtx *alt = phi_alternative (set, e->src->index);
rtx *alt = phi_alternative (set, e->src->sindex);
int alt_regno;
/* If there is no alternative corresponding to this edge,
@ -1581,7 +1582,7 @@ make_equivalent_phi_alternatives_equivalent (bb, reg_partition)
/* Scan over edges. */
for (e = b->pred; e; e = e->pred_next)
{
int pred_block = e->src->index;
int pred_block = e->src->sindex;
/* Identify the phi alternatives from both phi
nodes corresponding to this edge. */
rtx *alt = phi_alternative (set, pred_block);
@ -1629,7 +1630,7 @@ make_equivalent_phi_alternatives_equivalent (bb, reg_partition)
static partition
compute_conservative_reg_partition ()
{
int bb;
basic_block bb;
int changed = 0;
/* We don't actually work with hard registers, but it's easier to
@ -1642,17 +1643,17 @@ compute_conservative_reg_partition ()
be copied on abnormal critical edges are placed in the same
partition. This saves us from having to split abnormal critical
edges. */
for (bb = n_basic_blocks; --bb >= 0; )
changed += make_regs_equivalent_over_bad_edges (bb, p);
FOR_ALL_BB_REVERSE (bb)
changed += make_regs_equivalent_over_bad_edges (bb->sindex, p);
Now we have to ensure that corresponding arguments of phi nodes
assigning to corresponding regs are equivalent. Iterate until
nothing changes. */
while (changed > 0)
{
changed = 0;
for (bb = n_basic_blocks; --bb >= 0; )
changed += make_equivalent_phi_alternatives_equivalent (bb, p);
FOR_ALL_BB_REVERSE (bb)
changed += make_equivalent_phi_alternatives_equivalent (bb->sindex, p);
}
return p;
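A brief termination note on the loop above (editorial reasoning, not part of the change):

/* Each pass returns the number of partition merges it performed, and
   every merge strictly shrinks the number of register classes, so
   CHANGED must eventually stay zero and the while loop exits.  */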
@ -1848,7 +1849,7 @@ coalesce_regs_in_successor_phi_nodes (bb, p, conflicts)
static partition
compute_coalesced_reg_partition ()
{
int bb;
basic_block bb;
int changed = 0;
regset_head phi_set_head;
regset phi_set = &phi_set_head;
@ -1860,8 +1861,8 @@ compute_coalesced_reg_partition ()
be copied on abnormal critical edges are placed in the same
partition. This saves us from having to split abnormal critical
edges (which can't be done). */
for (bb = n_basic_blocks; --bb >= 0; )
make_regs_equivalent_over_bad_edges (bb, p);
FOR_ALL_BB_REVERSE (bb)
make_regs_equivalent_over_bad_edges (bb->sindex, p);
INIT_REG_SET (phi_set);
@ -1883,12 +1884,11 @@ compute_coalesced_reg_partition ()
blocks first, so that most frequently executed copies would
be more likely to be removed by register coalescing. But any
order will generate correct, if non-optimal, results. */
for (bb = n_basic_blocks; --bb >= 0; )
FOR_ALL_BB_REVERSE (bb)
{
basic_block block = BASIC_BLOCK (bb);
changed += coalesce_regs_in_copies (block, p, conflicts);
changed +=
coalesce_regs_in_successor_phi_nodes (block, p, conflicts);
changed += coalesce_regs_in_copies (bb, p, conflicts);
changed +=
coalesce_regs_in_successor_phi_nodes (bb, p, conflicts);
}
conflict_graph_delete (conflicts);
@ -2094,11 +2094,10 @@ static void
rename_equivalent_regs (reg_partition)
partition reg_partition;
{
int bb;
basic_block b;
for (bb = n_basic_blocks; --bb >= 0; )
FOR_ALL_BB_REVERSE (b)
{
basic_block b = BASIC_BLOCK (bb);
rtx next = b->head;
rtx last = b->end;
rtx insn;
@ -2141,7 +2140,7 @@ rename_equivalent_regs (reg_partition)
void
convert_from_ssa ()
{
int bb;
basic_block b, bb;
partition reg_partition;
rtx insns = get_insns ();
@ -2167,9 +2166,8 @@ convert_from_ssa ()
rename_equivalent_regs (reg_partition);
/* Eliminate the PHI nodes. */
for (bb = n_basic_blocks; --bb >= 0; )
FOR_ALL_BB_REVERSE (b)
{
basic_block b = BASIC_BLOCK (bb);
edge e;
for (e = b->pred; e; e = e->pred_next)
@ -2180,17 +2178,17 @@ convert_from_ssa ()
partition_delete (reg_partition);
/* Actually delete the PHI nodes. */
for (bb = n_basic_blocks; --bb >= 0; )
FOR_ALL_BB_REVERSE (bb)
{
rtx insn = BLOCK_HEAD (bb);
rtx insn = bb->head;
while (1)
{
/* If this is a PHI node delete it. */
if (PHI_NODE_P (insn))
{
if (insn == BLOCK_END (bb))
BLOCK_END (bb) = PREV_INSN (insn);
if (insn == bb->end)
bb->end = PREV_INSN (insn);
insn = delete_insn (insn);
}
/* Since all the phi nodes come at the beginning of the
@ -2199,7 +2197,7 @@ convert_from_ssa ()
else if (INSN_P (insn))
break;
/* If we've reached the end of the block, stop. */
else if (insn == BLOCK_END (bb))
else if (insn == bb->end)
break;
else
insn = NEXT_INSN (insn);
@ -2259,7 +2257,7 @@ for_each_successor_phi (bb, fn, data)
{
int result;
rtx phi_set = PATTERN (insn);
rtx *alternative = phi_alternative (phi_set, bb->index);
rtx *alternative = phi_alternative (phi_set, bb->sindex);
rtx phi_src;
/* This phi function may not have an alternative