basic-block.h (create_basic_block, [...]): Kill.

* basic-block.h (create_basic_block, merge_blocks_nomove): Kill.
	* cfgcleanup.c (merge_blocks): Rename to merge_blocks_move.
	(merge_blocks_move_predecessor_nojumps,
	 merge_blocks_move_successor_nojumps): Use merge_blocks.
	(try_optimize_cfg): Use merge_blocks_move.
	* cfgrtl.c (create_basic_block): Rename to rtl_create_basic_block.
	(merge_blocks_nomove): Rename to rtl_merge_blocks.
	(cfg_layout_create_basic_block): New.
	(rtl_can_merge_blocks): New.
	(cfg_layout_split_block): Do not alloc aux by hand.
	* cfghooks.h (cfg_hooks): Add create_basic_block, can_merge_blocks_p,
	merge_blocks.
	(create_basic_block, can_merge_blocks_p, merge_blocks): New macros.
	* cfglayout.c (cfg_layout_duplicate_bb): Do not allocate aux by hand.
	* cfgloopmanip.c (loop_split_edge_with): Likewise.
	* ifcvt.c (merge_if_block): Use merge_blocks.
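
A minimal sketch (not part of the patch; the helper name is made up) of what
the hook-based interface added above looks like from a caller: the new macros
expand to cfg_hooks->can_merge_blocks_p and cfg_hooks->merge_blocks, so the
same code works whether the RTL or the cfglayout hooks are installed.

static bool
try_merge (basic_block a, basic_block b)
{
  /* Dispatches to rtl_can_merge_blocks or cfg_layout_can_merge_blocks_p.  */
  if (!can_merge_blocks_p (a, b))
    return false;
  /* Dispatches to rtl_merge_blocks (the old merge_blocks_nomove) or to
     cfg_layout_merge_blocks.  */
  merge_blocks (a, b);
  return true;
}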

	* basic-block.h (basic_block_def): Add field 'rbi'.
	* bb-reorder.c (find_traces, rotate_loop, mark_bb_visited,
	find_traces_1_round, copy_bb, connect_traces): Update use of rbi.
	* cfg.c (entry_exit_blocks): Add new field.
	* cfglayout.c: Include alloc-pool.h.
	(cfg_layout_pool): New.
	(record_effective_endpoints, fixup_reorder_chain,
	fixup_fallthru_exit_predecessor, cfg_layout_duplicate_bb): Update use
	of rbi.
	(cfg_layout_initialize_rbi): New function.
	(cfg_layout_initialize): Use it.
	(cfg_layout_finalize): Clear rbi fields.
	* cfglayout.h (RBI): Kill.
	(cfg_layout_initialize_rbi): Declare.
	* cfgloopmanip.c (copy_bbs): Use rbi.
	(record_exit_edges): Likewise.
	(duplicate_loop_to_header_edge): Likewise.
	* cfgrtl.c (cfg_layout_create_basic_block): Use
	cfg_layout_initialize_rbi.
	(cfg_layout_split_block): Use rbi.
	(cfg_layout_delete_block): Likewise.
	* loop-init.c (loop_optimizer_finalize): Likewise.
	* loop-unswitch.c (unswitch_loop): Likewise.
	* tracer.c (seen, tail_duplicate, layout_superblocks): Likewise.
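
An illustrative before/after (not from the patch itself; the loop is lifted
from the bb-reorder.c dump code below): the reordering data moves from the
aux pointer, reached through the RBI macro, to a dedicated rbi field that
cfg_layout_initialize_rbi allocates from cfg_layout_pool.

/* Before: the chain pointer lived behind bb->aux.  */
for (bb = traces[0].first; bb; bb = RBI (bb)->next)
  fprintf (rtl_dump_file, "%d ", bb->index);

/* After: rbi is a real basic_block_def field.  */
for (bb = traces[0].first; bb; bb = bb->rbi->next)
  fprintf (rtl_dump_file, "%d ", bb->index);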

	* cfgrtl.c: Update comments.
	(try_redirect_by_replacing_jump): New argument.
	(redirect_branch_edge): Break out from ...
	(rtl_redirect_edge_and_branch): ... this one.
	(update_cfg_after_block_merging): Break out from ...
	(rtl_merge_blocks): ... this one.
	(cfg_layout_split_edge): New.
	(cfg_layout_merge_blocks): New.
	(cfg_layout_can_merge_blocks_p): New.
	(cfg_layout_redirect_edge_and_branch): Reorganize.
	(cfg_layout_rtl_cfg_hooks): Fill in.
	(cfg_layout_delete_block): Kill barriers.
	* cfganal.c (can_fallthru): Deal with exit blocks.
	* cfglayout.c (cfg_layout_function_header): New variable.
	(record_effective_endpoints): Record function header.
	(fixup_reorder_chain): Fix up dead jumptables; place function header.
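
For readability, a condensed sketch of the reorganized entry point as it
stands after this change (abbreviated from the cfgrtl.c hunk further down;
error handling is simplified):

static bool
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return false;

  /* New third argument: whether we are in cfglayout mode.  */
  if (try_redirect_by_replacing_jump (e, target, false))
    return true;

  /* Do this fast path late so the code above can simplify
     self-redirections.  */
  if (e->dest == target)
    return false;

  /* The jump-rewriting part now broken out as redirect_branch_edge.  */
  return redirect_branch_edge (e, target);
}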

	* basic-block.h (CLEANUP_CFGLAYOUT): New flag.
	* bb-reorder.c (reorder_basic_blocks): Update call of
	cfg_layout_initialize.
	* cfgcleanup.c (try_optimize_cfg): Suppress optimizations of fallthru
	edges in cfglayout mode.
	* cfglayout.c (cleanup_unconditional_jumps): Kill.
	(cfg_layout_initialize): Kill argument loops; use cfgcleanup.
	* cfglayout.h (cfg_layout_initialize): Update prototype.
	* cfgloop.h (CP_INSIDE_CFGLAYOUT): Kill.
	* cfgloopmanip.c (loop_split_edge_with): Use split_edge.
	* flow.c (propagate_block): Do not crash when a basic block ends
	with the first insn in the chain.
	* loop-init.c (loop_optimizer_init): First enter cfglayout mode;
	do loop discovery later.
	* tracer.c (tracer): Update call of cfg_layout_initialize.
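
A usage sketch of the new calling convention, modeled on the loop-init.c hunk
below (illustration only): cfg_layout_initialize no longer takes a loops
argument and runs cleanup_cfg (CLEANUP_CFGLAYOUT) itself; a pass rebuilds the
rbi->next chain before leaving cfglayout mode.

basic_block bb;

cfg_layout_initialize ();	/* was: cfg_layout_initialize (loops) */
/* ... discover loops, transform the CFG ... */

/* Make the linear chain and leave cfglayout mode.  */
FOR_EACH_BB (bb)
  if (bb->next_bb != EXIT_BLOCK_PTR)
    bb->rbi->next = bb->next_bb;
cfg_layout_finalize ();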

From-SVN: r68899
Jan Hubicka 2003-07-03 20:40:29 +02:00 committed by Jan Hubicka
parent 9b269fc7bc
commit bc35512f09
17 changed files with 667 additions and 341 deletions

ChangeLog

@ -1,3 +1,79 @@
Thu Jul 3 20:36:47 CEST 2003 Jan Hubicka <jh@suse.cz>
* basic-block.h (create_basic_block, merge_blocks_nomove): Kill.
* cfgcleanup.c (merge_blocks): Rename to merge_blocks_move.
(merge_blocks_move_predecessor_nojumps,
merge_blocks_move_successor_nojumps): Use merge_blocks.
(try_optimize_cfg): Use merge_blocks_move.
* cfgrtl.c (create_basic_block): Rename to rtl_create_basic_block.
(merge_blocks_nomove): Rename to rtl_merge_blocks.
(cfg_layout_create_basic_block): New.
(rtl_can_merge_blocks): New.
(cfg_layout_split_block): Do not alloc aux by hand.
* cfghooks.h (cfg_hooks): Add create_basic_block, can_merge_blocks_p,
merge_blocks.
(create_basic_block, can_merge_blocks_p, merge_blocks): New macros.
* cfglayout.c (cfg_layout_duplicate_bb): Do not allocate aux by hand.
* cfgloopmanip.c (loop_split_edge_with): Likewise.
* ifcvt.c (merge_if_block): Use merge_blocks.
* basic-block.h (basic_block_def): Add field 'rbi'.
* bb-reorder.c (find_traces, rotate_loop, mark_bb_visited,
find_traces_1_round, copy_bb, connect_traces): Update use of rbi.
* cfg.c (entry_exit_blocks): Add new field.
* cfglayout.c: Include alloc-pool.h.
(cfg_layout_pool): New.
(record_effective_endpoints, fixup_reorder_chain,
fixup_fallthru_exit_predecessor, cfg_layout_duplicate_bb): Update use
of rbi.
(cfg_layout_initialize_rbi): New function.
(cfg_layout_initialize): Use it.
(cfg_layout_finalize): Clear rbi fields.
* cfglayout.h (RBI): Kill.
(cfg_layout_initialize_rbi): Declare.
* cfgloopmanip.c (copy_bbs): Use rbi.
(record_exit_edges): Likewise.
(duplicate_loop_to_header_edge): Likewise.
* cfgrtl.c (cfg_layout_create_basic_block): Use
cfg_layout_initialize_rbi.
(cfg_layout_split_block): Use rbi.
(cfg_layout_delete_block): Likewise.
* loop-init.c (loop_optimizer_finalize): Likewise.
* loop-unswitch.c (unswitch_loop): Likewise.
* tracer.c (seen, tail_duplicate, layout_superblocks): Likewise.
* cfgrtl.c: Update comments.
(try_redirect_by_replacing_jump): New argument.
(redirect_branch_edge): Break out from ...
(rtl_redirect_edge_and_branch): ... this one.
(update_cfg_after_block_merging): Break out from ...
(rtl_merge_blocks): ... this one.
(cfg_layout_split_edge): New.
(cfg_layout_merge_blocks): New.
(cfg_layout_can_merge_blocks_p): New.
(cfg_layout_redirect_edge_and_branch): Reorganize.
(cfg_layout_rtl_cfg_hooks): Fill in.
(cfg_layout_delete_block): Kill barriers.
* cfganal.c (can_fallthru): Deal with exit blocks.
* cfglayout.c (cfg_layout_function_header): New variable.
(record_effective_endpoints): Record function header.
(fixup_reorder_chain): Fix up dead jumptables; place function header.
* basic-block.h (CLEANUP_CFGLAYOUT): New flag.
* bb-reorder.c (reorder_basic_blocks): Update call of
cfg_layout_initialize.
* cfgcleanup.c (try_optimize_cfg): Suppress optimizations of fallthru
edges in cfglayout mode.
* cfglayout.c (cleanup_unconditional_jumps): Kill.
(cfg_layout_initialize): Kill argument loops; use cfgcleanup.
* cfglayout.h (cfg_layout_initialize): Update prototype.
* cfgloop.h (CP_INSIDE_CFGLAYOUT): Kill.
* cfgloopmanip.c (loop_split_edge_with): Use split_edge.
* flow.c (propagate_block): Do not crash when a basic block ends
with the first insn in the chain.
* loop-init.c (loop_optimizer_init): First enter cfglayout mode;
do loop discovery later.
* tracer.c (tracer): Update call of cfg_layout_initialize.
2003-07-03 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
* Makefile.in: Use dependency variables in lieu of explicit

basic-block.h

@ -241,6 +241,9 @@ typedef struct basic_block_def {
/* Various flags. See BB_* below. */
int flags;
/* Additional data maintained by cfg_layout routines. */
struct reorder_block_def *rbi;
} *basic_block;
#define BB_FREQ_MAX 10000
@ -362,9 +365,7 @@ extern void redirect_edge_succ PARAMS ((edge, basic_block));
extern edge redirect_edge_succ_nodup PARAMS ((edge, basic_block));
extern void redirect_edge_pred PARAMS ((edge, basic_block));
extern basic_block create_basic_block_structure PARAMS ((rtx, rtx, rtx, basic_block));
extern basic_block create_basic_block PARAMS ((rtx, rtx, basic_block));
extern void clear_bb_flags PARAMS ((void));
extern void merge_blocks_nomove PARAMS ((basic_block, basic_block));
extern void tidy_fallthru_edge PARAMS ((edge, basic_block,
basic_block));
extern void tidy_fallthru_edges PARAMS ((void));
@ -500,6 +501,7 @@ enum update_life_extent
#define CLEANUP_THREADING 64 /* Do jump threading. */
#define CLEANUP_NO_INSN_DEL 128 /* Do not try to delete trivially dead
insns. */
#define CLEANUP_CFGLAYOUT 256 /* Do cleanup in cfglayout mode. */
extern void life_analysis PARAMS ((rtx, FILE *, int));
extern int update_life_info PARAMS ((sbitmap, enum update_life_extent,
int));

bb-reorder.c

@ -205,7 +205,7 @@ find_traces (int *n_traces, struct trace *traces)
basic_block bb;
fprintf (rtl_dump_file, "Trace %d (round %d): ", i + 1,
traces[i].round + 1);
for (bb = traces[i].first; bb != traces[i].last; bb = RBI (bb)->next)
for (bb = traces[i].first; bb != traces[i].last; bb = bb->rbi->next)
fprintf (rtl_dump_file, "%d [%d] ", bb->index, bb->frequency);
fprintf (rtl_dump_file, "%d [%d]\n", bb->index, bb->frequency);
}
@ -237,14 +237,14 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
edge e;
for (e = bb->succ; e; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR
&& RBI (e->dest)->visited != trace_n
&& e->dest->rbi->visited != trace_n
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX))
{
if (is_preferred)
{
/* The best edge is preferred. */
if (!RBI (e->dest)->visited
if (!e->dest->rbi->visited
|| bbd[e->dest->index].start_of_trace >= 0)
{
/* The current edge E is also preferred. */
@ -260,7 +260,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
}
else
{
if (!RBI (e->dest)->visited
if (!e->dest->rbi->visited
|| bbd[e->dest->index].start_of_trace >= 0)
{
/* The current edge E is preferred. */
@ -283,7 +283,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
}
}
}
bb = RBI (bb)->next;
bb = bb->rbi->next;
}
while (bb != back_edge->dest);
@ -293,17 +293,17 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
the trace. */
if (back_edge->dest == trace->first)
{
trace->first = RBI (best_bb)->next;
trace->first = best_bb->rbi->next;
}
else
{
basic_block prev_bb;
for (prev_bb = trace->first;
RBI (prev_bb)->next != back_edge->dest;
prev_bb = RBI (prev_bb)->next)
prev_bb->rbi->next != back_edge->dest;
prev_bb = prev_bb->rbi->next)
;
RBI (prev_bb)->next = RBI (best_bb)->next;
prev_bb->rbi->next = best_bb->rbi->next;
/* Try to get rid of uncond jump to cond jump. */
if (prev_bb->succ && !prev_bb->succ->succ_next)
@ -324,7 +324,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
/* We have not found suitable loop tail so do no rotation. */
best_bb = back_edge->src;
}
RBI (best_bb)->next = NULL;
best_bb->rbi->next = NULL;
return best_bb;
}
@ -333,7 +333,7 @@ rotate_loop (edge back_edge, struct trace *trace, int trace_n)
static void
mark_bb_visited (basic_block bb, int trace)
{
RBI (bb)->visited = trace;
bb->rbi->visited = trace;
if (bbd[bb->index].heap)
{
fibheap_delete_node (bbd[bb->index].heap, bbd[bb->index].node);
@ -420,8 +420,8 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (e->dest == EXIT_BLOCK_PTR)
continue;
if (RBI (e->dest)->visited
&& RBI (e->dest)->visited != *n_traces)
if (e->dest->rbi->visited
&& e->dest->rbi->visited != *n_traces)
continue;
prob = e->probability;
@ -453,7 +453,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
{
if (e == best_edge
|| e->dest == EXIT_BLOCK_PTR
|| RBI (e->dest)->visited)
|| e->dest->rbi->visited)
continue;
key = bb_to_key (e->dest);
@ -508,7 +508,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (best_edge) /* Suitable successor was found. */
{
if (RBI (best_edge->dest)->visited == *n_traces)
if (best_edge->dest->rbi->visited == *n_traces)
{
/* We do nothing with one basic block loops. */
if (best_edge->dest != bb)
@ -528,7 +528,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
"Rotating loop %d - %d\n",
best_edge->dest->index, bb->index);
}
RBI (bb)->next = best_edge->dest;
bb->rbi->next = best_edge->dest;
bb = rotate_loop (best_edge, trace, *n_traces);
}
}
@ -583,7 +583,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
if (e != best_edge
&& (e->flags & EDGE_CAN_FALLTHRU)
&& !(e->flags & EDGE_COMPLEX)
&& !RBI (e->dest)->visited
&& !e->dest->rbi->visited
&& !e->dest->pred->pred_next
&& e->dest->succ
&& (e->dest->succ->flags & EDGE_CAN_FALLTHRU)
@ -599,7 +599,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
break;
}
RBI (bb)->next = best_edge->dest;
bb->rbi->next = best_edge->dest;
bb = best_edge->dest;
}
}
@ -615,7 +615,7 @@ find_traces_1_round (int branch_th, int exec_th, gcov_type count_th,
for (e = bb->succ; e; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR
|| RBI (e->dest)->visited)
|| e->dest->rbi->visited)
continue;
if (bbd[e->dest->index].heap)
@ -656,15 +656,15 @@ copy_bb (basic_block old_bb, edge e, basic_block bb, int trace)
new_bb = cfg_layout_duplicate_bb (old_bb, e);
if (e->dest != new_bb)
abort ();
if (RBI (e->dest)->visited)
if (e->dest->rbi->visited)
abort ();
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Duplicated bb %d (created bb %d)\n",
old_bb->index, new_bb->index);
RBI (new_bb)->visited = trace;
RBI (new_bb)->next = RBI (bb)->next;
RBI (bb)->next = new_bb;
new_bb->rbi->visited = trace;
new_bb->rbi->next = bb->rbi->next;
bb->rbi->next = new_bb;
if (new_bb->index >= array_size || last_basic_block > array_size)
{
@ -826,7 +826,7 @@ connect_traces (int n_traces, struct trace *traces)
}
if (best)
{
RBI (best->src)->next = best->dest;
best->src->rbi->next = best->dest;
t2 = bbd[best->src->index].end_of_trace;
connected[t2] = true;
if (rtl_dump_file)
@ -840,7 +840,7 @@ connect_traces (int n_traces, struct trace *traces)
}
if (last_trace >= 0)
RBI (traces[last_trace].last)->next = traces[t2].first;
traces[last_trace].last->rbi->next = traces[t2].first;
last_trace = t;
/* Find the successor traces. */
@ -876,7 +876,7 @@ connect_traces (int n_traces, struct trace *traces)
best->src->index, best->dest->index);
}
t = bbd[best->dest->index].start_of_trace;
RBI (traces[last_trace].last)->next = traces[t].first;
traces[last_trace].last->rbi->next = traces[t].first;
connected[t] = true;
last_trace = t;
}
@ -964,7 +964,7 @@ connect_traces (int n_traces, struct trace *traces)
if (next_bb && next_bb != EXIT_BLOCK_PTR)
{
t = bbd[next_bb->index].start_of_trace;
RBI (traces[last_trace].last)->next = traces[t].first;
traces[last_trace].last->rbi->next = traces[t].first;
connected[t] = true;
last_trace = t;
}
@ -982,7 +982,7 @@ connect_traces (int n_traces, struct trace *traces)
basic_block bb;
fprintf (rtl_dump_file, "Final order:\n");
for (bb = traces[0].first; bb; bb = RBI (bb)->next)
for (bb = traces[0].first; bb; bb = bb->rbi->next)
fprintf (rtl_dump_file, "%d ", bb->index);
fprintf (rtl_dump_file, "\n");
fflush (rtl_dump_file);
@ -1064,7 +1064,7 @@ reorder_basic_blocks (void)
if ((* targetm.cannot_modify_jumps_p) ())
return;
cfg_layout_initialize (NULL);
cfg_layout_initialize ();
set_edge_can_fallthru_flag ();
mark_dfs_back_edges ();

cfg.c

@ -113,7 +113,8 @@ struct basic_block_def entry_exit_blocks[2]
NULL, /* loop_father */
0, /* count */
0, /* frequency */
0 /* flags */
0, /* flags */
NULL /* rbi */
},
{
NULL, /* head */
@ -134,7 +135,8 @@ struct basic_block_def entry_exit_blocks[2]
NULL, /* loop_father */
0, /* count */
0, /* frequency */
0 /* flags */
0, /* flags */
NULL /* rbi */
}
};

cfganal.c

@ -104,12 +104,12 @@ bool
can_fallthru (basic_block src, basic_block target)
{
rtx insn = src->end;
rtx insn2 = target->head;
rtx insn2 = target == EXIT_BLOCK_PTR ? NULL : target->head;
if (src->next_bb != target)
return 0;
if (!active_insn_p (insn2))
if (insn2 && !active_insn_p (insn2))
insn2 = next_active_insn (insn2);
/* ??? Later we may add code to move jump tables offline. */

cfgcleanup.c

@ -77,7 +77,6 @@ static bool label_is_jump_target_p (rtx, rtx);
static bool tail_recursion_label_p (rtx);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static basic_block merge_blocks (edge,basic_block,basic_block, int);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
@ -704,7 +703,7 @@ merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
link_block (a, b->prev_bb);
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block. B has no outgoing
@ -758,7 +757,7 @@ merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
b->index, a->index);
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
@ -774,7 +773,7 @@ merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
relative ordering of these two. Hopefully it is not too common. */
static basic_block
merge_blocks (edge e, basic_block b, basic_block c, int mode)
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
basic_block next;
/* If C has a tail recursion label, do not merge. There is no
@ -790,7 +789,7 @@ merge_blocks (edge e, basic_block b, basic_block c, int mode)
if (e->flags & EDGE_FALLTHRU)
{
int b_index = b->index, c_index = c->index;
merge_blocks_nomove (b, c);
merge_blocks (b, c);
update_forwarder_flag (b);
if (rtl_dump_file)
@ -1686,7 +1685,8 @@ try_optimize_cfg (int mode)
b->index);
delete_block (b);
changed = true;
if (!(mode & CLEANUP_CFGLAYOUT))
changed = true;
b = c;
}
@ -1712,15 +1712,24 @@ try_optimize_cfg (int mode)
{
rtx label = b->head;
b->head = NEXT_INSN (b->head);
delete_insn_chain (label, label);
/* In case the label is undeletable, move it after the
BASIC_BLOCK note. */
if (NOTE_LINE_NUMBER (b->head) == NOTE_INSN_DELETED_LABEL)
{
rtx bb_note = NEXT_INSN (b->head);
reorder_insns_nobb (label, label, bb_note);
b->head = bb_note;
}
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleted label in block %i.\n",
b->index);
}
/* If we fall through an empty block, we can remove it. */
if (b->pred->pred_next == NULL
if (!(mode & CLEANUP_CFGLAYOUT)
&& b->pred->pred_next == NULL
&& (b->pred->flags & EDGE_FALLTHRU)
&& GET_CODE (b->head) != CODE_LABEL
&& FORWARDER_BLOCK_P (b)
@ -1746,21 +1755,39 @@ try_optimize_cfg (int mode)
&& !(s->flags & EDGE_COMPLEX)
&& (c = s->dest) != EXIT_BLOCK_PTR
&& c->pred->pred_next == NULL
&& b != c
/* If the jump insn has side effects,
we can't kill the edge. */
&& (GET_CODE (b->end) != JUMP_INSN
|| (flow2_completed
? simplejump_p (b->end)
: onlyjump_p (b->end)))
&& (next = merge_blocks (s, b, c, mode)))
{
b = next;
changed_here = true;
&& b != c)
{
/* When not in cfg_layout mode, use code aware of reordering
INSNs. This code possibly creates new basic blocks, so it
does not fit the merge_blocks interface and is kept here in
the hope that it will become useless once more of the compiler
is transformed to use cfg_layout mode. */
if ((mode & CLEANUP_CFGLAYOUT)
&& can_merge_blocks_p (b, c))
{
merge_blocks (b, c);
update_forwarder_flag (b);
changed_here = true;
}
else if (!(mode & CLEANUP_CFGLAYOUT)
/* If the jump insn has side effects,
we can't kill the edge. */
&& (GET_CODE (b->end) != JUMP_INSN
|| (flow2_completed
? simplejump_p (b->end)
: onlyjump_p (b->end)))
&& (next = merge_blocks_move (s, b, c, mode)))
{
b = next;
changed_here = true;
}
}
/* Simplify branch over branch. */
if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
if ((mode & CLEANUP_EXPENSIVE)
&& !(mode & CLEANUP_CFGLAYOUT)
&& try_simplify_condjump (b))
changed_here = true;
/* If B has a single outgoing edge, but uses a

cfghooks.h

@ -31,6 +31,9 @@ struct cfg_hooks
/* Basic CFG manipulation. */
/* Return new basic block */
basic_block (*create_basic_block) PARAMS ((void *head, void *end, basic_block after));
/* Redirect edge E to the given basic block B and update underlying program
representation. Returns false when edge is not easily redirectable for
whatever reason. */
@ -47,6 +50,12 @@ struct cfg_hooks
/* Split basic block B after specified instruction I. */
edge (*split_block) (basic_block b, void * i);
/* Return true when blocks A and B can be merged into single basic block. */
bool (*can_merge_blocks_p) PARAMS ((basic_block a, basic_block b));
/* Merge blocks A and B. */
void (*merge_blocks) PARAMS ((basic_block a, basic_block b));
/* Higher level functions representable by primitive operations above if
we didn't have some oddities in RTL and Tree representations. */
basic_block (*cfgh_split_edge) (edge);
@ -57,6 +66,9 @@ struct cfg_hooks
#define split_block(e,i) cfg_hooks->split_block (e,i)
#define delete_block(b) cfg_hooks->delete_block (b)
#define split_edge(e) cfg_hooks->cfgh_split_edge (e)
#define create_basic_block(h,e,a) cfg_hooks->create_basic_block (h,e,a)
#define can_merge_blocks_p(a,b) cfg_hooks->can_merge_blocks_p (a,b)
#define merge_blocks(a,b) cfg_hooks->merge_blocks (a,b)
/* Hooks containers. */
extern struct cfg_hooks rtl_cfg_hooks;

cfglayout.c

@ -34,13 +34,16 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "cfgloop.h"
#include "target.h"
#include "ggc.h"
#include "alloc-pool.h"
/* The contents of the current function definition are allocated
in this obstack, and all are freed at the end of the function. */
extern struct obstack flow_obstack;
alloc_pool cfg_layout_pool;
/* Holds the interesting trailing notes for the function. */
rtx cfg_layout_function_footer;
rtx cfg_layout_function_footer, cfg_layout_function_header;
static rtx skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
@ -51,7 +54,6 @@ static void set_block_levels (tree, int);
static void change_scope (rtx, tree, tree);
void verify_insn_chain (void);
static void cleanup_unconditional_jumps (struct loops *);
static void fixup_fallthru_exit_predecessor (void);
static rtx duplicate_insn_chain (rtx, rtx);
static void break_superblocks (void);
@ -189,19 +191,38 @@ label_for_bb (basic_block bb)
static void
record_effective_endpoints (void)
{
rtx next_insn = get_insns ();
rtx next_insn;
basic_block bb;
rtx insn;
for (insn = get_insns ();
NEXT_INSN (insn) && GET_CODE (insn) == NOTE;
insn = NEXT_INSN (insn))
{
if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
{
insn = NULL;
break;
}
if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
break;
}
if (insn)
cfg_layout_function_header = unlink_insn_chain (get_insns (), insn);
else
cfg_layout_function_header = NULL_RTX;
next_insn = get_insns ();
FOR_EACH_BB (bb)
{
rtx end;
if (PREV_INSN (bb->head) && next_insn != bb->head)
RBI (bb)->header = unlink_insn_chain (next_insn,
bb->rbi->header = unlink_insn_chain (next_insn,
PREV_INSN (bb->head));
end = skip_insns_after_block (bb);
if (NEXT_INSN (bb->end) && bb->end != end)
RBI (bb)->footer = unlink_insn_chain (NEXT_INSN (bb->end), end);
bb->rbi->footer = unlink_insn_chain (NEXT_INSN (bb->end), end);
next_insn = NEXT_INSN (bb->end);
}
@ -529,21 +550,29 @@ fixup_reorder_chain (void)
int index;
rtx insn = NULL;
if (cfg_layout_function_header)
{
set_first_insn (cfg_layout_function_header);
insn = cfg_layout_function_header;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
}
/* First do the bulk reordering -- rechain the blocks without regard to
the needed changes to jumps and labels. */
for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
bb != 0;
bb = RBI (bb)->next, index++)
bb = bb->rbi->next, index++)
{
if (RBI (bb)->header)
if (bb->rbi->header)
{
if (insn)
NEXT_INSN (insn) = RBI (bb)->header;
NEXT_INSN (insn) = bb->rbi->header;
else
set_first_insn (RBI (bb)->header);
PREV_INSN (RBI (bb)->header) = insn;
insn = RBI (bb)->header;
set_first_insn (bb->rbi->header);
PREV_INSN (bb->rbi->header) = insn;
insn = bb->rbi->header;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
}
@ -553,10 +582,10 @@ fixup_reorder_chain (void)
set_first_insn (bb->head);
PREV_INSN (bb->head) = insn;
insn = bb->end;
if (RBI (bb)->footer)
if (bb->rbi->footer)
{
NEXT_INSN (insn) = RBI (bb)->footer;
PREV_INSN (RBI (bb)->footer) = insn;
NEXT_INSN (insn) = bb->rbi->footer;
PREV_INSN (bb->rbi->footer) = insn;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
}
@ -580,7 +609,7 @@ fixup_reorder_chain (void)
/* Now add jumps and labels as needed to match the blocks new
outgoing edges. */
for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = RBI (bb)->next)
for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->rbi->next)
{
edge e_fall, e_taken, e;
rtx bb_end_insn;
@ -604,8 +633,8 @@ fixup_reorder_chain (void)
if (any_condjump_p (bb_end_insn))
{
/* If the old fallthru is still next, nothing to do. */
if (RBI (bb)->next == e_fall->dest
|| (!RBI (bb)->next
if (bb->rbi->next == e_fall->dest
|| (!bb->rbi->next
&& e_fall->dest == EXIT_BLOCK_PTR))
continue;
@ -645,7 +674,7 @@ fixup_reorder_chain (void)
such as happens at the very end of a function, then we'll
need to add a new unconditional jump. Choose the taken
edge based on known or assumed probability. */
else if (RBI (bb)->next != e_taken->dest)
else if (bb->rbi->next != e_taken->dest)
{
rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);
@ -684,7 +713,7 @@ fixup_reorder_chain (void)
#ifdef CASE_DROPS_THROUGH
/* Except for VAX. Since we didn't have predication for the
tablejump, the fallthru block should not have moved. */
if (RBI (bb)->next == e_fall->dest)
if (bb->rbi->next == e_fall->dest)
continue;
bb_end_insn = skip_insns_after_block (bb);
#else
@ -701,11 +730,11 @@ fixup_reorder_chain (void)
continue;
/* If the fallthru block is still next, nothing to do. */
if (RBI (bb)->next == e_fall->dest)
if (bb->rbi->next == e_fall->dest)
continue;
/* A fallthru to exit block. */
if (!RBI (bb)->next && e_fall->dest == EXIT_BLOCK_PTR)
if (!bb->rbi->next && e_fall->dest == EXIT_BLOCK_PTR)
continue;
}
@ -713,10 +742,10 @@ fixup_reorder_chain (void)
nb = force_nonfallthru (e_fall);
if (nb)
{
alloc_aux_for_block (nb, sizeof (struct reorder_block_def));
RBI (nb)->visited = 1;
RBI (nb)->next = RBI (bb)->next;
RBI (bb)->next = nb;
cfg_layout_initialize_rbi (nb);
nb->rbi->visited = 1;
nb->rbi->next = bb->rbi->next;
bb->rbi->next = nb;
/* Don't process this new block. */
bb = nb;
}
@ -727,12 +756,12 @@ fixup_reorder_chain (void)
if (rtl_dump_file)
{
fprintf (rtl_dump_file, "Reordered sequence:\n");
for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb; bb = RBI (bb)->next, index ++)
for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb; bb = bb->rbi->next, index ++)
{
fprintf (rtl_dump_file, " %i ", index);
if (RBI (bb)->original)
if (bb->rbi->original)
fprintf (rtl_dump_file, "duplicate of %i ",
RBI (bb)->original->index);
bb->rbi->original->index);
else if (forwarder_block_p (bb) && GET_CODE (bb->head) != CODE_LABEL)
fprintf (rtl_dump_file, "compensation ");
else
@ -745,7 +774,7 @@ fixup_reorder_chain (void)
bb = ENTRY_BLOCK_PTR->next_bb;
index = 0;
for (; bb; prev_bb = bb, bb = RBI (bb)->next, index ++)
for (; bb; prev_bb = bb, bb = bb->rbi->next, index ++)
{
bb->index = index;
BASIC_BLOCK (index) = bb;
@ -755,6 +784,16 @@ fixup_reorder_chain (void)
}
prev_bb->next_bb = EXIT_BLOCK_PTR;
EXIT_BLOCK_PTR->prev_bb = prev_bb;
/* Annoying special case - jump around dead jumptables left in the code. */
FOR_EACH_BB (bb)
{
edge e;
for (e = bb->succ; e && !(e->flags & EDGE_FALLTHRU); e = e->succ_next)
continue;
if (e && !can_fallthru (e->src, e->dest))
force_nonfallthru (e);
}
}
/* Perform sanity checks on the insn chain.
@ -788,87 +827,6 @@ verify_insn_chain (void)
abort ();
}
/* Remove any unconditional jumps and forwarder block creating fallthru
edges instead. During BB reordering, fallthru edges are not required
to target next basic block in the linear CFG layout, so the unconditional
jumps are not needed. If LOOPS is not null, also update loop structure &
dominators. */
static void
cleanup_unconditional_jumps (struct loops *loops)
{
basic_block bb;
FOR_EACH_BB (bb)
{
if (!bb->succ)
continue;
if (bb->succ->flags & EDGE_FALLTHRU)
continue;
if (!bb->succ->succ_next)
{
rtx insn;
if (GET_CODE (bb->head) != CODE_LABEL && forwarder_block_p (bb)
&& bb->prev_bb != ENTRY_BLOCK_PTR)
{
basic_block prev = bb->prev_bb;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing forwarder BB %i\n",
bb->index);
if (loops)
{
/* bb cannot be loop header, as it only has one entry
edge. It could be a loop latch. */
if (bb->loop_father->header == bb)
abort ();
if (bb->loop_father->latch == bb)
bb->loop_father->latch = bb->pred->src;
if (get_immediate_dominator
(loops->cfg.dom, bb->succ->dest) == bb)
set_immediate_dominator
(loops->cfg.dom, bb->succ->dest, bb->pred->src);
remove_bb_from_loops (bb);
delete_from_dominance_info (loops->cfg.dom, bb);
}
redirect_edge_succ_nodup (bb->pred, bb->succ->dest);
delete_block (bb);
bb = prev;
}
else if (simplejump_p (bb->end))
{
rtx jump = bb->end;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing jump %i in BB %i\n",
INSN_UID (jump), bb->index);
delete_insn (jump);
bb->succ->flags |= EDGE_FALLTHRU;
}
else
continue;
insn = NEXT_INSN (bb->end);
while (insn
&& (GET_CODE (insn) != NOTE
|| NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK))
{
rtx next = NEXT_INSN (insn);
if (GET_CODE (insn) == BARRIER)
delete_barrier (insn);
insn = next;
}
}
}
}
/* The block falling through to exit must be the last one in the
reordered chain. Ensure that this condition is met. */
static void
@ -881,19 +839,19 @@ fixup_fallthru_exit_predecessor (void)
if (e->flags & EDGE_FALLTHRU)
bb = e->src;
if (bb && RBI (bb)->next)
if (bb && bb->rbi->next)
{
basic_block c = ENTRY_BLOCK_PTR->next_bb;
while (RBI (c)->next != bb)
c = RBI (c)->next;
while (c->rbi->next != bb)
c = c->rbi->next;
RBI (c)->next = RBI (bb)->next;
while (RBI (c)->next)
c = RBI (c)->next;
c->rbi->next = bb->rbi->next;
while (c->rbi->next)
c = c->rbi->next;
RBI (c)->next = bb;
RBI (bb)->next = NULL;
c->rbi->next = bb;
bb->rbi->next = NULL;
}
}
@ -1050,26 +1008,25 @@ cfg_layout_duplicate_bb (basic_block bb, edge e)
new_bb = create_basic_block (insn,
insn ? get_last_insn () : NULL,
EXIT_BLOCK_PTR->prev_bb);
alloc_aux_for_block (new_bb, sizeof (struct reorder_block_def));
if (RBI (bb)->header)
if (bb->rbi->header)
{
insn = RBI (bb)->header;
insn = bb->rbi->header;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
insn = duplicate_insn_chain (RBI (bb)->header, insn);
insn = duplicate_insn_chain (bb->rbi->header, insn);
if (insn)
RBI (new_bb)->header = unlink_insn_chain (insn, get_last_insn ());
new_bb->rbi->header = unlink_insn_chain (insn, get_last_insn ());
}
if (RBI (bb)->footer)
if (bb->rbi->footer)
{
insn = RBI (bb)->footer;
insn = bb->rbi->footer;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
insn = duplicate_insn_chain (RBI (bb)->footer, insn);
insn = duplicate_insn_chain (bb->rbi->footer, insn);
if (insn)
RBI (new_bb)->footer = unlink_insn_chain (insn, get_last_insn ());
new_bb->rbi->footer = unlink_insn_chain (insn, get_last_insn ());
}
if (bb->global_live_at_start)
@ -1113,25 +1070,42 @@ cfg_layout_duplicate_bb (basic_block bb, edge e)
if (bb->frequency < 0)
bb->frequency = 0;
RBI (new_bb)->original = bb;
RBI (bb)->copy = new_bb;
new_bb->rbi->original = bb;
bb->rbi->copy = new_bb;
return new_bb;
}
void
cfg_layout_initialize_rbi (bb)
basic_block bb;
{
if (bb->rbi)
abort ();
bb->rbi = pool_alloc (cfg_layout_pool);
memset (bb->rbi, 0, sizeof (struct reorder_block_def));
}
/* Main entry point to this module - initialize the datastructures for
CFG layout changes. It keeps LOOPS up-to-date if not null. */
void
cfg_layout_initialize (struct loops *loops)
cfg_layout_initialize ()
{
basic_block bb;
/* Our algorithm depends on the fact that there are no dead jumptables
around the code. */
alloc_aux_for_blocks (sizeof (struct reorder_block_def));
cfg_layout_pool =
create_alloc_pool ("cfg layout pool", sizeof (struct reorder_block_def),
n_basic_blocks + 2);
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
cfg_layout_initialize_rbi (bb);
cfg_layout_rtl_register_cfg_hooks ();
cleanup_unconditional_jumps (loops);
record_effective_endpoints ();
cleanup_cfg (CLEANUP_CFGLAYOUT);
}
/* Splits superblocks. */
@ -1169,6 +1143,8 @@ break_superblocks (void)
void
cfg_layout_finalize (void)
{
basic_block bb;
#ifdef ENABLE_CHECKING
verify_flow_info ();
#endif
@ -1180,7 +1156,9 @@ cfg_layout_finalize (void)
verify_insn_chain ();
#endif
free_aux_for_blocks ();
free_alloc_pool (cfg_layout_pool);
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
bb->rbi = NULL;
break_superblocks ();

cfglayout.h

@ -33,13 +33,12 @@ typedef struct reorder_block_def
int visited;
} *reorder_block_def;
#define RBI(BB) ((reorder_block_def) (BB)->aux)
extern rtx cfg_layout_function_footer;
extern void cfg_layout_initialize (struct loops *);
extern void cfg_layout_initialize (void);
extern void cfg_layout_finalize (void);
extern bool cfg_layout_can_duplicate_bb_p (basic_block);
extern basic_block cfg_layout_duplicate_bb (basic_block, edge);
extern void insn_locators_initialize (void);
extern void reemit_insn_block_notes (void);
extern void cfg_layout_initialize_rbi (basic_block);

cfgloop.h

@ -294,8 +294,7 @@ extern int fix_loop_placement (struct loop *);
enum
{
CP_SIMPLE_PREHEADERS = 1,
CP_INSIDE_CFGLAYOUT = 2
CP_SIMPLE_PREHEADERS = 1
};
extern void create_preheaders (struct loops *, int);

cfgloopmanip.c

@ -860,7 +860,7 @@ copy_bbs (basic_block *bbs, int n, edge entry, edge latch_edge,
/* Duplicate. */
bb = bbs[i];
new_bb = (*new_bbs)[i] = cfg_layout_duplicate_bb (bb, NULL);
RBI (new_bb)->duplicated = 1;
new_bb->rbi->duplicated = 1;
/* Add to loop. */
add_bb_to_loop (new_bb, bb->loop_father->copy);
add_to_dominance_info (loops->cfg.dom, new_bb);
@ -886,7 +886,7 @@ copy_bbs (basic_block *bbs, int n, edge entry, edge latch_edge,
{
/* For anything else than loop header, just copy it. */
dom_bb = get_immediate_dominator (loops->cfg.dom, bb);
dom_bb = RBI (dom_bb)->copy;
dom_bb = dom_bb->rbi->copy;
}
else
{
@ -910,7 +910,7 @@ copy_bbs (basic_block *bbs, int n, edge entry, edge latch_edge,
e_pred = e->pred_next;
if (!RBI (src)->duplicated)
if (!src->rbi->duplicated)
continue;
/* Leads to copied loop and it is not latch edge, redirect it. */
@ -919,24 +919,24 @@ copy_bbs (basic_block *bbs, int n, edge entry, edge latch_edge,
if (add_irreducible_flag
&& (bb->loop_father == header->loop_father
|| RBI (src)->original->loop_father == header->loop_father))
|| src->rbi->original->loop_father == header->loop_father))
e->flags |= EDGE_IRREDUCIBLE_LOOP;
}
}
/* Redirect header edge. */
bb = RBI (latch_edge->src)->copy;
bb = latch_edge->src->rbi->copy;
for (e = bb->succ; e->dest != latch_edge->dest; e = e->succ_next);
*header_edge = e;
loop_redirect_edge (*header_edge, header);
/* Redirect entry to copy of header. */
loop_redirect_edge (entry, RBI (header)->copy);
loop_redirect_edge (entry, header->rbi->copy);
*copy_header_edge = entry;
/* Clear information about duplicates. */
for (i = 0; i < n; i++)
RBI ((*new_bbs)[i])->duplicated = 0;
(*new_bbs)[i]->rbi->duplicated = 0;
}
/* Check whether LOOP's body can be duplicated. */
@ -995,7 +995,7 @@ record_exit_edges (edge orig, basic_block *bbs, int nbbs, edge *to_remove,
return;
}
for (e = RBI (orig->src)->copy->succ; e; e = e->succ_next)
for (e = orig->src->rbi->copy->succ; e; e = e->succ_next)
if (e->dest == orig->dest)
break;
if (!e)
@ -1175,7 +1175,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops,
copy_bbs (bbs, n, e, latch_edge, &new_bbs, loops,
&e, &he, add_irreducible_flag);
if (is_latch)
loop->latch = RBI (latch)->copy;
loop->latch = latch->rbi->copy;
/* Record exit edges in this copy. */
if (TEST_BIT (wont_exit, j + 1))
@ -1209,7 +1209,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops,
if (!first_active_latch)
{
memcpy (first_active, new_bbs, n * sizeof (basic_block));
first_active_latch = RBI (latch)->copy;
first_active_latch = latch->rbi->copy;
}
free (new_bbs);
@ -1217,11 +1217,11 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e, struct loops *loops,
/* Original loop header is dominated by latch copy
if we duplicated on its only entry edge. */
if (!is_latch && !header->pred->pred_next->pred_next)
set_immediate_dominator (loops->cfg.dom, header, RBI (latch)->copy);
set_immediate_dominator (loops->cfg.dom, header, latch->rbi->copy);
if (is_latch && j == 0)
{
/* Update edge from latch. */
for (latch_edge = RBI (header)->copy->pred;
for (latch_edge = header->rbi->copy->pred;
latch_edge->src != latch;
latch_edge = latch_edge->pred_next);
}
@ -1409,33 +1409,20 @@ loop_split_edge_with (edge e, rtx insns, struct loops *loops)
/* Create basic block for it. */
new_bb = create_basic_block (NULL_RTX, NULL_RTX, EXIT_BLOCK_PTR->prev_bb);
new_bb = split_edge (e);
add_to_dominance_info (loops->cfg.dom, new_bb);
add_bb_to_loop (new_bb, loop_c);
new_bb->flags = insns ? BB_SUPERBLOCK : 0;
new_e = make_edge (new_bb, dest, EDGE_FALLTHRU);
new_e->probability = REG_BR_PROB_BASE;
new_e->count = e->count;
new_e = new_bb->succ;
if (e->flags & EDGE_IRREDUCIBLE_LOOP)
{
new_bb->flags |= BB_IRREDUCIBLE_LOOP;
new_e->flags |= EDGE_IRREDUCIBLE_LOOP;
}
new_bb->count = e->count;
new_bb->frequency = EDGE_FREQUENCY (e);
redirect_edge_and_branch_force (e, new_bb);
alloc_aux_for_block (new_bb, sizeof (struct reorder_block_def));
if (insns)
{
start_sequence ();
emit_insn (insns);
insns = get_insns ();
end_sequence ();
emit_insn_after (insns, new_bb->end);
}
emit_insn_after (insns, new_bb->end);
set_immediate_dominator (loops->cfg.dom, new_bb, src);
set_immediate_dominator (loops->cfg.dom, dest,

cfgrtl.c

@ -23,20 +23,19 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
that are aware of the RTL intermediate language.
Available functionality:
- Basic CFG/RTL manipulation API documented in cfghooks.h
- CFG-aware instruction chain manipulation
delete_insn, delete_insn_chain
- Basic block manipulation
create_basic_block, rtl_delete_block,rtl_split_block,
merge_blocks_nomove
- Edge splitting and committing to edges
insert_insn_on_edge, commit_edge_insertions
- CFG updating after insn simplification
purge_dead_edges, purge_all_dead_edges
Functions not supposed for generic use:
- Infrastructure to determine quickly basic block for insn
compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
- Edge redirection with updating and optimizing of insn chain
block_label, redirect_edge_and_branch,
redirect_edge_and_branch_force, tidy_fallthru_edge, force_nonfallthru
- Edge splitting and committing to edges
split_edge, insert_insn_on_edge, commit_edge_insertions
- CFG updating after constant propagation
purge_dead_edges, purge_all_dead_edges */
block_label, tidy_fallthru_edge, force_nonfallthru */
#include "config.h"
#include "system.h"
@ -73,7 +72,6 @@ rtx tail_recursion_label_list;
static int can_delete_note_p (rtx);
static int can_delete_label_p (rtx);
static void commit_one_edge_insertion (edge, int);
static bool try_redirect_by_replacing_jump (edge, basic_block);
static rtx last_loop_beg_note (rtx);
static bool back_edge_of_syntactic_loop_p (basic_block, basic_block);
basic_block force_nonfallthru_and_redirect (edge, basic_block);
@ -326,9 +324,10 @@ create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after)
create new empty basic block before HEAD. Both END and HEAD can be NULL to
create basic block at the end of INSN chain. */
basic_block
create_basic_block (rtx head, rtx end, basic_block after)
static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
rtx head = headp, end = endp;
basic_block bb;
/* Place the new block just after the end. */
@ -340,6 +339,15 @@ create_basic_block (rtx head, rtx end, basic_block after)
bb->aux = NULL;
return bb;
}
static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
basic_block newbb = rtl_create_basic_block (head, end, after);
cfg_layout_initialize_rbi (newbb);
return newbb;
}
/* Delete the insns in a (non-live) block. We physically delete every
non-deleted-note insn, and update the flow graph appropriately.
@ -517,16 +525,42 @@ rtl_split_block (basic_block bb, void *insnp)
return new_edge;
}
/* Blocks A and B are to be merged into a single block A. The insns
are already contiguous, hence `nomove'. */
/* Assume that the code of basic block B has been merged into A.
Do corresponding CFG updates: redirect edges accordingly etc. */
static void
update_cfg_after_block_merging (basic_block a, basic_block b)
{
edge e;
void
merge_blocks_nomove (basic_block a, basic_block b)
/* Normally there should only be one successor of A and that is B, but
partway through the merge of blocks for conditional_execution we'll
be merging a TEST block with THEN and ELSE successors. Free the
whole lot of them and hope the caller knows what they're doing. */
while (a->succ)
remove_edge (a->succ);
/* Adjust the edges out of B for the new owner. */
for (e = b->succ; e; e = e->succ_next)
e->src = a;
a->succ = b->succ;
a->flags |= b->flags;
/* B hasn't quite yet ceased to exist. Attempt to prevent mishap. */
b->pred = b->succ = NULL;
a->global_live_at_end = b->global_live_at_end;
expunge_block (b);
}
/* Blocks A and B are to be merged into a single block A. The insns
are already contiguous. */
static void
rtl_merge_blocks (basic_block a, basic_block b)
{
rtx b_head = b->head, b_end = b->end, a_end = a->end;
rtx del_first = NULL_RTX, del_last = NULL_RTX;
int b_empty = 0;
edge e;
/* If there was a CODE_LABEL beginning B, delete it. */
if (GET_CODE (b_head) == CODE_LABEL)
@ -585,24 +619,7 @@ merge_blocks_nomove (basic_block a, basic_block b)
else if (GET_CODE (NEXT_INSN (a_end)) == BARRIER)
del_first = NEXT_INSN (a_end);
/* Normally there should only be one successor of A and that is B, but
partway though the merge of blocks for conditional_execution we'll
be merging a TEST block with THEN and ELSE successors. Free the
whole lot of them and hope the caller knows what they're doing. */
while (a->succ)
remove_edge (a->succ);
/* Adjust the edges out of B for the new owner. */
for (e = b->succ; e; e = e->succ_next)
e->src = a;
a->succ = b->succ;
a->flags |= b->flags;
/* B hasn't quite yet ceased to exist. Attempt to prevent mishap. */
b->pred = b->succ = NULL;
a->global_live_at_end = b->global_live_at_end;
expunge_block (b);
update_cfg_after_block_merging (a, b);
/* Delete everything marked above as well as crap that might be
hanging out between the two blocks. */
@ -623,6 +640,24 @@ merge_blocks_nomove (basic_block a, basic_block b)
a->end = a_end;
}
/* Return true when blocks A and B can be merged. */
static bool
rtl_can_merge_blocks (basic_block a,basic_block b)
{
/* There must be exactly one edge in between the blocks. */
return (a->succ && !a->succ->succ_next && a->succ->dest == b
&& !b->pred->pred_next && a != b
/* Must be simple edge. */
&& !(a->succ->flags & EDGE_COMPLEX)
&& a->next_bb == b
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
we can't kill the edge. */
&& (GET_CODE (a->end) != JUMP_INSN
|| (flow2_completed
? simplejump_p (a->end) : onlyjump_p (a->end))));
}
/* Return the label in the head of basic block BLOCK. Create one if it doesn't
exist. */
@ -647,7 +682,7 @@ block_label (basic_block block)
return values are equivalent to redirect_edge_and_branch. */
static bool
try_redirect_by_replacing_jump (edge e, basic_block target)
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
basic_block src = e->src;
rtx insn = src->end, kill_from;
@ -679,14 +714,38 @@ try_redirect_by_replacing_jump (edge e, basic_block target)
#endif
/* See if we can create the fallthru edge. */
if (can_fallthru (src, target))
if (in_cfglayout || can_fallthru (src, target))
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing jump %i.\n", INSN_UID (insn));
fallthru = 1;
/* Selectively unlink whole insn chain. */
delete_insn_chain (kill_from, PREV_INSN (target->head));
if (in_cfglayout)
{
rtx insn = src->rbi->footer;
delete_insn_chain (kill_from, src->end);
/* Remove barriers but keep jumptables. */
while (insn)
{
if (GET_CODE (insn) == BARRIER)
{
if (PREV_INSN (insn))
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
else
src->rbi->footer = NEXT_INSN (insn);
if (NEXT_INSN (insn))
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
}
if (GET_CODE (insn) == CODE_LABEL)
break;
insn = NEXT_INSN (insn);
}
}
else
delete_insn_chain (kill_from, PREV_INSN (target->head));
}
/* If this already is simplejump, redirect it. */
@ -782,36 +841,15 @@ last_loop_beg_note (rtx insn)
return last;
}
/* Attempt to change code to redirect edge E to TARGET. Don't do that on
expense of adding new instructions or reordering basic blocks.
Function can be also called with edge destination equivalent to the TARGET.
Then it should try the simplifications and do nothing if none is possible.
Return true if transformation succeeded. We still return false in case E
already destinated TARGET and we didn't managed to simplify instruction
stream. */
/* Redirect edge representing branch of (un)conditional jump or tablejump. */
static bool
rtl_redirect_edge_and_branch (edge e, basic_block target)
redirect_branch_edge (edge e, basic_block target)
{
rtx tmp;
rtx old_label = e->dest->head;
basic_block src = e->src;
rtx insn = src->end;
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
return false;
if (try_redirect_by_replacing_jump (e, target))
return true;
/* Do this fast path late, as we want above code to simplify for cases
where called on single edge leaving basic block containing nontrivial
jump insn. */
else if (e->dest == target)
return false;
/* We can only redirect non-fallthru edges of jump insn. */
if (e->flags & EDGE_FALLTHRU)
return false;
@ -884,6 +922,37 @@ rtl_redirect_edge_and_branch (edge e, basic_block target)
if (e->dest != target)
redirect_edge_succ_nodup (e, target);
return true;
}
/* Attempt to change code to redirect edge E to TARGET. Don't do that at
the expense of adding new instructions or reordering basic blocks.
The function can also be called with the edge destination equivalent to
TARGET. Then it should try the simplifications and do nothing if none is
possible. Return true if the transformation succeeded. We still return
false in case E already pointed to TARGET and we did not manage to
simplify the instruction stream. */
static bool
rtl_redirect_edge_and_branch (e, target)
edge e;
basic_block target;
{
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
return false;
if (try_redirect_by_replacing_jump (e, target, false))
return true;
/* Do this fast path late, as we want above code to simplify for cases
where called on single edge leaving basic block containing nontrivial
jump insn. */
else if (e->dest == target)
return false;
else if (!redirect_branch_edge (e, target))
return false;
return true;
}
@ -2323,9 +2392,8 @@ cfg_layout_split_block (basic_block bb, void *insnp)
edge fallthru = rtl_split_block (bb, insn);
alloc_aux_for_block (fallthru->dest, sizeof (struct reorder_block_def));
RBI (fallthru->dest)->footer = RBI (fallthru->src)->footer;
RBI (fallthru->src)->footer = NULL;
fallthru->dest->rbi->footer = fallthru->src->rbi->footer;
fallthru->src->rbi->footer = NULL;
return fallthru;
}
@ -2335,14 +2403,33 @@ static bool
cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
{
basic_block src = e->src;
basic_block old_next_bb = src->next_bb;
bool ret;
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
return false;
if (e->src != ENTRY_BLOCK_PTR
&& try_redirect_by_replacing_jump (e, dest, true))
return true;
if (e->dest == dest)
return true;
if (e->src == ENTRY_BLOCK_PTR
&& (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Redirecting entry edge from bb %i to %i\n",
e->src->index, dest->index);
redirect_edge_succ (e, dest);
return true;
}
/* Redirect_edge_and_branch may decide to turn branch into fallthru edge
in the case the basic block appears to be in sequence. Avoid this
transformation. */
src->next_bb = NULL;
if (e->flags & EDGE_FALLTHRU)
{
/* Redirect any branch edges unified with the fallthru one. */
@ -2364,20 +2451,18 @@ cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
delete_insn (src->end);
}
redirect_edge_succ_nodup (e, dest);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Fallthru edge %i->%i redirected to %i\n",
e->src->index, e->dest->index, dest->index);
ret = true;
}
else
ret = rtl_redirect_edge_and_branch (e, dest);
ret = redirect_branch_edge (e, dest);
/* We don't want simplejumps in the insn stream during cfglayout. */
if (simplejump_p (src->end))
{
delete_insn (src->end);
delete_barrier (NEXT_INSN (src->end));
src->succ->flags |= EDGE_FALLTHRU;
}
src->next_bb = old_next_bb;
abort ();
return ret;
}
@ -2397,36 +2482,55 @@ cfg_layout_delete_block (basic_block bb)
{
rtx insn, next, prev = PREV_INSN (bb->head), *to, remaints;
if (RBI (bb)->header)
if (bb->rbi->header)
{
next = bb->head;
if (prev)
NEXT_INSN (prev) = RBI (bb)->header;
NEXT_INSN (prev) = bb->rbi->header;
else
set_first_insn (RBI (bb)->header);
PREV_INSN (RBI (bb)->header) = prev;
insn = RBI (bb)->header;
set_first_insn (bb->rbi->header);
PREV_INSN (bb->rbi->header) = prev;
insn = bb->rbi->header;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
PREV_INSN (next) = insn;
}
next = NEXT_INSN (bb->end);
if (RBI (bb)->footer)
if (bb->rbi->footer)
{
insn = bb->end;
NEXT_INSN (insn) = RBI (bb)->footer;
PREV_INSN (RBI (bb)->footer) = insn;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
if (next)
PREV_INSN (next) = insn;
else
set_last_insn (insn);
insn = bb->rbi->footer;
while (insn)
{
if (GET_CODE (insn) == BARRIER)
{
if (PREV_INSN (insn))
NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
else
bb->rbi->footer = NEXT_INSN (insn);
if (NEXT_INSN (insn))
PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
}
if (GET_CODE (insn) == CODE_LABEL)
break;
insn = NEXT_INSN (insn);
}
if (bb->rbi->footer)
{
insn = bb->end;
NEXT_INSN (insn) = bb->rbi->footer;
PREV_INSN (bb->rbi->footer) = insn;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
if (next)
PREV_INSN (next) = insn;
else
set_last_insn (insn);
}
}
if (bb->next_bb != EXIT_BLOCK_PTR)
to = &RBI(bb->next_bb)->header;
to = &bb->next_bb->rbi->header;
else
to = &cfg_layout_function_footer;
rtl_delete_block (bb);
@ -2453,14 +2557,141 @@ cfg_layout_delete_block (basic_block bb)
}
}
/* Return true when blocks A and B can be safely merged. */
static bool
cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
{
/* There must be exactly one edge in between the blocks. */
return (a->succ && !a->succ->succ_next && a->succ->dest == b
&& !b->pred->pred_next && a != b
/* Must be simple edge. */
&& !(a->succ->flags & EDGE_COMPLEX)
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
we can't kill the edge. */
&& (GET_CODE (a->end) != JUMP_INSN
|| (flow2_completed
? simplejump_p (a->end) : onlyjump_p (a->end))));
}
/* Merge block A and B, abort when it is not possible. */
static void
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
#ifdef ENABLE_CHECKING
if (!cfg_layout_can_merge_blocks_p (a, b))
abort ();
#endif
/* If there was a CODE_LABEL beginning B, delete it. */
if (GET_CODE (b->head) == CODE_LABEL)
delete_insn (b->head);
/* We should have a fallthru edge in A, or we can do a dummy redirection to
get it cleaned up. */
if (GET_CODE (a->end) == JUMP_INSN)
redirect_edge_and_branch (a->succ, b);
if (GET_CODE (a->end) == JUMP_INSN)
abort ();
/* Possible line number notes should appear in between. */
if (b->rbi->header)
{
rtx first = a->end, last;
last = emit_insn_after (b->rbi->header, a->end);
delete_insn_chain (NEXT_INSN (first), last);
b->rbi->header = NULL;
}
/* In the case basic blocks are not adjacent, move them around. */
if (NEXT_INSN (a->end) != b->head)
{
rtx first = unlink_insn_chain (b->head, b->end);
emit_insn_after (first, a->end);
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (first))
first = NEXT_INSN (first);
if (!NOTE_INSN_BASIC_BLOCK_P (first))
abort ();
b->head = NULL;
delete_insn (first);
}
/* Otherwise just re-associate the instructions. */
else
{
rtx insn;
for (insn = b->head; insn != NEXT_INSN (b->end); insn = NEXT_INSN (insn))
set_block_for_insn (insn, a);
insn = b->head;
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (insn))
insn = NEXT_INSN (insn);
if (!NOTE_INSN_BASIC_BLOCK_P (insn))
abort ();
b->head = NULL;
a->end = b->end;
delete_insn (insn);
}
/* Possible tablejumps and barriers should appear after the block. */
if (b->rbi->footer)
{
if (!a->rbi->footer)
a->rbi->footer = b->rbi->footer;
else
{
rtx last = a->rbi->footer;
while (NEXT_INSN (last))
last = NEXT_INSN (last);
NEXT_INSN (last) = b->rbi->footer;
PREV_INSN (b->rbi->footer) = last;
}
b->rbi->footer = NULL;
}
if (rtl_dump_file)
fprintf (rtl_dump_file, "Merged blocks %d and %d.\n",
a->index, b->index);
update_cfg_after_block_merging (a, b);
}
/* Split edge E. */
static basic_block
cfg_layout_split_edge (edge e)
{
edge new_e;
basic_block new_bb =
create_basic_block (e->src != ENTRY_BLOCK_PTR
? NEXT_INSN (e->src->end) : get_insns (),
NULL_RTX, e->src);
new_bb->count = e->count;
new_bb->frequency = EDGE_FREQUENCY (e);
new_e = make_edge (new_bb, e->dest, EDGE_FALLTHRU);
new_e->probability = REG_BR_PROB_BASE;
new_e->count = e->count;
redirect_edge_and_branch_force (e, new_bb);
return new_bb;
}
/* Implementation of CFG manipulation for linearized RTL. */
struct cfg_hooks rtl_cfg_hooks = {
rtl_verify_flow_info,
rtl_dump_bb,
rtl_create_basic_block,
rtl_redirect_edge_and_branch,
rtl_redirect_edge_and_branch_force,
rtl_delete_block,
rtl_split_block,
rtl_can_merge_blocks, /* can_merge_blocks_p */
rtl_merge_blocks,
rtl_split_edge
};
@ -2469,11 +2700,14 @@ struct cfg_hooks rtl_cfg_hooks = {
This representation will hopefully become the default one in future
version of the compiler. */
struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
rtl_verify_flow_info_1, /* verify_flow_info. */
rtl_verify_flow_info_1,
rtl_dump_bb,
cfg_layout_create_basic_block,
cfg_layout_redirect_edge_and_branch,
cfg_layout_redirect_edge_and_branch_force,
cfg_layout_delete_block,
cfg_layout_split_block,
NULL /* split_edge. */
cfg_layout_can_merge_blocks_p,
cfg_layout_merge_blocks,
cfg_layout_split_edge
};

flow.c

@ -2063,7 +2063,10 @@ propagate_block (basic_block bb, regset live, regset local_set,
IOR_REG_SET (regs_live_at_setjmp, pbi->reg_live);
prev = propagate_one_insn (pbi, insn);
changed |= NEXT_INSN (prev) != insn;
if (!prev)
changed |= insn != get_insns ();
else
changed |= NEXT_INSN (prev) != insn;
if (insn == bb->head)
break;

ifcvt.c

@ -2016,7 +2016,7 @@ merge_if_block (ce_info)
fallthru = block_fallthru (bb);
if (post_dominators)
delete_from_dominance_info (post_dominators, bb);
merge_blocks_nomove (combo_bb, bb);
merge_blocks (combo_bb, bb);
num_removed_blocks++;
}
while (bb != last_test_bb);
@ -2033,7 +2033,7 @@ merge_if_block (ce_info)
then_bb->global_live_at_end);
if (post_dominators)
delete_from_dominance_info (post_dominators, then_bb);
merge_blocks_nomove (combo_bb, then_bb);
merge_blocks (combo_bb, then_bb);
num_removed_blocks++;
}
@ -2044,7 +2044,7 @@ merge_if_block (ce_info)
{
if (post_dominators)
delete_from_dominance_info (post_dominators, else_bb);
merge_blocks_nomove (combo_bb, else_bb);
merge_blocks (combo_bb, else_bb);
num_removed_blocks++;
}
@ -2101,7 +2101,7 @@ merge_if_block (ce_info)
if (post_dominators)
delete_from_dominance_info (post_dominators, join_bb);
merge_blocks_nomove (combo_bb, join_bb);
merge_blocks (combo_bb, join_bb);
num_removed_blocks++;
}
else
@ -3129,7 +3129,7 @@ if_convert (x_life_data_ok)
life_data_ok = (x_life_data_ok != 0);
/* Free up basic_block_for_insn so that we don't have to keep it
up to date, either here or in merge_blocks_nomove. */
up to date, either here or in merge_blocks. */
free_basic_block_vars (1);
/* Compute postdominators if we think we'll use them. */

loop-init.c

@ -37,6 +37,9 @@ loop_optimizer_init (dumpfile)
struct loops *loops = xcalloc (1, sizeof (struct loops));
edge e;
/* Initialize structures for layout changes. */
cfg_layout_initialize ();
/* Avoid annoying special cases of edges going to exit
block. */
for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
@ -47,9 +50,16 @@ loop_optimizer_init (dumpfile)
if (flow_loops_find (loops, LOOP_TREE) <= 1)
{
basic_block bb;
/* No loops. */
flow_loops_free (loops);
free (loops);
/* Make chain. */
FOR_EACH_BB (bb)
if (bb->next_bb != EXIT_BLOCK_PTR)
bb->rbi->next = bb->next_bb;
cfg_layout_finalize ();
return NULL;
}
@ -59,11 +69,8 @@ loop_optimizer_init (dumpfile)
free (loops->cfg.dfs_order);
loops->cfg.dfs_order = NULL;
/* Initialize structures for layout changes. */
cfg_layout_initialize (loops);
/* Create pre-headers. */
create_preheaders (loops, CP_SIMPLE_PREHEADERS | CP_INSIDE_CFGLAYOUT);
create_preheaders (loops, CP_SIMPLE_PREHEADERS);
/* Force all latches to have only single successor. */
force_single_succ_latches (loops);
@ -94,7 +101,7 @@ loop_optimizer_finalize (loops, dumpfile)
/* Make chain. */
FOR_EACH_BB (bb)
if (bb->next_bb != EXIT_BLOCK_PTR)
RBI (bb)->next = bb->next_bb;
bb->rbi->next = bb->next_bb;
/* Another dump. */
flow_loops_dump (loops, dumpfile, NULL, 1);

loop-unswitch.c

@ -377,7 +377,7 @@ unswitch_loop (loops, loop, unswitch_on)
entry->flags |= irred_flag;
/* Record the block with condition we unswitch on. */
unswitch_on_alt = RBI (unswitch_on)->copy;
unswitch_on_alt = unswitch_on->rbi->copy;
/* Make a copy of the block containing the condition; we will use
it as switch to decide which loop we want to use. */
@ -395,14 +395,14 @@ unswitch_loop (loops, loop, unswitch_on)
switch_bb->succ->succ_next->flags &= ~EDGE_IRREDUCIBLE_LOOP;
}
add_to_dominance_info (loops->cfg.dom, switch_bb);
RBI (unswitch_on)->copy = unswitch_on_alt;
unswitch_on->rbi->copy = unswitch_on_alt;
/* Loopify from the copy of LOOP body, constructing the new loop. */
for (latch_edge = RBI (loop->latch)->copy->succ;
for (latch_edge = loop->latch->rbi->copy->succ;
latch_edge->dest != loop->header;
latch_edge = latch_edge->succ_next);
nloop = loopify (loops, latch_edge,
RBI (loop->header)->copy->pred, switch_bb);
loop->header->rbi->copy->pred, switch_bb);
/* Remove branches that are now unreachable in new loops. We rely on the
fact that cfg_layout_duplicate_bb reverses list of edges. */

tracer.c

@ -64,7 +64,7 @@ static int branch_ratio_cutoff;
/* Return true if BB has been seen - it is connected to some trace
already. */
#define seen(bb) (RBI (bb)->visited || RBI (bb)->next)
#define seen(bb) (bb->rbi->visited || bb->rbi->next)
/* Return true if we should ignore the basic block for purposes of tracing. */
static bool
@ -295,8 +295,8 @@ tail_duplicate ()
fprintf (rtl_dump_file, "Duplicated %i as %i [%i]\n",
old->index, bb2->index, bb2->frequency);
}
RBI (bb)->next = bb2;
RBI (bb2)->visited = 1;
bb->rbi->next = bb2;
bb2->rbi->visited = 1;
bb = bb2;
/* In case the trace became infrequent, stop duplicating. */
if (ignore_bb_p (bb))
@ -330,28 +330,28 @@ layout_superblocks ()
while (bb != EXIT_BLOCK_PTR)
{
edge e, best = NULL;
while (RBI (end)->next)
end = RBI (end)->next;
while (end->rbi->next)
end = end->rbi->next;
for (e = end->succ; e; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR
&& e->dest != ENTRY_BLOCK_PTR->succ->dest
&& !RBI (e->dest)->visited
&& !e->dest->rbi->visited
&& (!best || EDGE_FREQUENCY (e) > EDGE_FREQUENCY (best)))
best = e;
if (best)
{
RBI (end)->next = best->dest;
RBI (best->dest)->visited = 1;
end->rbi->next = best->dest;
best->dest->rbi->visited = 1;
}
else
for (; bb != EXIT_BLOCK_PTR; bb = bb->next_bb)
{
if (!RBI (bb)->visited)
if (!bb->rbi->visited)
{
RBI (end)->next = bb;
RBI (bb)->visited = 1;
end->rbi->next = bb;
bb->rbi->visited = 1;
break;
}
}
@ -365,7 +365,7 @@ tracer ()
{
if (n_basic_blocks <= 1)
return;
cfg_layout_initialize (NULL);
cfg_layout_initialize ();
mark_dfs_back_edges ();
if (rtl_dump_file)
dump_flow_info (rtl_dump_file);