re PR middle-end/59917 (ICE in calc_dfs_tree, at dominance.c:401)

PR middle-end/59917
	PR tree-optimization/59920
	* tree.c (build_common_builtin_nodes): Remove
	__builtin_setjmp_dispatcher initialization.
	* omp-low.h (make_gimple_omp_edges): Add a new int * argument.
	* profile.c (branch_prob): Use gsi_start_nondebug_after_labels_bb
	instead of gsi_after_labels + manually skipping debug stmts.
	Don't ignore bbs with BUILT_IN_SETJMP_DISPATCHER, instead
	ignore bbs with IFN_ABNORMAL_DISPATCHER.
	* tree-inline.c (copy_edges_for_bb): Remove
	can_make_abnormal_goto argument, instead add abnormal_goto_dest
	argument.  Ignore computed_goto_p stmts.  Don't call
	make_abnormal_goto_edges.  If a call might need abnormal edges
	for non-local gotos, see if it already has an edge to
	IFN_ABNORMAL_DISPATCHER or is itself IFN_ABNORMAL_DISPATCHER
	with a true argument; if so, do nothing, otherwise add
	EDGE_ABNORMAL from the call's bb to abnormal_goto_dest.
	(copy_cfg_body): Compute abnormal_goto_dest, adjust copy_edges_for_bb
	caller.
	* gimple-low.c (struct lower_data): Remove calls_builtin_setjmp.
	(lower_function_body): Don't emit __builtin_setjmp_dispatcher.
	(lower_stmt): Don't set data->calls_builtin_setjmp.
	(lower_builtin_setjmp): Adjust comment.
	* builtins.def (BUILT_IN_SETJMP_DISPATCHER): Remove.
	* tree-cfg.c (found_computed_goto): Remove.
	(factor_computed_gotos): Remove.
	(make_goto_expr_edges): Return bool, true for computed gotos.
	Don't call make_abnormal_goto_edges.
	(build_gimple_cfg): Don't set found_computed_goto, don't call
	factor_computed_gotos.
	(computed_goto_p): No longer static.
	(make_blocks): Don't set found_computed_goto.
	(get_abnormal_succ_dispatcher, handle_abnormal_edges): New functions.
	(make_edges): If make_goto_expr_edges returns true, push bb
	into the ab_edge_goto vector; for stmt_can_make_abnormal_goto
	calls, push bb into the ab_edge_call vector instead of calling
	make_abnormal_goto_edges.  Record mapping between bbs and OpenMP
	regions if there
	are any, adjust make_gimple_omp_edges caller.  Call
	handle_abnormal_edges.
	(make_abnormal_goto_edges): Remove.
	* tree-cfg.h (make_abnormal_goto_edges): Remove.
	(computed_goto_p, get_abnormal_succ_dispatcher): New prototypes.
	* internal-fn.c (expand_ABNORMAL_DISPATCHER): New function.
	* builtins.c (expand_builtin): Don't handle
	BUILT_IN_SETJMP_DISPATCHER.
	* internal-fn.def (ABNORMAL_DISPATCHER): New.
	* omp-low.c (make_gimple_omp_edges): Add region_idx argument; when
	filling *region also set *region_idx to (*region)->entry->index.

	* gcc.dg/pr59920-1.c: New test.
	* gcc.dg/pr59920-2.c: New test.
	* gcc.dg/pr59920-3.c: New test.
	* c-c++-common/gomp/pr59917-1.c: New test.
	* c-c++-common/gomp/pr59917-2.c: New test.

From-SVN: r207231
Author: Jakub Jelinek <jakub@redhat.com>
Date:   2014-01-29 12:02:46 +01:00
Commit: 09b22f48d4
Parent: 42eb8bd179
19 changed files with 499 additions and 235 deletions
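
To make the factoring concrete, here is an illustrative sketch (an
editorial addition, not part of the commit; the committed tests below
are the real reproducers).  In a function that calls setjmp, roughly
every call is a potential source of abnormal control flow and every
setjmp landing point is a receiver, so wiring each source directly to
each receiver costs sources * receivers abnormal edges, while routing
everything through one artificial IFN_ABNORMAL_DISPATCHER block costs
only sources + receivers:

/* Hypothetical reduction of the pr59920 pattern.  Before this commit
   the CFG builder added one abnormal edge per (call, receiver) pair,
   so a few hundred setjmp uses meant tens of thousands of edges.
   Now each call gets a single abnormal edge to the shared
   ABNORMAL_DISPATCHER block, which in turn has one abnormal edge per
   receiver, keeping the complexity explosion localized.  */
#include <setjmp.h>

extern jmp_buf buf;
int bar (void);
void baz (int);

void
foo (void)
{
  if (setjmp (buf) == 0)	/* receiver 1; the calls are sources */
    baz (bar ());
  if (setjmp (buf) == 0)	/* receiver 2 */
    baz (bar ());
}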

gcc/ChangeLog

@@ -1,5 +1,54 @@
2014-01-29 Jakub Jelinek <jakub@redhat.com>
PR middle-end/59917
PR tree-optimization/59920
* tree.c (build_common_builtin_nodes): Remove
__builtin_setjmp_dispatcher initialization.
* omp-low.h (make_gimple_omp_edges): Add a new int * argument.
* profile.c (branch_prob): Use gsi_start_nondebug_after_labels_bb
instead of gsi_after_labels + manually skipping debug stmts.
Don't ignore bbs with BUILT_IN_SETJMP_DISPATCHER, instead
ignore bbs with IFN_ABNORMAL_DISPATCHER.
* tree-inline.c (copy_edges_for_bb): Remove
can_make_abnormal_goto argument, instead add abnormal_goto_dest
argument. Ignore computed_goto_p stmts. Don't call
make_abnormal_goto_edges. If a call might need abnormal edges
for non-local gotos, see if it already has an edge to
IFN_ABNORMAL_DISPATCHER or is itself IFN_ABNORMAL_DISPATCHER
with a true argument; if so, do nothing, otherwise add
EDGE_ABNORMAL from the call's bb to abnormal_goto_dest.
(copy_cfg_body): Compute abnormal_goto_dest, adjust copy_edges_for_bb
caller.
* gimple-low.c (struct lower_data): Remove calls_builtin_setjmp.
(lower_function_body): Don't emit __builtin_setjmp_dispatcher.
(lower_stmt): Don't set data->calls_builtin_setjmp.
(lower_builtin_setjmp): Adjust comment.
* builtins.def (BUILT_IN_SETJMP_DISPATCHER): Remove.
* tree-cfg.c (found_computed_goto): Remove.
(factor_computed_gotos): Remove.
(make_goto_expr_edges): Return bool, true for computed gotos.
Don't call make_abnormal_goto_edges.
(build_gimple_cfg): Don't set found_computed_goto, don't call
factor_computed_gotos.
(computed_goto_p): No longer static.
(make_blocks): Don't set found_computed_goto.
(get_abnormal_succ_dispatcher, handle_abnormal_edges): New functions.
(make_edges): If make_goto_expr_edges returns true, push bb
into the ab_edge_goto vector; for stmt_can_make_abnormal_goto calls,
push bb into the ab_edge_call vector instead of calling
make_abnormal_goto_edges. Record mapping between bbs and OpenMP
regions if there
are any, adjust make_gimple_omp_edges caller. Call
handle_abnormal_edges.
(make_abnormal_goto_edges): Remove.
* tree-cfg.h (make_abnormal_goto_edges): Remove.
(computed_goto_p, get_abnormal_succ_dispatcher): New prototypes.
* internal-fn.c (expand_ABNORMAL_DISPATCHER): New function.
* builtins.c (expand_builtin): Don't handle
BUILT_IN_SETJMP_DISPATCHER.
* internal-fn.def (ABNORMAL_DISPATCHER): New.
* omp-low.c (make_gimple_omp_edges): Add region_idx argument; when
filling *region also set *region_idx to (*region)->entry->index.
PR other/58712
* read-rtl.c (read_rtx_code): Clear all of RTX_CODE_SIZE (code).
For REGs set ORIGINAL_REGNO.

gcc/builtins.c

@@ -6205,20 +6205,6 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode,
}
break;
case BUILT_IN_SETJMP_DISPATCHER:
/* __builtin_setjmp_dispatcher is passed the dispatcher label. */
if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
{
tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
rtx label_r = label_rtx (label);
/* Remove the dispatcher label from the list of non-local labels
since the receiver labels have been added to it above. */
remove_node_from_expr_list (label_r, &nonlocal_goto_handler_labels);
return const0_rtx;
}
break;
case BUILT_IN_SETJMP_RECEIVER:
/* __builtin_setjmp_receiver is passed the receiver label. */
if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE))

gcc/builtins.def

@@ -783,7 +783,6 @@ DEF_BUILTIN_STUB (BUILT_IN_NONLOCAL_GOTO, "__builtin_nonlocal_goto")
/* Implementing __builtin_setjmp. */
DEF_BUILTIN_STUB (BUILT_IN_SETJMP_SETUP, "__builtin_setjmp_setup")
DEF_BUILTIN_STUB (BUILT_IN_SETJMP_DISPATCHER, "__builtin_setjmp_dispatcher")
DEF_BUILTIN_STUB (BUILT_IN_SETJMP_RECEIVER, "__builtin_setjmp_receiver")
/* Implementing variable sized local variables. */

gcc/gimple-low.c

@@ -76,9 +76,6 @@ struct lower_data
/* True if the current statement cannot fall through. */
bool cannot_fallthru;
/* True if the function calls __builtin_setjmp. */
bool calls_builtin_setjmp;
};
static void lower_stmt (gimple_stmt_iterator *, struct lower_data *);
@@ -99,7 +96,6 @@ lower_function_body (void)
gimple_seq lowered_body;
gimple_stmt_iterator i;
gimple bind;
tree t;
gimple x;
/* The gimplifier should've left a body of exactly one statement,
@@ -146,34 +142,6 @@
gsi_insert_after (&i, t.stmt, GSI_CONTINUE_LINKING);
}
/* If the function calls __builtin_setjmp, we need to emit the computed
goto that will serve as the unique dispatcher for all the receivers. */
if (data.calls_builtin_setjmp)
{
tree disp_label, disp_var, arg;
/* Build 'DISP_LABEL:' and insert. */
disp_label = create_artificial_label (cfun->function_end_locus);
/* This mark will create forward edges from every call site. */
DECL_NONLOCAL (disp_label) = 1;
cfun->has_nonlocal_label = 1;
x = gimple_build_label (disp_label);
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
/* Build 'DISP_VAR = __builtin_setjmp_dispatcher (DISP_LABEL);'
and insert. */
disp_var = create_tmp_var (ptr_type_node, "setjmpvar");
arg = build_addr (disp_label, current_function_decl);
t = builtin_decl_implicit (BUILT_IN_SETJMP_DISPATCHER);
x = gimple_build_call (t, 1, arg);
gimple_call_set_lhs (x, disp_var);
/* Build 'goto DISP_VAR;' and insert. */
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
x = gimple_build_goto (disp_var);
gsi_insert_after (&i, x, GSI_CONTINUE_LINKING);
}
/* Once the old body has been lowered, replace it with the new
lowered sequence. */
gimple_set_body (current_function_decl, lowered_body);
@@ -364,7 +332,6 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
{
lower_builtin_setjmp (gsi);
data->cannot_fallthru = false;
data->calls_builtin_setjmp = true;
return;
}
@@ -689,15 +656,12 @@ lower_gimple_return (gimple_stmt_iterator *gsi, struct lower_data *data)
all will be used on all machines). It operates similarly to the C
library function of the same name, but is more efficient.
It is lowered into 3 other builtins, namely __builtin_setjmp_setup,
__builtin_setjmp_dispatcher and __builtin_setjmp_receiver, but with
__builtin_setjmp_dispatcher shared among all the instances; that's
why it is only emitted at the end by lower_function_body.
It is lowered into 2 other builtins, namely __builtin_setjmp_setup,
__builtin_setjmp_receiver.
After full lowering, the body of the function should look like:
{
void * setjmpvar.0;
int D.1844;
int D.2844;
@@ -727,14 +691,13 @@ lower_gimple_return (gimple_stmt_iterator *gsi, struct lower_data *data)
<D3850>:;
return;
<D3853>: [non-local];
setjmpvar.0 = __builtin_setjmp_dispatcher (&<D3853>);
goto setjmpvar.0;
}
The dispatcher block will be both the unique destination of all the
abnormal call edges and the unique source of all the abnormal edges
to the receivers, thus keeping the complexity explosion localized. */
During cfg creation an extra per-function (or per-OpenMP region)
block with ABNORMAL_DISPATCHER internal call will be added, unique
destination of all the abnormal call edges and the unique source of
all the abnormal edges to the receivers, thus keeping the complexity
explosion localized. */
static void
lower_builtin_setjmp (gimple_stmt_iterator *gsi)

gcc/internal-fn.c

@@ -857,6 +857,11 @@ expand_MASK_STORE (gimple stmt)
expand_insn (optab_handler (maskstore_optab, TYPE_MODE (type)), 3, ops);
}
static void
expand_ABNORMAL_DISPATCHER (gimple)
{
}
/* Routines to expand each internal function, indexed by function number.
Each routine has the prototype:

gcc/internal-fn.def

@@ -51,3 +51,4 @@ DEF_INTERNAL_FN (UBSAN_NULL, ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (UBSAN_CHECK_ADD, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (UBSAN_CHECK_SUB, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (UBSAN_CHECK_MUL, ECF_CONST | ECF_LEAF | ECF_NOTHROW)
DEF_INTERNAL_FN (ABNORMAL_DISPATCHER, ECF_NORETURN)

gcc/omp-low.c

@@ -10449,7 +10449,8 @@ diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
/* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
codes. */
bool
make_gimple_omp_edges (basic_block bb, struct omp_region **region)
make_gimple_omp_edges (basic_block bb, struct omp_region **region,
int *region_idx)
{
gimple last = last_stmt (bb);
enum gimple_code code = gimple_code (last);
@@ -10556,7 +10557,13 @@ make_gimple_omp_edges (basic_block bb, struct omp_region **region)
}
if (*region != cur_region)
*region = cur_region;
{
*region = cur_region;
if (cur_region)
*region_idx = cur_region->entry->index;
else
*region_idx = 0;
}
return fallthru;
}

gcc/omp-low.h

@@ -26,6 +26,6 @@ extern tree find_omp_clause (tree, enum omp_clause_code);
extern void omp_expand_local (basic_block);
extern void free_omp_regions (void);
extern tree omp_reduction_init (tree, tree);
extern bool make_gimple_omp_edges (basic_block, struct omp_region **);
extern bool make_gimple_omp_edges (basic_block, struct omp_region **, int *);
#endif /* GCC_OMP_LOW_H */

gcc/profile.c

@@ -1106,27 +1106,22 @@ branch_prob (void)
gimple first;
tree fndecl;
gsi = gsi_after_labels (bb);
gsi = gsi_start_nondebug_after_labels_bb (bb);
gcc_checking_assert (!gsi_end_p (gsi));
first = gsi_stmt (gsi);
if (is_gimple_debug (first))
{
gsi_next_nondebug (&gsi);
gcc_checking_assert (!gsi_end_p (gsi));
first = gsi_stmt (gsi);
}
/* Don't split the bbs containing __builtin_setjmp_receiver
or __builtin_setjmp_dispatcher calls. These are very
or ABNORMAL_DISPATCHER calls. These are very
special and don't expect anything to be inserted before
them. */
if (is_gimple_call (first)
&& (((fndecl = gimple_call_fndecl (first)) != NULL
&& DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
&& (DECL_FUNCTION_CODE (fndecl)
== BUILT_IN_SETJMP_RECEIVER
|| (DECL_FUNCTION_CODE (fndecl)
== BUILT_IN_SETJMP_DISPATCHER)))
|| gimple_call_flags (first) & ECF_RETURNS_TWICE))
== BUILT_IN_SETJMP_RECEIVER))
|| (gimple_call_flags (first) & ECF_RETURNS_TWICE)
|| (gimple_call_internal_p (first)
&& (gimple_call_internal_fn (first)
== IFN_ABNORMAL_DISPATCHER))))
continue;
if (dump_file)

gcc/testsuite/ChangeLog

@@ -1,5 +1,13 @@
2014-01-29 Jakub Jelinek <jakub@redhat.com>
PR middle-end/59917
PR tree-optimization/59920
* gcc.dg/pr59920-1.c: New test.
* gcc.dg/pr59920-2.c: New test.
* gcc.dg/pr59920-3.c: New test.
* c-c++-common/gomp/pr59917-1.c: New test.
* c-c++-common/gomp/pr59917-2.c: New test.
PR tree-optimization/59594
* gcc.dg/vect/no-vfa-vect-depend-2.c: New test.
* gcc.dg/vect/no-vfa-vect-depend-3.c: New test.

gcc/testsuite/c-c++-common/gomp/pr59917-1.c

@@ -0,0 +1,22 @@
/* PR middle-end/59917 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */
struct J { long buf[8]; };
extern int setjmp (struct J[1]);
extern struct J j[1];
void foo (int);
void
bar (void)
{
if (setjmp (j) == 0)
{
int k;
foo (-1);
#pragma omp parallel
for (k = 0; k < 10; ++k)
foo (k);
foo (-2);
}
}

gcc/testsuite/c-c++-common/gomp/pr59917-2.c

@@ -0,0 +1,22 @@
/* PR middle-end/59917 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */
struct J { long buf[8]; };
extern int setjmp (struct J[1]);
void foo (int);
void
bar (void)
{
int k;
foo (-1);
#pragma omp parallel
for (k = 0; k < 10; ++k)
{
struct J j[1];
if (setjmp (j) == 0)
foo (k);
}
foo (-2);
}

gcc/testsuite/gcc.dg/pr59920-1.c

@@ -0,0 +1,20 @@
/* PR tree-optimization/59920 */
/* { dg-do compile } */
/* { dg-options "-O0" } */
#include <setjmp.h>
int bar (void);
void baz (int);
#define A { int x = bar (); if (setjmp (buf) == 0) baz (x); }
#define B A A A A A A A A A A
#define C B B B B B B B B B B
extern jmp_buf buf;
void
foo (void)
{
C C
}

gcc/testsuite/gcc.dg/pr59920-2.c

@@ -0,0 +1,30 @@
/* PR tree-optimization/59920 */
/* { dg-do compile } */
/* { dg-options "-O0" } */
void *bar (void **);
void *baz (int, void **);
#define A(n) \
{ __label__ l1_##n, l2_##n, l3_##n; \
static void *a[] = { &&l1_##n, &&l2_##n, &&l3_##n };\
void *b = bar (a); \
goto *b; \
l1_##n: \
b = baz (1, a); \
goto *b; \
l2_##n: \
b = baz (2, a); \
goto *b; \
l3_##n:; \
}
#define B(n) A(n##0) A(n##1) A(n##2) A(n##3) A(n##4) \
A(n##5) A(n##6) A(n##7) A(n##8) A(n##9)
#define C(n) B(n##0) B(n##1) B(n##2) B(n##3) B(n##4) \
B(n##5) B(n##6) B(n##7) B(n##8) B(n##9)
void
foo (void)
{
C(1)
}
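
As an editorial aside (not from the commit): a sketch of the shape
handle_abnormal_edges gives tests like the one above when it factors
computed gotos.  The real transformation runs on GIMPLE; this merely
mirrors it at the C level, reusing the "gotovar" name from the new
tree-cfg.c code, while "factored" and "sketch" are made up here.

/* Each original computed goto becomes an assignment into a shared
   slot plus a plain jump to one factored block, so a single indirect
   jump remains no matter how many "goto *b" sites the function had.  */
void *bar (void **);

void
sketch (void)
{
  static void *targets[] = { &&l1, &&l2 };
  void *gotovar;		/* the shared "gotovar" temporary  */

  gotovar = bar (targets);	/* was: goto *bar (targets);  */
  goto factored;
l1:
  gotovar = targets[1];		/* was: goto *targets[1];  */
  goto factored;
l2:
  return;

factored:			/* the factored dispatcher block  */
  goto *gotovar;		/* the only computed goto left  */
}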

gcc/testsuite/gcc.dg/pr59920-3.c

@@ -0,0 +1,47 @@
/* PR tree-optimization/59920 */
/* { dg-do compile } */
/* { dg-options "-O0" } */
void *bar (void **);
void *baz (int, void **);
#define A(n) __label__ l##n;
#define B(n) A(n##0) A(n##1) A(n##2) A(n##3) A(n##4) \
A(n##5) A(n##6) A(n##7) A(n##8) A(n##9)
#define C(n) B(n##0) B(n##1) B(n##2) B(n##3) B(n##4) \
B(n##5) B(n##6) B(n##7) B(n##8) B(n##9)
#define D C(1)
int
foo (void)
{
D
int bar (int i)
{
switch (i)
{
#undef A
#define A(n) \
case n: goto l##n;
D
}
return i;
}
int w = 0;
#undef A
#define A(n) int w##n = 0;
D
#undef A
#define A(n) \
{ l##n:; \
w##n += bar (10000 + n) - 10000; \
w##n += bar (10001 + n) - 10000; \
bar (n + 1); \
return w##n; \
}
D
#undef A
#define A(n) w += w##n;
D
return w;
}

gcc/tree-cfg.c

@@ -106,9 +106,6 @@ struct cfg_stats_d
static struct cfg_stats_d cfg_stats;
/* Nonzero if we found a computed goto while building basic blocks. */
static bool found_computed_goto;
/* Hash table to store last discriminator assigned for each locus. */
struct locus_discrim_map
{
@@ -148,14 +145,13 @@ static hash_table <locus_discrim_hasher> discriminator_per_locus;
/* Basic blocks and flowgraphs. */
static void make_blocks (gimple_seq);
static void factor_computed_gotos (void);
/* Edges. */
static void make_edges (void);
static void assign_discriminators (void);
static void make_cond_expr_edges (basic_block);
static void make_gimple_switch_edges (basic_block);
static void make_goto_expr_edges (basic_block);
static bool make_goto_expr_edges (basic_block);
static void make_gimple_asm_edges (basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
@@ -225,17 +221,8 @@ build_gimple_cfg (gimple_seq seq)
init_empty_tree_cfg ();
found_computed_goto = 0;
make_blocks (seq);
/* Computed gotos are hell to deal with, especially if there are
lots of them with a large number of destinations. So we factor
them to a common computed goto location before we build the
edge list. After we convert back to normal form, we will un-factor
the computed gotos since factoring introduces an unwanted jump. */
if (found_computed_goto)
factor_computed_gotos ();
/* Make sure there is always at least one block, even if it's empty. */
if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
@@ -385,7 +372,7 @@ make_pass_build_cfg (gcc::context *ctxt)
/* Return true if T is a computed goto. */
static bool
bool
computed_goto_p (gimple t)
{
return (gimple_code (t) == GIMPLE_GOTO
@@ -437,82 +424,6 @@ assert_unreachable_fallthru_edge_p (edge e)
}
/* Search the CFG for any computed gotos. If found, factor them to a
common computed goto site. Also record the location of that site so
that we can un-factor the gotos after we have converted back to
normal form. */
static void
factor_computed_gotos (void)
{
basic_block bb;
tree factored_label_decl = NULL;
tree var = NULL;
gimple factored_computed_goto_label = NULL;
gimple factored_computed_goto = NULL;
/* We know there are one or more computed gotos in this function.
Examine the last statement in each basic block to see if the block
ends with a computed goto. */
FOR_EACH_BB_FN (bb, cfun)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple last;
if (gsi_end_p (gsi))
continue;
last = gsi_stmt (gsi);
/* Ignore the computed goto we create when we factor the original
computed gotos. */
if (last == factored_computed_goto)
continue;
/* If the last statement is a computed goto, factor it. */
if (computed_goto_p (last))
{
gimple assignment;
/* The first time we find a computed goto we need to create
the factored goto block and the variable each original
computed goto will use for their goto destination. */
if (!factored_computed_goto)
{
basic_block new_bb = create_empty_bb (bb);
gimple_stmt_iterator new_gsi = gsi_start_bb (new_bb);
/* Create the destination of the factored goto. Each original
computed goto will put its desired destination into this
variable and jump to the label we create immediately
below. */
var = create_tmp_var (ptr_type_node, "gotovar");
/* Build a label for the new block which will contain the
factored computed goto. */
factored_label_decl = create_artificial_label (UNKNOWN_LOCATION);
factored_computed_goto_label
= gimple_build_label (factored_label_decl);
gsi_insert_after (&new_gsi, factored_computed_goto_label,
GSI_NEW_STMT);
/* Build our new computed goto. */
factored_computed_goto = gimple_build_goto (var);
gsi_insert_after (&new_gsi, factored_computed_goto, GSI_NEW_STMT);
}
/* Copy the original computed goto's destination into VAR. */
assignment = gimple_build_assign (var, gimple_goto_dest (last));
gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);
/* And re-vector the computed goto to the new destination. */
gimple_goto_set_dest (last, factored_label_decl);
}
}
}
/* Build a flowgraph for the sequence of stmts SEQ. */
static void
@@ -546,9 +457,6 @@ make_blocks (gimple_seq seq)
codes. */
gimple_set_bb (stmt, bb);
if (computed_goto_p (stmt))
found_computed_goto = true;
/* If STMT is a basic block terminator, set START_NEW_BLOCK for the
next iteration. */
if (stmt_ends_bb_p (stmt))
@@ -666,6 +574,144 @@ fold_cond_expr_cond (void)
}
}
/* If basic block BB has an abnormal edge to a basic block
containing IFN_ABNORMAL_DISPATCHER internal call, return
that dispatcher's basic block, otherwise return NULL. */
basic_block
get_abnormal_succ_dispatcher (basic_block bb)
{
edge e;
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
{
gimple_stmt_iterator gsi
= gsi_start_nondebug_after_labels_bb (e->dest);
gimple g = gsi_stmt (gsi);
if (g
&& is_gimple_call (g)
&& gimple_call_internal_p (g)
&& gimple_call_internal_fn (g) == IFN_ABNORMAL_DISPATCHER)
return e->dest;
}
return NULL;
}
/* Helper function for make_edges. Create a basic block
with ABNORMAL_DISPATCHER internal call in it if needed, and
create abnormal edges from BBS to it and from it to FOR_BB
if COMPUTED_GOTO is false, otherwise factor the computed gotos. */
static void
handle_abnormal_edges (basic_block *dispatcher_bbs,
basic_block for_bb, int *bb_to_omp_idx,
auto_vec<basic_block> *bbs, bool computed_goto)
{
basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0);
unsigned int idx = 0;
basic_block bb;
bool inner = false;
if (bb_to_omp_idx)
{
dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index];
if (bb_to_omp_idx[for_bb->index] != 0)
inner = true;
}
/* If the dispatcher has been created already, then there are basic
blocks with abnormal edges to it, so just make a new edge to
for_bb. */
if (*dispatcher == NULL)
{
/* Check if there are any basic blocks that need to have
abnormal edges to this dispatcher. If there are none, return
early. */
if (bb_to_omp_idx == NULL)
{
if (bbs->is_empty ())
return;
}
else
{
FOR_EACH_VEC_ELT (*bbs, idx, bb)
if (bb_to_omp_idx[bb->index] == bb_to_omp_idx[for_bb->index])
break;
if (bb == NULL)
return;
}
/* Create the dispatcher bb. */
*dispatcher = create_basic_block (NULL, NULL, for_bb);
if (computed_goto)
{
/* Factor computed gotos into a common computed goto site. Also
record the location of that site so that we can un-factor the
gotos after we have converted back to normal form. */
gimple_stmt_iterator gsi = gsi_start_bb (*dispatcher);
/* Create the destination of the factored goto. Each original
computed goto will put its desired destination into this
variable and jump to the label we create immediately below. */
tree var = create_tmp_var (ptr_type_node, "gotovar");
/* Build a label for the new block which will contain the
factored computed goto. */
tree factored_label_decl
= create_artificial_label (UNKNOWN_LOCATION);
gimple factored_computed_goto_label
= gimple_build_label (factored_label_decl);
gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT);
/* Build our new computed goto. */
gimple factored_computed_goto = gimple_build_goto (var);
gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT);
FOR_EACH_VEC_ELT (*bbs, idx, bb)
{
if (bb_to_omp_idx
&& bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
continue;
gsi = gsi_last_bb (bb);
gimple last = gsi_stmt (gsi);
gcc_assert (computed_goto_p (last));
/* Copy the original computed goto's destination into VAR. */
gimple assignment
= gimple_build_assign (var, gimple_goto_dest (last));
gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);
edge e = make_edge (bb, *dispatcher, EDGE_FALLTHRU);
e->goto_locus = gimple_location (last);
gsi_remove (&gsi, true);
}
}
else
{
tree arg = inner ? boolean_true_node : boolean_false_node;
gimple g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
1, arg);
gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher);
gsi_insert_after (&gsi, g, GSI_NEW_STMT);
/* Create predecessor edges of the dispatcher. */
FOR_EACH_VEC_ELT (*bbs, idx, bb)
{
if (bb_to_omp_idx
&& bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
continue;
make_edge (bb, *dispatcher, EDGE_ABNORMAL);
}
}
}
make_edge (*dispatcher, for_bb, EDGE_ABNORMAL);
}
/* Join all the blocks in the flowgraph. */
static void
@@ -673,6 +719,10 @@
{
basic_block bb;
struct omp_region *cur_region = NULL;
auto_vec<basic_block> ab_edge_goto;
auto_vec<basic_block> ab_edge_call;
int *bb_to_omp_idx = NULL;
int cur_omp_region_idx = 0;
/* Create an edge from entry to the first block with executable
statements in it. */
@@ -686,13 +736,17 @@
gimple last = last_stmt (bb);
bool fallthru;
if (bb_to_omp_idx)
bb_to_omp_idx[bb->index] = cur_omp_region_idx;
if (last)
{
enum gimple_code code = gimple_code (last);
switch (code)
{
case GIMPLE_GOTO:
make_goto_expr_edges (bb);
if (make_goto_expr_edges (bb))
ab_edge_goto.safe_push (bb);
fallthru = false;
break;
case GIMPLE_RETURN:
@@ -720,7 +774,7 @@
make edges from this call site to all the nonlocal goto
handlers. */
if (stmt_can_make_abnormal_goto (last))
make_abnormal_goto_edges (bb, true);
ab_edge_call.safe_push (bb);
/* If this statement has reachable exception handlers, then
create abnormal edges to them. */
@@ -728,8 +782,10 @@
/* BUILTIN_RETURN is really a return statement. */
if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0), fallthru =
false;
{
make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
fallthru = false;
}
/* Some calls are known not to return. */
else
fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
@@ -749,7 +805,10 @@
break;
CASE_GIMPLE_OMP:
fallthru = make_gimple_omp_edges (bb, &cur_region);
fallthru = make_gimple_omp_edges (bb, &cur_region,
&cur_omp_region_idx);
if (cur_region && bb_to_omp_idx == NULL)
bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun));
break;
case GIMPLE_TRANSACTION:
@@ -773,6 +832,77 @@
make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
}
/* Computed gotos are hell to deal with, especially if there are
lots of them with a large number of destinations. So we factor
them to a common computed goto location before we build the
edge list. After we convert back to normal form, we will un-factor
the computed gotos since factoring introduces an unwanted jump.
For non-local gotos and abnormal edges from calls to calls that return
twice or forced labels, factor the abnormal edges too, by having all
abnormal edges from the calls go to a common artificial basic block
with ABNORMAL_DISPATCHER internal call and abnormal edges from that
basic block to all forced labels and calls returning twice.
We do this per-OpenMP structured block, because those regions
are guaranteed to be single entry single exit by the standard,
so it is not allowed to enter or exit such regions abnormally this way,
thus all computed gotos, non-local gotos and setjmp/longjmp calls
must not transfer control across SESE region boundaries. */
if (!ab_edge_goto.is_empty () || !ab_edge_call.is_empty ())
{
gimple_stmt_iterator gsi;
basic_block dispatcher_bb_array[2] = { NULL, NULL };
basic_block *dispatcher_bbs = dispatcher_bb_array;
int count = n_basic_blocks_for_fn (cfun);
if (bb_to_omp_idx)
dispatcher_bbs = XCNEWVEC (basic_block, 2 * count);
FOR_EACH_BB_FN (bb, cfun)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple label_stmt = gsi_stmt (gsi);
tree target;
if (gimple_code (label_stmt) != GIMPLE_LABEL)
break;
target = gimple_label_label (label_stmt);
/* Make an edge to every label block that has been marked as a
potential target for a computed goto or a non-local goto. */
if (FORCED_LABEL (target))
handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
&ab_edge_goto, true);
if (DECL_NONLOCAL (target))
{
handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
&ab_edge_call, false);
break;
}
}
if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
gsi_next_nondebug (&gsi);
if (!gsi_end_p (gsi))
{
/* Make an edge to every setjmp-like call. */
gimple call_stmt = gsi_stmt (gsi);
if (is_gimple_call (call_stmt)
&& ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE)
|| gimple_call_builtin_p (call_stmt,
BUILT_IN_SETJMP_RECEIVER)))
handle_abnormal_edges (dispatcher_bbs, bb, bb_to_omp_idx,
&ab_edge_call, false);
}
}
if (bb_to_omp_idx)
XDELETE (dispatcher_bbs);
}
XDELETE (bb_to_omp_idx);
free_omp_regions ();
/* Fold COND_EXPR_COND of each COND_EXPR. */
@@ -1045,53 +1175,10 @@ label_to_block_fn (struct function *ifun, tree dest)
return (*ifun->cfg->x_label_to_block_map)[uid];
}
/* Create edges for an abnormal goto statement at block BB. If FOR_CALL
is true, the source statement is a CALL_EXPR instead of a GOTO_EXPR. */
/* Create edges for a goto statement at block BB. Returns true
if abnormal edges should be created. */
void
make_abnormal_goto_edges (basic_block bb, bool for_call)
{
basic_block target_bb;
gimple_stmt_iterator gsi;
FOR_EACH_BB_FN (target_bb, cfun)
{
for (gsi = gsi_start_bb (target_bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple label_stmt = gsi_stmt (gsi);
tree target;
if (gimple_code (label_stmt) != GIMPLE_LABEL)
break;
target = gimple_label_label (label_stmt);
/* Make an edge to every label block that has been marked as a
potential target for a computed goto or a non-local goto. */
if ((FORCED_LABEL (target) && !for_call)
|| (DECL_NONLOCAL (target) && for_call))
{
make_edge (bb, target_bb, EDGE_ABNORMAL);
break;
}
}
if (!gsi_end_p (gsi)
&& is_gimple_debug (gsi_stmt (gsi)))
gsi_next_nondebug (&gsi);
if (!gsi_end_p (gsi))
{
/* Make an edge to every setjmp-like call. */
gimple call_stmt = gsi_stmt (gsi);
if (is_gimple_call (call_stmt)
&& (gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE))
make_edge (bb, target_bb, EDGE_ABNORMAL);
}
}
}
/* Create edges for a goto statement at block BB. */
static void
static bool
make_goto_expr_edges (basic_block bb)
{
gimple_stmt_iterator last = gsi_last_bb (bb);
@@ -1105,11 +1192,11 @@ make_goto_expr_edges (basic_block bb)
edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
e->goto_locus = gimple_location (goto_t);
gsi_remove (&last, true);
return;
return false;
}
/* A computed GOTO creates abnormal edges. */
make_abnormal_goto_edges (bb, false);
return true;
}
/* Create edges for an asm statement with labels at block BB. */

gcc/tree-cfg.h

@@ -31,7 +31,6 @@ extern void start_recording_case_labels (void);
extern void end_recording_case_labels (void);
extern basic_block label_to_block_fn (struct function *, tree);
#define label_to_block(t) (label_to_block_fn (cfun, t))
extern void make_abnormal_goto_edges (basic_block, bool);
extern void cleanup_dead_labels (void);
extern void group_case_labels_stmt (gimple);
extern void group_case_labels (void);
@@ -46,7 +45,9 @@ extern void gimple_debug_cfg (int);
extern void gimple_dump_cfg (FILE *, int);
extern void dump_cfg_stats (FILE *);
extern void debug_cfg_stats (void);
extern bool computed_goto_p (gimple);
extern bool stmt_can_make_abnormal_goto (gimple);
extern basic_block get_abnormal_succ_dispatcher (basic_block);
extern bool is_ctrl_stmt (gimple);
extern bool is_ctrl_altering_stmt (gimple);
extern bool simple_goto_p (gimple);

gcc/tree-inline.c

@@ -1967,7 +1967,7 @@ update_ssa_across_abnormal_edges (basic_block bb, basic_block ret_bb,
static bool
copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb,
bool can_make_abnormal_goto)
basic_block abnormal_goto_dest)
{
basic_block new_bb = (basic_block) bb->aux;
edge_iterator ei;
@@ -2021,7 +2021,9 @@ copy_edges_for_bb (basic_block bb, gcov_type count_scale, basic_block ret_bb,
into a COMPONENT_REF which doesn't. If the copy
can throw, the original could also throw. */
can_throw = stmt_can_throw_internal (copy_stmt);
nonlocal_goto = stmt_can_make_abnormal_goto (copy_stmt);
nonlocal_goto
= (stmt_can_make_abnormal_goto (copy_stmt)
&& !computed_goto_p (copy_stmt));
if (can_throw || nonlocal_goto)
{
@@ -2052,9 +2054,26 @@
/* If the call we inline cannot make abnormal goto do not add
additional abnormal edges but only retain those already present
in the original function body. */
nonlocal_goto &= can_make_abnormal_goto;
if (abnormal_goto_dest == NULL)
nonlocal_goto = false;
if (nonlocal_goto)
make_abnormal_goto_edges (gimple_bb (copy_stmt), true);
{
basic_block copy_stmt_bb = gimple_bb (copy_stmt);
if (get_abnormal_succ_dispatcher (copy_stmt_bb))
nonlocal_goto = false;
/* ABNORMAL_DISPATCHER (1) is for longjmp/setjmp or nonlocal gotos
in OpenMP regions which aren't allowed to be left abnormally.
So, no need to add abnormal edge in that case. */
else if (is_gimple_call (copy_stmt)
&& gimple_call_internal_p (copy_stmt)
&& (gimple_call_internal_fn (copy_stmt)
== IFN_ABNORMAL_DISPATCHER)
&& gimple_call_arg (copy_stmt, 0) == boolean_true_node)
nonlocal_goto = false;
else
make_edge (copy_stmt_bb, abnormal_goto_dest, EDGE_ABNORMAL);
}
if ((can_throw || nonlocal_goto)
&& gimple_in_ssa_p (cfun))
@@ -2493,13 +2512,22 @@ copy_cfg_body (copy_body_data * id, gcov_type count, int frequency_scale,
last = last_basic_block_for_fn (cfun);
/* Now that we've duplicated the blocks, duplicate their edges. */
bool can_make_abormal_goto
= id->gimple_call && stmt_can_make_abnormal_goto (id->gimple_call);
basic_block abnormal_goto_dest = NULL;
if (id->gimple_call
&& stmt_can_make_abnormal_goto (id->gimple_call))
{
gimple_stmt_iterator gsi = gsi_for_stmt (id->gimple_call);
bb = gimple_bb (id->gimple_call);
gsi_next (&gsi);
if (gsi_end_p (gsi))
abnormal_goto_dest = get_abnormal_succ_dispatcher (bb);
}
FOR_ALL_BB_FN (bb, cfun_to_copy)
if (!id->blocks_to_copy
|| (bb->index > 0 && bitmap_bit_p (id->blocks_to_copy, bb->index)))
need_debug_cleanup |= copy_edges_for_bb (bb, count_scale, exit_block_map,
can_make_abormal_goto);
abnormal_goto_dest);
if (new_entry)
{

gcc/tree.c

@@ -9977,12 +9977,6 @@ build_common_builtin_nodes (void)
BUILT_IN_SETJMP_SETUP,
"__builtin_setjmp_setup", ECF_NOTHROW);
ftype = build_function_type_list (ptr_type_node, ptr_type_node, NULL_TREE);
local_define_builtin ("__builtin_setjmp_dispatcher", ftype,
BUILT_IN_SETJMP_DISPATCHER,
"__builtin_setjmp_dispatcher",
ECF_PURE | ECF_NOTHROW);
ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
local_define_builtin ("__builtin_setjmp_receiver", ftype,
BUILT_IN_SETJMP_RECEIVER,