re PR middle-end/79805 (ICE (verify_flow_info failed) with -fnon-call-exceptions -O)

PR middle-end/79805
	* internal-fn.def (ATOMIC_BIT_TEST_AND_SET, ATOMIC_BIT_TEST_AND_RESET,
	ATOMIC_BIT_TEST_AND_COMPLEMENT, ATOMIC_COMPARE_EXCHANGE): Remove
	ECF_NOTHROW.
	* gimple-fold.c (fold_builtin_atomic_compare_exchange): Set
	gimple_call_nothrow_p flag based on whether original builtin can throw.
	If it can, emit following stmts on the fallthrough edge.
	* tree-ssa-ccp.c (optimize_atomic_bit_test_and): Similarly, except
	don't create new bb if inserting just debug stmts on the edge, try to
	insert them on the fallthru bb or just reset debug stmts.

	* g++.dg/opt/pr79805.C: New test.

From-SVN: r245882
Jakub Jelinek 2017-03-03 20:32:01 +01:00 committed by Jakub Jelinek
parent f325c45673
commit cc195d46a3
6 changed files with 312 additions and 22 deletions
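
For reference, the verify_flow_info ICE shows up for atomic builtins used in a function that needs an EH cleanup, compiled with -O and -fnon-call-exceptions. A minimal sketch distilled from the new g++.dg/opt/pr79805.C test below (compile with g++ -O2 -fnon-call-exceptions):

// Minimal reproducer sketch; see the full test in this commit for more variants.
struct A { A (); ~A (); };

int
f0 (int *d, int f)
{
  A z;	// the destructor forces an EH region, so the atomic call can throw
  return __atomic_compare_exchange_n (d, &f, 1, 1, __ATOMIC_RELAXED,
				      __ATOMIC_RELAXED);
}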

gcc/ChangeLog

@@ -1,3 +1,16 @@
2017-03-03 Jakub Jelinek <jakub@redhat.com>
PR middle-end/79805
* internal-fn.def (ATOMIC_BIT_TEST_AND_SET, ATOMIC_BIT_TEST_AND_RESET,
ATOMIC_BIT_TEST_AND_COMPLEMENT, ATOMIC_COMPARE_EXCHANGE): Remove
ECF_NOTHROW.
* gimple-fold.c (fold_builtin_atomic_compare_exchange): Set
gimple_call_nothrow_p flag based on whether original builtin can throw.
If it can, emit following stmts on the fallthrough edge.
* tree-ssa-ccp.c (optimize_atomic_bit_test_and): Similarly, except
don't create new bb if inserting just debug stmts on the edge, try to
insert them on the fallthru bb or just reset debug stmts.
2017-03-03 Segher Boessenkool <segher@kernel.crashing.org>
PR target/43763

gcc/gimple-fold.c

@@ -3533,6 +3533,8 @@ fold_builtin_atomic_compare_exchange (gimple_stmt_iterator *gsi)
   tree itype = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (parmt)));
   tree ctype = build_complex_type (itype);
   tree expected = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
+  bool throws = false;
+  edge e = NULL;
   gimple *g = gimple_build_assign (make_ssa_name (TREE_TYPE (expected)),
                                    expected);
   gsi_insert_before (gsi, g, GSI_SAME_STMT);
@@ -3558,19 +3560,39 @@ fold_builtin_atomic_compare_exchange (gimple_stmt_iterator *gsi)
   gimple_set_vdef (g, gimple_vdef (stmt));
   gimple_set_vuse (g, gimple_vuse (stmt));
   SSA_NAME_DEF_STMT (gimple_vdef (g)) = g;
-  if (gimple_call_lhs (stmt))
+  tree oldlhs = gimple_call_lhs (stmt);
+  if (stmt_can_throw_internal (stmt))
+    {
+      throws = true;
+      e = find_fallthru_edge (gsi_bb (*gsi)->succs);
+    }
+  gimple_call_set_nothrow (as_a <gcall *> (g),
+                           gimple_call_nothrow_p (as_a <gcall *> (stmt)));
+  gimple_call_set_lhs (stmt, NULL_TREE);
+  gsi_replace (gsi, g, true);
+  if (oldlhs)
     {
-      gsi_insert_before (gsi, g, GSI_SAME_STMT);
       g = gimple_build_assign (make_ssa_name (itype), IMAGPART_EXPR,
                                build1 (IMAGPART_EXPR, itype, lhs));
-      gsi_insert_before (gsi, g, GSI_SAME_STMT);
-      g = gimple_build_assign (gimple_call_lhs (stmt), NOP_EXPR,
-                               gimple_assign_lhs (g));
+      if (throws)
+        {
+          gsi_insert_on_edge_immediate (e, g);
+          *gsi = gsi_for_stmt (g);
+        }
+      else
+        gsi_insert_after (gsi, g, GSI_NEW_STMT);
+      g = gimple_build_assign (oldlhs, NOP_EXPR, gimple_assign_lhs (g));
+      gsi_insert_after (gsi, g, GSI_NEW_STMT);
     }
-  gsi_replace (gsi, g, true);
   g = gimple_build_assign (make_ssa_name (itype), REALPART_EXPR,
                            build1 (REALPART_EXPR, itype, lhs));
-  gsi_insert_after (gsi, g, GSI_NEW_STMT);
+  if (throws && oldlhs == NULL_TREE)
+    {
+      gsi_insert_on_edge_immediate (e, g);
+      *gsi = gsi_for_stmt (g);
+    }
+  else
+    gsi_insert_after (gsi, g, GSI_NEW_STMT);
   if (!useless_type_conversion_p (TREE_TYPE (expected), itype))
     {
       g = gimple_build_assign (make_ssa_name (TREE_TYPE (expected)),
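
Roughly, at the source level the folded sequence computes something like the sketch below (hypothetical names: cx_pair and ifn_cas stand in for the complex-typed result and the IFN_ATOMIC_COMPARE_EXCHANGE call; the real transformation works on GIMPLE). The internal call yields an (old value, success) pair: the IMAGPART becomes the builtin's boolean result and the REALPART is stored back into the 'expected' variable. With -fnon-call-exceptions the call may throw and must end its basic block, which is why those follow-up statements are now emitted on the call's fall-through edge.

// Source-level sketch only; cx_pair and ifn_cas are made-up stand-ins.
struct cx_pair { int oldval; int ok; };

static cx_pair
ifn_cas (int *ptr, int expected, int desired)
{
  bool ok = __atomic_compare_exchange_n (ptr, &expected, desired, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return { expected, ok };	// real part = value seen, imag part = success
}

bool
folded_cas (int *ptr, int *expected, int desired)
{
  int e = *expected;				// load of the expected value
  cx_pair r = ifn_cas (ptr, e, desired);	// the internal-fn call
  bool ok = r.ok != 0;				// IMAGPART_EXPR -> original lhs
  *expected = r.oldval;				// REALPART_EXPR -> written back
  return ok;
}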

gcc/internal-fn.def

@@ -205,11 +205,13 @@ DEF_INTERNAL_FN (GOACC_TILE, ECF_NOTHROW | ECF_LEAF, NULL)
    current target.  */
 DEF_INTERNAL_FN (SET_EDOM, ECF_LEAF | ECF_NOTHROW, NULL)
 
-/* Atomic functions.  */
-DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_SET, ECF_LEAF | ECF_NOTHROW, NULL)
-DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_COMPLEMENT, ECF_LEAF | ECF_NOTHROW, NULL)
-DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_RESET, ECF_LEAF | ECF_NOTHROW, NULL)
-DEF_INTERNAL_FN (ATOMIC_COMPARE_EXCHANGE, ECF_LEAF | ECF_NOTHROW, NULL)
+/* Atomic functions.  These don't have ECF_NOTHROW because for
+   -fnon-call-exceptions they can throw, otherwise we set
+   gimple_call_nothrow_p on it.  */
+DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_SET, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_COMPLEMENT, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_BIT_TEST_AND_RESET, ECF_LEAF, NULL)
+DEF_INTERNAL_FN (ATOMIC_COMPARE_EXCHANGE, ECF_LEAF, NULL)
 
 /* To implement [[fallthrough]].  */
 DEF_INTERNAL_FN (FALLTHROUGH, ECF_LEAF | ECF_NOTHROW, NULL)

gcc/testsuite/ChangeLog

@@ -1,3 +1,8 @@
2017-03-03 Jakub Jelinek <jakub@redhat.com>
PR middle-end/79805
* g++.dg/opt/pr79805.C: New test.
2017-03-03 Andrew Senkevich <andrew.senkevich@intel.com>
* gcc.target/i386/avx512vpopcntdq-check.h: New.

gcc/testsuite/g++.dg/opt/pr79805.C

@@ -0,0 +1,219 @@
// PR middle-end/79805
// { dg-do compile }
// { dg-options "-O2 -fnon-call-exceptions" }
struct A { A (); ~A (); };
void bar (void);
int
f0 (int *d, int f)
{
A z;
int e = __atomic_compare_exchange_n (d, &f, 1, 1, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return e;
}
int
f1 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
return (__sync_fetch_and_or (a, mask) & mask) != 0;
}
int
f2 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
unsigned int t1 = __atomic_fetch_or (a, mask, __ATOMIC_RELAXED);
unsigned int t2 = t1 & mask;
return t2 != 0;
}
long int
f3 (long int *a, int bit)
{
A z;
unsigned long int mask = (1ul << bit);
return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) == 0;
}
int
f4 (int *a)
{
A z;
unsigned int mask = (1u << 7);
return (__sync_fetch_and_or (a, mask) & mask) != 0;
}
int
f5 (int *a)
{
A z;
unsigned int mask = (1u << 13);
return (__atomic_fetch_or (a, mask, __ATOMIC_RELAXED) & mask) != 0;
}
int
f6 (int *a)
{
A z;
unsigned int mask = (1u << 0);
return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
void
f7 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
if ((__sync_fetch_and_xor (a, mask) & mask) != 0)
bar ();
}
void
f8 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
if ((__atomic_fetch_xor (a, mask, __ATOMIC_RELAXED) & mask) == 0)
bar ();
}
int
f9 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
int
f10 (int *a)
{
A z;
unsigned int mask = (1u << 7);
return (__sync_fetch_and_xor (a, mask) & mask) != 0;
}
int
f11 (int *a)
{
A z;
unsigned int mask = (1u << 13);
return (__atomic_fetch_xor (a, mask, __ATOMIC_RELAXED) & mask) != 0;
}
int
f12 (int *a)
{
A z;
unsigned int mask = (1u << 0);
return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
int
f13 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
return (__sync_fetch_and_and (a, ~mask) & mask) != 0;
}
int
f14 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
return (__atomic_fetch_and (a, ~mask, __ATOMIC_RELAXED) & mask) != 0;
}
int
f15 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
return (__atomic_fetch_and (a, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
int
f16 (int *a)
{
A z;
unsigned int mask = (1u << 7);
return (__sync_fetch_and_and (a, ~mask) & mask) != 0;
}
int
f17 (int *a)
{
A z;
unsigned int mask = (1u << 13);
return (__atomic_fetch_and (a, ~mask, __ATOMIC_RELAXED) & mask) != 0;
}
int
f18 (int *a)
{
A z;
unsigned int mask = (1u << 0);
return (__atomic_fetch_and (a, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
unsigned long int
f19 (unsigned long int *a, int bit)
{
A z;
unsigned long int mask = (1ul << bit);
return (__atomic_xor_fetch (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
unsigned long int
f20 (unsigned long int *a)
{
A z;
unsigned long int mask = (1ul << 7);
return (__atomic_xor_fetch (a, mask, __ATOMIC_SEQ_CST) & mask) == 0;
}
int
f21 (int *a, int bit)
{
A z;
unsigned int mask = (1u << bit);
return (__sync_fetch_and_or (a, mask) & mask);
}
unsigned long int
f22 (unsigned long int *a)
{
A z;
unsigned long int mask = (1ul << 7);
return (__atomic_xor_fetch (a, mask, __ATOMIC_SEQ_CST) & mask);
}
unsigned long int
f23 (unsigned long int *a)
{
A z;
unsigned long int mask = (1ul << 7);
return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask);
}
unsigned short int
f24 (unsigned short int *a)
{
A z;
unsigned short int mask = (1u << 7);
return (__sync_fetch_and_or (a, mask) & mask) != 0;
}
unsigned short int
f25 (unsigned short int *a)
{
A z;
unsigned short int mask = (1u << 7);
return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

gcc/tree-ssa-ccp.c

@@ -2890,9 +2890,19 @@ optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
   gimple_set_location (g, gimple_location (call));
   gimple_set_vuse (g, gimple_vuse (call));
   gimple_set_vdef (g, gimple_vdef (call));
+  bool throws = stmt_can_throw_internal (call);
+  gimple_call_set_nothrow (as_a <gcall *> (g),
+                           gimple_call_nothrow_p (as_a <gcall *> (call)));
   SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
   gimple_stmt_iterator gsi = *gsip;
   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+  edge e = NULL;
+  if (throws)
+    {
+      maybe_clean_or_replace_eh_stmt (call, g);
+      if (after || (use_bool && has_debug_uses))
+        e = find_fallthru_edge (gsi_bb (gsi)->succs);
+    }
   if (after)
     {
       /* The internal function returns the value of the specified bit
@@ -2905,23 +2915,42 @@ optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
                               use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
                                        : mask);
       new_lhs = gimple_assign_lhs (g);
-      gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+      if (throws)
+        {
+          gsi_insert_on_edge_immediate (e, g);
+          gsi = gsi_for_stmt (g);
+        }
+      else
+        gsi_insert_after (&gsi, g, GSI_NEW_STMT);
     }
   if (use_bool && has_debug_uses)
     {
-      tree temp = make_node (DEBUG_EXPR_DECL);
-      DECL_ARTIFICIAL (temp) = 1;
-      TREE_TYPE (temp) = TREE_TYPE (lhs);
-      SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
-      tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
-      g = gimple_build_debug_bind (temp, t, g);
-      gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+      tree temp = NULL_TREE;
+      if (!throws || after || single_pred_p (e->dest))
+        {
+          temp = make_node (DEBUG_EXPR_DECL);
+          DECL_ARTIFICIAL (temp) = 1;
+          TREE_TYPE (temp) = TREE_TYPE (lhs);
+          SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
+          tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
+          g = gimple_build_debug_bind (temp, t, g);
+          if (throws && !after)
+            {
+              gsi = gsi_after_labels (e->dest);
+              gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+            }
+          else
+            gsi_insert_after (&gsi, g, GSI_NEW_STMT);
+        }
       FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
        if (is_gimple_debug (g))
          {
            use_operand_p use_p;
-           FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
-             SET_USE (use_p, temp);
+           if (temp == NULL_TREE)
+             gimple_debug_bind_reset_value (g);
+           else
+             FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+               SET_USE (use_p, temp);
            update_stmt (g);
          }
     }
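
The pattern handled here (compare f1, f2 and f7 in the new test) is an atomic fetch-or/xor/and whose result is only used to test the single bit that was changed; optimize_atomic_bit_test_and rewrites it to the corresponding ATOMIC_BIT_TEST_AND_* internal function. Under -fnon-call-exceptions that call now ends its basic block on an EH edge, so the follow-up statement (the 'after' case) and any debug bind are placed on the fall-through edge, or the debug binds are simply reset when no suitable block exists. A sketch of the source pattern, assuming -O2 (add -g -fnon-call-exceptions to exercise the new paths):

// Expected to be rewritten to the bit-test-and-set internal fn (lock bts on x86).
bool
test_and_set_bit (int *p, int bit)
{
  unsigned int mask = 1u << bit;
  return (__atomic_fetch_or (p, mask, __ATOMIC_RELAXED) & mask) != 0;
}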