re PR middle-end/51038 (29_atomics/atomic_flag/clear/1.cc test_and_set/explicit.cc implicit.cc)

PR middle-end/51038

	libstdc++-v3
	* include/bits/atomic_base.h (atomic_thread_fence): Call built-in.
	(atomic_signal_fence): Call built-in.
	(test_and_set, clear): Call new atomic built-ins.

	gcc
	* builtins.c (expand_builtin_atomic_clear): New.  Expand atomic_clear.
	(expand_builtin_atomic_test_and_set): New.  Expand atomic test_and_set.
	(expand_builtin): Add cases for test_and_set and clear.
	* sync-builtins.def (BUILT_IN_ATOMIC_TEST_AND_SET): New.
	(BUILT_IN_ATOMIC_CLEAR): New.

	testsuite
	* gcc.dg/atomic-invalid.c: Add test for invalid __atomic_clear models.
	* gcc.dg/atomic-flag.c: New.  Test __atomic_test_and_set and
	__atomic_clear.

From-SVN: r181271

commit d660c35ea2 (parent 49fe93f410)
Author: Andrew MacLeod <amacleod@redhat.com>
Date:   2011-11-10 20:38:33 +00:00 (committed by Andrew MacLeod)

8 changed files with 159 additions and 52 deletions
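
As a quick orientation before the diffs, the short C++ sketch below (illustrative only, not part of the patch; the function name is hypothetical) shows how the two new built-ins are meant to be called, mirroring the gcc.dg/atomic-flag.c test added further down.

/* Illustrative sketch, not part of the patch; mirrors gcc.dg/atomic-flag.c.  */
static bool flag;

static int
exercise_flag (void)
{
  __atomic_clear (&flag, __ATOMIC_RELAXED);          /* flag is now false  */
  bool was_set = __atomic_test_and_set (&flag, __ATOMIC_SEQ_CST);
  /* was_set is false the first time through; the flag itself is now true.  */
  __atomic_clear (&flag, __ATOMIC_SEQ_CST);          /* back to false  */
  return was_set;
}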

gcc/ChangeLog

@@ -1,3 +1,12 @@
+2011-11-10  Andrew MacLeod  <amacleod@redhat.com>
+
+	PR middle-end/51038
+	* builtins.c (expand_builtin_atomic_clear): New.  Expand atomic_clear.
+	(expand_builtin_atomic_test_and_set): New.  Expand atomic test_and_set.
+	(expand_builtin): Add cases for test_and_set and clear.
+	* sync-builtins.def (BUILT_IN_ATOMIC_TEST_AND_SET): New.
+	(BUILT_IN_ATOMIC_CLEAR): New.
+
 2011-11-10  Roberto Agostino Vitillo  <ravitillo@lbl.gov>
 
 	PR debug/50983
@@ -37,8 +46,6 @@
 	be AND followed by NOT.
 	* builtins.c (expand_builtin_atomic_fetch_op): Patchup code for NAND
 	should be AND followed by NOT.
-	* testsuite/gcc.dg/atomic-noinline[-aux].c: Test no-inline NAND and
-	patchup code.
 
 2011-11-10  Jakub Jelinek  <jakub@redhat.com>

gcc/builtins.c

@@ -5474,6 +5474,71 @@ expand_builtin_atomic_fetch_op (enum machine_mode mode, tree exp, rtx target,
   return ret;
 }
 
+/* Expand an atomic clear operation.
+	void __atomic_clear (BOOL *obj, enum memmodel)
+   EXP is the call expression.  */
+
+static rtx
+expand_builtin_atomic_clear (tree exp)
+{
+  enum machine_mode mode;
+  rtx mem, ret;
+  enum memmodel model;
+
+  mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+  mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+  model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+
+  if (model == MEMMODEL_ACQUIRE || model == MEMMODEL_ACQ_REL)
+    {
+      error ("invalid memory model for %<__atomic_store%>");
+      return const0_rtx;
+    }
+
+  /* Try issuing an __atomic_store, and allow fallback to __sync_lock_release.
+     Failing that, issue a plain store; the only way expand_atomic_store can
+     fail is if the bool type is larger than a word size.  Unlikely, but
+     handle it anyway for completeness.  Assume a single-threaded model since
+     there is no atomic support in this case, and no barriers are required.  */
+  ret = expand_atomic_store (mem, const0_rtx, model, true);
+  if (!ret)
+    emit_move_insn (mem, const0_rtx);
+  return const0_rtx;
+}
+
+/* Expand an atomic test_and_set operation.
+	bool __atomic_test_and_set (BOOL *obj, enum memmodel)
+   EXP is the call expression.  */
+
+static rtx
+expand_builtin_atomic_test_and_set (tree exp)
+{
+  rtx mem, ret;
+  enum memmodel model;
+  enum machine_mode mode;
+
+  mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+  mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+  model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+
+  /* Try issuing an exchange.  If it is lock free, or if there is a limited
+     functionality __sync_lock_test_and_set, this will utilize it.  */
+  ret = expand_atomic_exchange (NULL_RTX, mem, const1_rtx, model, true);
+  if (ret)
+    return ret;
+
+  /* Otherwise, there is no lock-free support for test_and_set.  Simply
+     perform a load and a store.  Since this presumes a non-atomic
+     architecture, also assume single-threadedness and don't issue barriers
+     either.  */
+  ret = gen_reg_rtx (mode);
+  emit_move_insn (ret, mem);
+  emit_move_insn (mem, const1_rtx);
+  return ret;
+}
+
 /* Return true if (optional) argument ARG1 of size ARG0 is always lock free on
    this architecture.  If ARG1 is NULL, use typical alignment for size ARG0.  */
 
@@ -6702,6 +6767,12 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode,
       if (target)
	return target;
       break;
 
+    case BUILT_IN_ATOMIC_TEST_AND_SET:
+      return expand_builtin_atomic_test_and_set (exp);
+
+    case BUILT_IN_ATOMIC_CLEAR:
+      return expand_builtin_atomic_clear (exp);
+
     case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
       return expand_builtin_atomic_always_lock_free (exp);
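
For readers who do not follow RTL, the sketch below (illustrative C++ with hypothetical names, not the actual expansion) spells out the fallback semantics the two expanders above emit when no lock-free exchange or store exists for the bool mode: plain loads and stores, no barriers, under the single-threaded assumption stated in the comments.

/* Illustrative rendering of the non-lock-free fallback paths; hypothetical
   names, no RTL details modelled.  */
static bool flag_storage;

static bool
fallback_test_and_set (void)
{
  bool old = flag_storage;      /* plain load, no barrier  */
  flag_storage = true;          /* plain store, no barrier  */
  return old;                   /* only valid single-threaded, as assumed  */
}

static void
fallback_clear (void)
{
  flag_storage = false;         /* plain store, no barrier  */
}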

gcc/sync-builtins.def

@@ -259,6 +259,12 @@ DEF_SYNC_BUILTIN (BUILT_IN_SYNC_SYNCHRONIZE, "__sync_synchronize",
 
 /* __sync* builtins for the C++ memory model.  */
 
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_TEST_AND_SET, "__atomic_test_and_set",
+		  BT_FN_BOOL_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
+
+DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_CLEAR, "__atomic_clear", BT_FN_VOID_VPTR_INT,
+		  ATTR_NOTHROW_LEAF_LIST)
+
 DEF_SYNC_BUILTIN (BUILT_IN_ATOMIC_EXCHANGE,
		  "__atomic_exchange",
		  BT_FN_VOID_SIZE_VPTR_PTR_PTR_INT, ATTR_NOTHROW_LEAF_LIST)
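
Read as user-visible prototypes, the type codes above correspond roughly to bool (volatile void *, int) for __atomic_test_and_set and void (volatile void *, int) for __atomic_clear, with the second argument being the memory-model constant; the tiny sketch below (illustrative, hypothetical function name, not part of the patch) just exercises those signatures.

/* Illustrative call matching the declared builtin signatures.  */
static bool busy;

static bool
demo_signatures (void)
{
  bool was_busy = __atomic_test_and_set (&busy, __ATOMIC_SEQ_CST);
  __atomic_clear (&busy, __ATOMIC_SEQ_CST);
  return was_busy;
}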

gcc/testsuite/ChangeLog

@@ -1,3 +1,16 @@
+2011-11-10  Andrew MacLeod  <amacleod@redhat.com>
+
+	PR middle-end/51038
+	* gcc.dg/atomic-invalid.c: Add test for invalid __atomic_clear models.
+	* gcc.dg/atomic-flag.c: New.  Test __atomic_test_and_set and
+	__atomic_clear.
+
+2011-11-10  Andrew MacLeod  <amacleod@redhat.com>
+
+	PR rtl-optimization/51040
+	* testsuite/gcc.dg/atomic-noinline[-aux].c: Test no-inline NAND and
+	patchup code.
+
 2011-11-10  Jason Merrill  <jason@redhat.com>
 
 	PR c++/51079

gcc/testsuite/gcc.dg/atomic-flag.c

@@ -0,0 +1,32 @@
+/* Test __atomic routines for existence and execution.  */
+/* { dg-do run } */
+
+#include <stdbool.h>
+
+/* Test that __atomic_test_and_set and __atomic_clear builtins execute.  */
+
+extern void abort(void);
+bool a;
+
+main ()
+{
+  bool b;
+
+  __atomic_clear (&a, __ATOMIC_RELAXED);
+  if (a != 0)
+    abort ();
+
+  b = __atomic_test_and_set (&a, __ATOMIC_SEQ_CST);
+  if (a != 1 || b != 0)
+    abort ();
+
+  b = __atomic_test_and_set (&a, __ATOMIC_ACQ_REL);
+  if (b != 1 || a != 1)
+    abort ();
+
+  __atomic_clear (&a, __ATOMIC_SEQ_CST);
+  if (a != 0)
+    abort ();
+
+  return 0;
+}

gcc/testsuite/gcc.dg/atomic-invalid.c

@@ -4,9 +4,11 @@
 /* { dg-require-effective-target sync_int_long } */
 
 #include <stddef.h>
+#include <stdbool.h>
 
 int i, e, b;
 size_t s;
+bool x;
 
 main ()
 {
@@ -26,4 +28,9 @@ main ()
   i = __atomic_always_lock_free (s, NULL); /* { dg-error "non-constant argument" } */
 
   __atomic_load_n (&i, 44); /* { dg-warning "invalid memory model" } */
 
+  __atomic_clear (&x, __ATOMIC_ACQUIRE); /* { dg-error "invalid memory model" } */
+  __atomic_clear (&x, __ATOMIC_ACQ_REL); /* { dg-error "invalid memory model" } */
+
 }

libstdc++-v3/ChangeLog

@@ -1,3 +1,10 @@
+2011-11-10  Andrew MacLeod  <amacleod@redhat.com>
+
+	PR middle-end/51038
+	* include/bits/atomic_base.h (atomic_thread_fence): Call built-in.
+	(atomic_signal_fence): Call built-in.
+	(test_and_set, clear): Call new atomic built-ins.
+
 2011-11-09  Jonathan Wakely  <jwakely.gcc@gmail.com>
 
 	* include/bits/allocator.h (__shrink_to_fit_aux::_S_do_it): Create

libstdc++-v3/include/bits/atomic_base.h

@@ -68,11 +68,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     return __mo2;
   }
 
-  void
-  atomic_thread_fence(memory_order __m) noexcept;
+  inline void
+  atomic_thread_fence(memory_order __m) noexcept
+  {
+    __atomic_thread_fence (__m);
+  }
 
-  void
-  atomic_signal_fence(memory_order __m) noexcept;
+  inline void
+  atomic_signal_fence(memory_order __m) noexcept
+  {
+    __atomic_thread_fence (__m);
+  }
 
   /// kill_dependency
   template<typename _Tp>
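
With atomic_thread_fence now inlined onto the built-in, the usual fence pairing is unchanged; the producer/consumer sketch below is illustrative usage only (not from the patch), showing a release fence before a relaxed store pairing with an acquire fence after a relaxed load.

// Illustrative fence pairing, not part of the patch.
#include <atomic>

std::atomic<int> payload(0), ready(0);

void
producer()
{
  payload.store(42, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_release);  // inlined __atomic_thread_fence
  ready.store(1, std::memory_order_relaxed);
}

int
consumer()
{
  while (ready.load(std::memory_order_relaxed) == 0)
    { }                                                 // spin until ready
  std::atomic_thread_fence(std::memory_order_acquire);
  return payload.load(std::memory_order_relaxed);       // guaranteed to read 42
}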
@@ -261,35 +267,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     bool
     test_and_set(memory_order __m = memory_order_seq_cst) noexcept
     {
-      /* The standard *requires* this to be lock free.  If exchange is not
-	 always lock free, then resort to the old test_and_set.  */
-      if (__atomic_always_lock_free (sizeof (_M_i), 0))
-	return __atomic_exchange_n(&_M_i, 1, __m);
-      else
-	{
-	  /* Sync test and set is only guaranteed to be acquire.  */
-	  if (__m == memory_order_seq_cst || __m == memory_order_release
-	      || __m == memory_order_acq_rel)
-	    atomic_thread_fence (__m);
-	  return __sync_lock_test_and_set (&_M_i, 1);
-	}
+      return __atomic_test_and_set (&_M_i, __m);
     }
 
     bool
     test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
     {
-      /* The standard *requires* this to be lock free.  If exchange is not
-	 always lock free, then resort to the old test_and_set.  */
-      if (__atomic_always_lock_free (sizeof (_M_i), 0))
-	return __atomic_exchange_n(&_M_i, 1, __m);
-      else
-	{
-	  /* Sync test and set is only guaranteed to be acquire.  */
-	  if (__m == memory_order_seq_cst || __m == memory_order_release
-	      || __m == memory_order_acq_rel)
-	    atomic_thread_fence (__m);
-	  return __sync_lock_test_and_set (&_M_i, 1);
-	}
+      return __atomic_test_and_set (&_M_i, __m);
     }
 
     void
@@ -299,17 +283,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __glibcxx_assert(__m != memory_order_acquire);
       __glibcxx_assert(__m != memory_order_acq_rel);
 
-      /* The standard *requires* this to be lock free.  If store is not always
-	 lock free, then resort to the old style __sync_lock_release.  */
-      if (__atomic_always_lock_free (sizeof (_M_i), 0))
-	__atomic_store_n(&_M_i, 0, __m);
-      else
-	{
-	  __sync_lock_release (&_M_i, 0);
-	  /* __sync_lock_release is only guaranteed to be a release barrier.  */
-	  if (__m == memory_order_seq_cst)
-	    atomic_thread_fence (__m);
-	}
+      __atomic_clear (&_M_i, __m);
     }
 
     void
@@ -319,17 +293,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
 
-      /* The standard *requires* this to be lock free.  If store is not always
-	 lock free, then resort to the old style __sync_lock_release.  */
-      if (__atomic_always_lock_free (sizeof (_M_i), 0))
-	__atomic_store_n(&_M_i, 0, __m);
-      else
-	{
-	  __sync_lock_release (&_M_i, 0);
-	  /* __sync_lock_release is only guaranteed to be a release barrier.  */
-	  if (__m == memory_order_seq_cst)
-	    atomic_thread_fence (__m);
-	}
+      __atomic_clear (&_M_i, __m);
     }
   };
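
After this change each atomic_flag member reduces to a single built-in call; the spin-lock sketch below is the canonical use of std::atomic_flag and is illustrative only, not taken from the patch or the test suite.

// Illustrative std::atomic_flag spin lock, not part of the patch.
#include <atomic>

std::atomic_flag guard = ATOMIC_FLAG_INIT;

void
enter()
{
  // Maps onto __atomic_test_and_set (&_M_i, __m) with __m = memory_order_acquire.
  while (guard.test_and_set(std::memory_order_acquire))
    { }                       // spin until the flag was previously clear
}

void
leave()
{
  // Maps onto __atomic_clear (&_M_i, __m) with __m = memory_order_release.
  guard.clear(std::memory_order_release);
}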