Enable -Winvalid-memory-model for C++ [PR99612].

Resolves:
PR middle-end/99612 - Remove "#pragma GCC system_header" from atomic file to warn on incorrect memory order

gcc/ChangeLog:

	PR middle-end/99612
	* builtins.c (get_memmodel): Move warning code to
	gimple-ssa-warn-access.cc.
	(expand_builtin_atomic_compare_exchange): Same.
	(expand_ifn_atomic_compare_exchange): Same.
	(expand_builtin_atomic_load): Same.
	(expand_builtin_atomic_store): Same.
	(expand_builtin_atomic_clear): Same.
	* doc/extend.texi (__atomic_exchange_n): Update valid memory
	models.
	* gimple-ssa-warn-access.cc (memmodel_to_uhwi): New function.
	(struct memmodel_pair): New struct.
	(memmodel_name): New function.
	(pass_waccess::maybe_warn_memmodel): New function.
	(pass_waccess::check_atomic_memmodel): New function.
	(pass_waccess::check_atomic_builtin): Handle memory model.
	* input.c (expansion_point_location_if_in_system_header): Return
	original location if expansion location is in a system header.

gcc/testsuite/ChangeLog:

	PR middle-end/99612
	* c-c++-common/pr83059.c: Adjust text of expected diagnostics.
	* gcc.dg/atomic-invalid-2.c: Same.
	* gcc.dg/atomic-invalid.c: Same.
	* c-c++-common/Winvalid-memory-model.c: New test.
	* g++.dg/warn/Winvalid-memory-model-2.C: New test.
	* g++.dg/warn/Winvalid-memory-model.C: New test.
Author: Martin Sebor
Date:   2022-01-04 13:44:23 -07:00
Commit: 5a431b60d1 (parent 708b87dcb6)
10 changed files with 736 additions and 120 deletions
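
For reference, a minimal C++ example of the kind of call this change is meant
to diagnose (the names are illustrative; the expected warning mirrors the new
g++.dg/warn tests added below):

    #include <atomic>

    std::atomic<int> counter;

    int bad_load ()
    {
      // memory_order_release is not a valid order for an atomic load; with
      // this change -Winvalid-memory-model fires here even though the
      // underlying __atomic built-in call is inlined from the <atomic>
      // system header.
      return counter.load (std::memory_order_release);
    }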

gcc/builtins.c

@@ -5791,35 +5791,22 @@ expand_builtin_sync_lock_release (machine_mode mode, tree exp)
static enum memmodel
get_memmodel (tree exp)
{
rtx op;
unsigned HOST_WIDE_INT val;
location_t loc
= expansion_point_location_if_in_system_header (input_location);
/* If the parameter is not a constant, it's a run time value so we'll just
convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking. */
if (TREE_CODE (exp) != INTEGER_CST)
return MEMMODEL_SEQ_CST;
op = expand_normal (exp);
rtx op = expand_normal (exp);
val = INTVAL (op);
unsigned HOST_WIDE_INT val = INTVAL (op);
if (targetm.memmodel_check)
val = targetm.memmodel_check (val);
else if (val & ~MEMMODEL_MASK)
{
warning_at (loc, OPT_Winvalid_memory_model,
"unknown architecture specifier in memory model to builtin");
return MEMMODEL_SEQ_CST;
}
/* Should never see a user explicit SYNC memodel model, so >= LAST works. */
if (memmodel_base (val) >= MEMMODEL_LAST)
{
warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model argument to builtin");
return MEMMODEL_SEQ_CST;
}
/* Workaround for Bugzilla 59448. GCC doesn't track consume properly, so
be conservative and promote consume to acquire. */
@@ -5866,28 +5853,17 @@ expand_builtin_atomic_compare_exchange (machine_mode mode, tree exp,
{
rtx expect, desired, mem, oldval;
rtx_code_label *label;
enum memmodel success, failure;
tree weak;
bool is_weak;
location_t loc
= expansion_point_location_if_in_system_header (input_location);
success = get_memmodel (CALL_EXPR_ARG (exp, 4));
failure = get_memmodel (CALL_EXPR_ARG (exp, 5));
memmodel success = get_memmodel (CALL_EXPR_ARG (exp, 4));
memmodel failure = get_memmodel (CALL_EXPR_ARG (exp, 5));
if (failure > success)
{
warning_at (loc, OPT_Winvalid_memory_model,
"failure memory model cannot be stronger than success "
"memory model for %<__atomic_compare_exchange%>");
success = MEMMODEL_SEQ_CST;
}
if (is_mm_release (failure) || is_mm_acq_rel (failure))
{
warning_at (loc, OPT_Winvalid_memory_model,
"invalid failure memory model for "
"%<__atomic_compare_exchange%>");
failure = MEMMODEL_SEQ_CST;
success = MEMMODEL_SEQ_CST;
}
@@ -5992,29 +5968,15 @@ expand_ifn_atomic_compare_exchange (gcall *call)
int size = tree_to_shwi (gimple_call_arg (call, 3)) & 255;
gcc_assert (size == 1 || size == 2 || size == 4 || size == 8 || size == 16);
machine_mode mode = int_mode_for_size (BITS_PER_UNIT * size, 0).require ();
rtx expect, desired, mem, oldval, boolret;
enum memmodel success, failure;
tree lhs;
bool is_weak;
location_t loc
= expansion_point_location_if_in_system_header (gimple_location (call));
success = get_memmodel (gimple_call_arg (call, 4));
failure = get_memmodel (gimple_call_arg (call, 5));
memmodel success = get_memmodel (gimple_call_arg (call, 4));
memmodel failure = get_memmodel (gimple_call_arg (call, 5));
if (failure > success)
{
warning_at (loc, OPT_Winvalid_memory_model,
"failure memory model cannot be stronger than success "
"memory model for %<__atomic_compare_exchange%>");
success = MEMMODEL_SEQ_CST;
}
if (is_mm_release (failure) || is_mm_acq_rel (failure))
{
warning_at (loc, OPT_Winvalid_memory_model,
"invalid failure memory model for "
"%<__atomic_compare_exchange%>");
failure = MEMMODEL_SEQ_CST;
success = MEMMODEL_SEQ_CST;
}
@@ -6026,15 +5988,15 @@ expand_ifn_atomic_compare_exchange (gcall *call)
}
/* Expand the operands. */
mem = get_builtin_sync_mem (gimple_call_arg (call, 0), mode);
rtx mem = get_builtin_sync_mem (gimple_call_arg (call, 0), mode);
expect = expand_expr_force_mode (gimple_call_arg (call, 1), mode);
desired = expand_expr_force_mode (gimple_call_arg (call, 2), mode);
rtx expect = expand_expr_force_mode (gimple_call_arg (call, 1), mode);
rtx desired = expand_expr_force_mode (gimple_call_arg (call, 2), mode);
is_weak = (tree_to_shwi (gimple_call_arg (call, 3)) & 256) != 0;
bool is_weak = (tree_to_shwi (gimple_call_arg (call, 3)) & 256) != 0;
boolret = NULL;
oldval = NULL;
rtx boolret = NULL;
rtx oldval = NULL;
if (!expand_atomic_compare_and_swap (&boolret, &oldval, mem, expect, desired,
is_weak, success, failure))
@@ -6043,7 +6005,7 @@ expand_ifn_atomic_compare_exchange (gcall *call)
return;
}
lhs = gimple_call_lhs (call);
tree lhs = gimple_call_lhs (call);
if (lhs)
{
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
@@ -6062,24 +6024,15 @@ expand_ifn_atomic_compare_exchange (gcall *call)
static rtx
expand_builtin_atomic_load (machine_mode mode, tree exp, rtx target)
{
rtx mem;
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 1));
if (is_mm_release (model) || is_mm_acq_rel (model))
{
location_t loc
= expansion_point_location_if_in_system_header (input_location);
warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_load%>");
model = MEMMODEL_SEQ_CST;
}
if (!flag_inline_atomics)
return NULL_RTX;
/* Expand the operand. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
rtx mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
return expand_atomic_load (target, mem, model);
}
@@ -6093,26 +6046,17 @@ expand_builtin_atomic_load (machine_mode mode, tree exp, rtx target)
static rtx
expand_builtin_atomic_store (machine_mode mode, tree exp)
{
rtx mem, val;
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 2));
memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 2));
if (!(is_mm_relaxed (model) || is_mm_seq_cst (model)
|| is_mm_release (model)))
{
location_t loc
= expansion_point_location_if_in_system_header (input_location);
warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
model = MEMMODEL_SEQ_CST;
}
if (!flag_inline_atomics)
return NULL_RTX;
/* Expand the operands. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
rtx mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
rtx val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
return expand_atomic_store (mem, val, model, false);
}
@@ -6370,29 +6314,19 @@ expand_ifn_atomic_op_fetch_cmp_0 (gcall *call)
static rtx
expand_builtin_atomic_clear (tree exp)
{
machine_mode mode;
rtx mem, ret;
enum memmodel model;
mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require ();
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
machine_mode mode = int_mode_for_size (BOOL_TYPE_SIZE, 0).require ();
rtx mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 1));
if (is_mm_consume (model) || is_mm_acquire (model) || is_mm_acq_rel (model))
{
location_t loc
= expansion_point_location_if_in_system_header (input_location);
warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
model = MEMMODEL_SEQ_CST;
}
/* Try issuing an __atomic_store, and allow fallback to __sync_lock_release.
Failing that, a store is issued by __atomic_store. The only way this can
fail is if the bool type is larger than a word size. Unlikely, but
handle it anyway for completeness. Assume a single threaded model since
there is no atomic support in this case, and no barriers are required. */
ret = expand_atomic_store (mem, const0_rtx, model, true);
rtx ret = expand_atomic_store (mem, const0_rtx, model, true);
if (!ret)
emit_move_insn (mem, const0_rtx);
return const0_rtx;

gcc/doc/extend.texi

@@ -12457,9 +12457,7 @@ This built-in function implements an atomic exchange operation. It writes
@var{val} into @code{*@var{ptr}}, and returns the previous contents of
@code{*@var{ptr}}.
The valid memory order variants are
@code{__ATOMIC_RELAXED}, @code{__ATOMIC_SEQ_CST}, @code{__ATOMIC_ACQUIRE},
@code{__ATOMIC_RELEASE}, and @code{__ATOMIC_ACQ_REL}.
All memory order variants are valid.
@end deftypefn
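
As a quick illustration of the updated documentation (a sketch, not part of
the patch): every memory order is accepted for the exchange built-in, so none
of the calls below is expected to trigger -Winvalid-memory-model, matching
test_exchange in the new c-c++-common test:

    int v;

    int swap_all_orders (int x)
    {
      int r = 0;
      r += __atomic_exchange_n (&v, x, __ATOMIC_RELAXED);
      r += __atomic_exchange_n (&v, x, __ATOMIC_CONSUME);
      r += __atomic_exchange_n (&v, x, __ATOMIC_ACQUIRE);
      r += __atomic_exchange_n (&v, x, __ATOMIC_RELEASE);
      r += __atomic_exchange_n (&v, x, __ATOMIC_ACQ_REL);
      r += __atomic_exchange_n (&v, x, __ATOMIC_SEQ_CST);
      return r;
    }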

gcc/gimple-ssa-warn-access.cc

@@ -29,6 +29,7 @@
#include "gimple.h"
#include "tree-pass.h"
#include "builtins.h"
#include "diagnostic.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "gimple-ssa-warn-access.h"
@@ -38,6 +39,8 @@
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "langhooks.h"
#include "memmodel.h"
#include "target.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-cfg.h"
@@ -2103,6 +2106,8 @@ private:
void maybe_check_dealloc_call (gcall *);
void maybe_check_access_sizes (rdwr_map *, tree, tree, gimple *);
bool maybe_warn_memmodel (gimple *, tree, tree, const unsigned char *);
void check_atomic_memmodel (gimple *, tree, tree, const unsigned char *);
/* A pointer_query object and its cache to store information about
pointers and their targets in. */
@@ -2686,6 +2691,237 @@ pass_waccess::check_read_access (gimple *stmt, tree src,
&data, m_ptr_qry.rvals);
}
/* Return true if memory model ORD is constant in the context of STMT and
set *CSTVAL to the constant value. Otherwise return false. Warn for
invalid ORD. */
bool
memmodel_to_uhwi (tree ord, gimple *stmt, unsigned HOST_WIDE_INT *cstval)
{
unsigned HOST_WIDE_INT val;
if (TREE_CODE (ord) == INTEGER_CST)
{
if (!tree_fits_uhwi_p (ord))
return false;
val = tree_to_uhwi (ord);
}
else
{
/* Use the range query to determine constant values in the absence
of constant propagation (such as at -O0). */
value_range rng;
if (!get_range_query (cfun)->range_of_expr (rng, ord, stmt)
|| !rng.constant_p ()
|| !rng.singleton_p (&ord))
return false;
wide_int lob = rng.lower_bound ();
if (!wi::fits_uhwi_p (lob))
return false;
val = lob.to_shwi ();
}
if (targetm.memmodel_check)
/* This might warn for an invalid VAL but return a conservatively
valid result. */
val = targetm.memmodel_check (val);
else if (val & ~MEMMODEL_MASK)
{
tree fndecl = gimple_call_fndecl (stmt);
location_t loc = gimple_location (stmt);
loc = expansion_point_location_if_in_system_header (loc);
warning_at (loc, OPT_Winvalid_memory_model,
"unknown architecture specifier in memory model "
"%wi for %qD", val, fndecl);
return false;
}
*cstval = val;
return true;
}
/* Valid memory model for each set of atomic built-in functions. */
struct memmodel_pair
{
memmodel modval;
const char* modname;
#define MEMMODEL_PAIR(val, str) \
{ MEMMODEL_ ## val, "memory_order_" str }
};
/* Valid memory models in the order of increasing strength. */
static const memmodel_pair memory_models[] =
{ MEMMODEL_PAIR (RELAXED, "relaxed"),
MEMMODEL_PAIR (SEQ_CST, "seq_cst"),
MEMMODEL_PAIR (ACQUIRE, "acquire"),
MEMMODEL_PAIR (CONSUME, "consume"),
MEMMODEL_PAIR (RELEASE, "release"),
MEMMODEL_PAIR (ACQ_REL, "acq_rel")
};
/* Return the name of the memory model VAL. */
static const char*
memmodel_name (unsigned HOST_WIDE_INT val)
{
val = memmodel_base (val);
for (unsigned i = 0; i != sizeof memory_models / sizeof *memory_models; ++i)
{
if (val == memory_models[i].modval)
return memory_models[i].modname;
}
return NULL;
}
/* Indices of valid MEMORY_MODELS above for corresponding atomic operations. */
static const unsigned char load_models[] = { 0, 1, 2, 3, UCHAR_MAX };
static const unsigned char store_models[] = { 0, 1, 4, UCHAR_MAX };
static const unsigned char xchg_models[] = { 0, 1, 3, 4, 5, UCHAR_MAX };
static const unsigned char flag_clr_models[] = { 0, 1, 4, UCHAR_MAX };
static const unsigned char all_models[] = { 0, 1, 2, 3, 4, 5, UCHAR_MAX };
/* Check the success memory model argument ORD_SUCS to the call STMT to
an atomic function and warn if it's invalid. If nonnull, also check
the failure memory model ORD_FAIL and warn if it's invalid. Return
true if a warning has been issued. */
bool
pass_waccess::maybe_warn_memmodel (gimple *stmt, tree ord_sucs,
tree ord_fail, const unsigned char *valid)
{
unsigned HOST_WIDE_INT sucs, fail = 0;
if (!memmodel_to_uhwi (ord_sucs, stmt, &sucs)
|| (ord_fail && !memmodel_to_uhwi (ord_fail, stmt, &fail)))
return false;
bool is_valid = false;
if (valid)
for (unsigned i = 0; valid[i] != UCHAR_MAX; ++i)
{
memmodel model = memory_models[valid[i]].modval;
if (memmodel_base (sucs) == model)
{
is_valid = true;
break;
}
}
else
is_valid = true;
tree fndecl = gimple_call_fndecl (stmt);
location_t loc = gimple_location (stmt);
loc = expansion_point_location_if_in_system_header (loc);
if (!is_valid)
{
bool warned = false;
if (const char *modname = memmodel_name (sucs))
warned = warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model %qs for %qD",
modname, fndecl);
else
warned = warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model %wi for %qD",
sucs, fndecl);
if (!warned)
return false;
/* Print a note with the valid memory models. */
pretty_printer pp;
pp_show_color (&pp) = pp_show_color (global_dc->printer);
for (unsigned i = 0; valid[i] != UCHAR_MAX; ++i)
{
const char *modname = memory_models[valid[i]].modname;
pp_printf (&pp, "%s%<%s%>", i ? ", " : "", modname);
}
inform (loc, "valid models are %s", pp_formatted_text (&pp));
return true;
}
if (!ord_fail)
return false;
if (fail == MEMMODEL_RELEASE || fail == MEMMODEL_ACQ_REL)
if (const char *failname = memmodel_name (fail))
{
/* If both memory model arguments are valid but their combination
is not, use their names in the warning. */
if (!warning_at (loc, OPT_Winvalid_memory_model,
"invalid failure memory model %qs for %qD",
failname, fndecl))
return false;
inform (loc,
"valid failure models are %qs, %qs, %qs, %qs",
"memory_order_relaxed", "memory_order_seq_cst",
"memory_order_acquire", "memory_order_consume");
return true;
}
if (memmodel_base (fail) <= memmodel_base (sucs))
return false;
if (const char *sucsname = memmodel_name (sucs))
if (const char *failname = memmodel_name (fail))
{
/* If both memory model arguments are valid but their combination
is not, use their names in the warning. */
if (!warning_at (loc, OPT_Winvalid_memory_model,
"failure memory model %qs cannot be stronger "
"than success memory model %qs for %qD",
failname, sucsname, fndecl))
return false;
/* Print a note with the valid failure memory models which are
those with a value less than or equal to the success mode. */
char buf[120];
*buf = '\0';
for (unsigned i = 0;
memory_models[i].modval <= memmodel_base (sucs); ++i)
{
if (*buf)
strcat (buf, ", ");
const char *modname = memory_models[valid[i]].modname;
sprintf (buf + strlen (buf), "'%s'", modname);
}
inform (loc, "valid models are %s", buf);
return true;
}
/* If either memory model argument value is invalid use the numerical
value of both in the message. */
return warning_at (loc, OPT_Winvalid_memory_model,
"failure memory model %wi cannot be stronger "
"than success memory model %wi for %qD",
fail, sucs, fndecl);
}
/* Wrapper for the above. */
void
pass_waccess::check_atomic_memmodel (gimple *stmt, tree ord_sucs,
tree ord_fail, const unsigned char *valid)
{
if (warning_suppressed_p (stmt, OPT_Winvalid_memory_model))
return;
if (maybe_warn_memmodel (stmt, ord_sucs, ord_fail, valid))
return;
suppress_warning (stmt, OPT_Winvalid_memory_model);
}
/* Check a call STMT to an atomic or sync built-in. */
@@ -2699,12 +2935,14 @@ pass_waccess::check_atomic_builtin (gcall *stmt)
/* The size in bytes of the access by the function, and the number
of the second argument to check (if any). */
unsigned bytes = 0, arg2 = UINT_MAX;
unsigned sucs_arg = UINT_MAX, fail_arg = UINT_MAX;
/* Points to the array of indices of valid memory models. */
const unsigned char *pvalid_models = NULL;
switch (DECL_FUNCTION_CODE (callee))
{
#define BUILTIN_ACCESS_SIZE_FNSPEC(N) \
BUILT_IN_ATOMIC_LOAD_ ## N: \
case BUILT_IN_SYNC_FETCH_AND_ADD_ ## N: \
BUILT_IN_SYNC_FETCH_AND_ADD_ ## N: \
case BUILT_IN_SYNC_FETCH_AND_SUB_ ## N: \
case BUILT_IN_SYNC_FETCH_AND_OR_ ## N: \
case BUILT_IN_SYNC_FETCH_AND_AND_ ## N: \
@@ -2720,8 +2958,16 @@ pass_waccess::check_atomic_builtin (gcall *stmt)
case BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_ ## N: \
case BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_ ## N: \
case BUILT_IN_SYNC_LOCK_RELEASE_ ## N: \
case BUILT_IN_ATOMIC_EXCHANGE_ ## N: \
bytes = N; \
break; \
case BUILT_IN_ATOMIC_LOAD_ ## N: \
pvalid_models = load_models; \
sucs_arg = 1; \
/* FALLTHROUGH */ \
case BUILT_IN_ATOMIC_STORE_ ## N: \
if (!pvalid_models) \
pvalid_models = store_models; \
/* FALLTHROUGH */ \
case BUILT_IN_ATOMIC_ADD_FETCH_ ## N: \
case BUILT_IN_ATOMIC_SUB_FETCH_ ## N: \
case BUILT_IN_ATOMIC_AND_FETCH_ ## N: \
@@ -2735,9 +2981,21 @@ pass_waccess::check_atomic_builtin (gcall *stmt)
case BUILT_IN_ATOMIC_FETCH_OR_ ## N: \
case BUILT_IN_ATOMIC_FETCH_XOR_ ## N: \
bytes = N; \
if (sucs_arg == UINT_MAX) \
sucs_arg = 2; \
if (!pvalid_models) \
pvalid_models = all_models; \
break; \
case BUILT_IN_ATOMIC_EXCHANGE_ ## N: \
bytes = N; \
sucs_arg = 3; \
pvalid_models = xchg_models; \
break; \
case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_ ## N: \
bytes = N; \
sucs_arg = 4; \
fail_arg = 5; \
pvalid_models = all_models; \
arg2 = 1
case BUILTIN_ACCESS_SIZE_FNSPEC (1);
@@ -2751,10 +3009,28 @@ pass_waccess::check_atomic_builtin (gcall *stmt)
case BUILTIN_ACCESS_SIZE_FNSPEC (16);
break;
case BUILT_IN_ATOMIC_CLEAR:
sucs_arg = 1;
pvalid_models = flag_clr_models;
break;
default:
return false;
}
unsigned nargs = gimple_call_num_args (stmt);
if (sucs_arg < nargs)
{
tree ord_sucs = gimple_call_arg (stmt, sucs_arg);
tree ord_fail = NULL_TREE;
if (fail_arg < nargs)
ord_fail = gimple_call_arg (stmt, fail_arg);
check_atomic_memmodel (stmt, ord_sucs, ord_fail, pvalid_models);
}
if (!bytes)
return true;
tree size = build_int_cstu (sizetype, bytes);
tree dst = gimple_call_arg (stmt, 0);
check_memop_access (stmt, dst, NULL_TREE, size);
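
Based on the dg-warning and dg-message patterns in the new tests, a call such
as the one below is expected to produce a warning naming the bad model plus a
note listing the valid ones (the built-in's name in the message may carry a
size suffix; this is only a sketch of the expected output):

    int x;

    void store_acquire (int v)
    {
      __atomic_store_n (&x, v, __ATOMIC_ACQUIRE);
      // expected (abridged):
      //   warning: invalid memory model 'memory_order_acquire' for
      //            '__atomic_store...' [-Winvalid-memory-model]
      //   note: valid models are 'memory_order_relaxed',
      //         'memory_order_seq_cst', 'memory_order_release'
    }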

gcc/input.c

@@ -986,10 +986,11 @@ linemap_client_expand_location_to_spelling_point (location_t loc,
}
/* If LOCATION is in a system header and if it is a virtual location for
a token coming from the expansion of a macro, unwind it to the
location of the expansion point of the macro. Otherwise, just return
LOCATION.
/* If LOCATION is in a system header and if it is a virtual location
for a token coming from the expansion of a macro, unwind it to
the location of the expansion point of the macro. If the expansion
point is also in a system header return the original LOCATION.
Otherwise, return the location of the expansion point.
This is used for instance when we want to emit diagnostics about a
token that may be located in a macro that is itself defined in a
@@ -1001,11 +1002,13 @@ linemap_client_expand_location_to_spelling_point (location_t loc,
location_t
expansion_point_location_if_in_system_header (location_t location)
{
if (in_system_header_at (location))
location = linemap_resolve_location (line_table, location,
if (!in_system_header_at (location))
return location;
location_t xloc = linemap_resolve_location (line_table, location,
LRK_MACRO_EXPANSION_POINT,
NULL);
return location;
return in_system_header_at (xloc) ? location : xloc;
}
/* If LOCATION is a virtual location for a token coming from the expansion
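
To illustrate the revised location handling (a hypothetical two-file sketch;
sys.h and ATOMIC_GET are made up for this example): when a macro defined in a
system header is expanded in user code, the diagnostic now points at that
expansion point, and only if the expansion point is itself in a system header
does the original location survive.

    /* sys.h -- hypothetical system header */
    #pragma GCC system_header
    #define ATOMIC_GET(p) __atomic_load_n ((p), __ATOMIC_RELEASE)

    /* user.c */
    #include "sys.h"
    int g;
    int get (void)
    {
      return ATOMIC_GET (&g);   /* -Winvalid-memory-model points here */
    }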

gcc/testsuite/c-c++-common/Winvalid-memory-model.c (new file)

@@ -0,0 +1,239 @@
/* PR middle-end/99612 - Missing warning on incorrect memory order without
-Wsystem-headers
Verify that constants are propagated through calls to inline functions
even at -O0.
Also verify that the informational notes after each warning mention
the valid memory models for each function.
{ dg-do compile }
{ dg-options "-O0 -ftrack-macro-expansion=0" } */
#if !__cplusplus
# define bool _Bool
#endif
extern int ei;
static __attribute__ ((always_inline)) inline
int retval (int val)
{
return val;
}
void test_load (int *pi)
{
int relaxed = retval (__ATOMIC_RELAXED);
*pi++ = __atomic_load_n (&ei, relaxed);
int consume = retval (__ATOMIC_CONSUME);
*pi++ = __atomic_load_n (&ei, consume);
int acquire = retval (__ATOMIC_ACQUIRE);
*pi++ = __atomic_load_n (&ei, acquire);
int release = retval (__ATOMIC_RELEASE);
*pi++ = __atomic_load_n (&ei, release); // { dg-warning "invalid memory model 'memory_order_release'" }
// { dg-message "valid models are 'memory_order_relaxed', 'memory_order_seq_cst', 'memory_order_acquire', 'memory_order_consume'" "note" { target *-*-* } .-1 }
int acq_rel = retval (__ATOMIC_ACQ_REL);
*pi++ = __atomic_load_n (&ei, acq_rel); // { dg-warning "invalid memory model 'memory_order_acq_rel'" }
int seq_cst = retval (__ATOMIC_SEQ_CST);
*pi++ = __atomic_load_n (&ei, seq_cst);
/* Verify a nonconstant range. */
int r0_1 = *pi++;
if (r0_1 < 0 || 1 < r0_1)
r0_1 = 0;
*pi++ = __atomic_load_n (&ei, r0_1);
/* Verify an unbounded range. */
int unknown = *pi++;
*pi++ = __atomic_load_n (&ei, unknown);
}
void test_store (int *pi, int x)
{
int relaxed = retval (__ATOMIC_RELAXED);
__atomic_store_n (pi++, x, relaxed);
int consume = retval (__ATOMIC_CONSUME);
__atomic_store_n (pi++, x, consume); // { dg-warning "invalid memory model 'memory_order_consume'" }
// { dg-message "valid models are 'memory_order_relaxed', 'memory_order_seq_cst', 'memory_order_release'" "note" { target *-*-* } .-1 }
int acquire = retval (__ATOMIC_ACQUIRE);
__atomic_store_n (pi++, x, acquire); // { dg-warning "invalid memory model 'memory_order_acquire'" }
int release = retval (__ATOMIC_RELEASE);
__atomic_store_n (pi++, x, release);
int acq_rel = retval (__ATOMIC_ACQ_REL);
__atomic_store_n (pi++, x, acq_rel); // { dg-warning "invalid memory model 'memory_order_acq_rel'" }
int seq_cst = retval (__ATOMIC_SEQ_CST);
__atomic_store_n (pi++, x, seq_cst);
int unknown = *pi++;
__atomic_store_n (pi++, x, unknown);
}
/* All memory models are valid. */
void test_exchange (int *pi, int x)
{
int relaxed = retval (__ATOMIC_RELAXED);
__atomic_exchange_n (pi++, x, relaxed);
int consume = retval (__ATOMIC_CONSUME);
__atomic_exchange_n (pi++, x, consume);
int acquire = retval (__ATOMIC_ACQUIRE);
__atomic_exchange_n (pi++, x, acquire);
int release = retval (__ATOMIC_RELEASE);
__atomic_exchange_n (pi++, x, release);
int acq_rel = retval (__ATOMIC_ACQ_REL);
__atomic_exchange_n (pi++, x, acq_rel);
int seq_cst = retval (__ATOMIC_SEQ_CST);
__atomic_exchange_n (pi++, x, seq_cst);
int unknown = *pi++;
__atomic_exchange_n (pi++, x, unknown);
}
void test_compare_exchange (int *pi, int *pj, bool weak)
{
#define cmpxchg(x, expect, desire, sucs_ord, fail_ord) \
__atomic_compare_exchange_n (x, expect, desire, weak, sucs_ord, fail_ord)
int relaxed = retval (__ATOMIC_RELAXED);
cmpxchg (&ei, pi++, *pj++, relaxed, relaxed);
int consume = retval (__ATOMIC_CONSUME);
cmpxchg (&ei, pi++, *pj++, relaxed, consume); // { dg-warning "failure memory model 'memory_order_consume' cannot be stronger than success memory model 'memory_order_relaxed'" }
int acquire = retval (__ATOMIC_ACQUIRE);
cmpxchg (&ei, pi++, *pj++, relaxed, acquire); // { dg-warning "failure memory model 'memory_order_acquire' cannot be stronger than success memory model 'memory_order_relaxed'" }
int release = retval (__ATOMIC_RELEASE);
cmpxchg (&ei, pi++, *pj++, relaxed, release); // { dg-warning "invalid failure memory model 'memory_order_release'" }
int acq_rel = retval (__ATOMIC_ACQ_REL);
cmpxchg (&ei, pi++, *pj++, relaxed, acq_rel); // { dg-warning "invalid failure memory model 'memory_order_acq_rel'" }
int seq_cst = retval (__ATOMIC_SEQ_CST);
cmpxchg (&ei, pi++, *pj++, relaxed, seq_cst); // { dg-warning "failure memory model 'memory_order_seq_cst' cannot be stronger than success memory model 'memory_order_relaxed'" }
cmpxchg (&ei, pi++, *pj++, consume, relaxed);
cmpxchg (&ei, pi++, *pj++, consume, consume);
cmpxchg (&ei, pi++, *pj++, consume, acquire); // { dg-warning "failure memory model 'memory_order_acquire' cannot be stronger than success memory model 'memory_order_consume'" }
cmpxchg (&ei, pi++, *pj++, consume, release); // { dg-warning "invalid failure memory model 'memory_order_release'" }
cmpxchg (&ei, pi++, *pj++, consume, acq_rel); // { dg-warning "invalid failure memory model 'memory_order_acq_rel'" }
cmpxchg (&ei, pi++, *pj++, consume, seq_cst); // { dg-warning "failure memory model 'memory_order_seq_cst' cannot be stronger than success memory model 'memory_order_consume'" }
cmpxchg (&ei, pi++, *pj++, acquire, relaxed);
cmpxchg (&ei, pi++, *pj++, acquire, consume);
cmpxchg (&ei, pi++, *pj++, acquire, acquire);
cmpxchg (&ei, pi++, *pj++, acquire, release); // { dg-warning "invalid failure memory model 'memory_order_release'" }
cmpxchg (&ei, pi++, *pj++, acquire, acq_rel); // { dg-warning "invalid failure memory model 'memory_order_acq_rel'" }
cmpxchg (&ei, pi++, *pj++, acquire, seq_cst); // { dg-warning "failure memory model 'memory_order_seq_cst' cannot be stronger than success memory model 'memory_order_acquire'" }
cmpxchg (&ei, pi++, *pj++, release, relaxed);
cmpxchg (&ei, pi++, *pj++, release, consume);
cmpxchg (&ei, pi++, *pj++, release, acquire);
cmpxchg (&ei, pi++, *pj++, release, release); // { dg-warning "invalid failure memory model 'memory_order_release'" }
cmpxchg (&ei, pi++, *pj++, release, acq_rel); // { dg-warning "invalid failure memory model 'memory_order_acq_rel'" }
cmpxchg (&ei, pi++, *pj++, release, seq_cst); // { dg-warning "failure memory model 'memory_order_seq_cst' cannot be stronger than success memory model 'memory_order_release'" }
cmpxchg (&ei, pi++, *pj++, acq_rel, relaxed);
cmpxchg (&ei, pi++, *pj++, acq_rel, consume);
cmpxchg (&ei, pi++, *pj++, acq_rel, acquire);
cmpxchg (&ei, pi++, *pj++, acq_rel, release); // { dg-warning "invalid failure memory model 'memory_order_release'" }
cmpxchg (&ei, pi++, *pj++, acq_rel, acq_rel); // { dg-warning "invalid failure memory model 'memory_order_acq_rel'" }
cmpxchg (&ei, pi++, *pj++, acq_rel, seq_cst); // { dg-warning "failure memory model 'memory_order_seq_cst' cannot be stronger than success memory model 'memory_order_acq_rel'" }
cmpxchg (&ei, pi++, *pj++, seq_cst, relaxed);
cmpxchg (&ei, pi++, *pj++, seq_cst, consume);
cmpxchg (&ei, pi++, *pj++, seq_cst, acquire);
cmpxchg (&ei, pi++, *pj++, seq_cst, release); // { dg-warning "invalid failure memory model 'memory_order_release'" }
cmpxchg (&ei, pi++, *pj++, seq_cst, acq_rel); // { dg-warning "invalid failure memory model 'memory_order_acq_rel'" }
cmpxchg (&ei, pi++, *pj++, seq_cst, seq_cst);
int unknown = *pi++;
cmpxchg (&ei, pi++, *pj++, unknown, seq_cst);
cmpxchg (&ei, pi++, *pj++, relaxed, unknown);
}
/* All memory models are valid. */
void test_add_fetch (unsigned *pi, unsigned x)
{
int relaxed = retval (__ATOMIC_RELAXED);
__atomic_add_fetch (pi++, x, relaxed);
int consume = retval (__ATOMIC_CONSUME);
__atomic_add_fetch (pi++, x, consume);
int acquire = retval (__ATOMIC_ACQUIRE);
__atomic_add_fetch (pi++, x, acquire);
int release = retval (__ATOMIC_RELEASE);
__atomic_add_fetch (pi++, x, release);
int acq_rel = retval (__ATOMIC_ACQ_REL);
__atomic_add_fetch (pi++, x, acq_rel);
int seq_cst = retval (__ATOMIC_SEQ_CST);
__atomic_add_fetch (pi++, x, seq_cst);
int invalid;
if (x & 1)
{
invalid = retval (123);
__atomic_add_fetch (pi++, x, invalid); // { dg-warning "invalid memory model 123 for '\(unsigned int \)?__atomic_add_fetch" }
}
else
{
invalid = retval (456);
__atomic_add_fetch (pi++, x, invalid); // { dg-warning "invalid memory model 456 for '\(unsigned int \)?__atomic_add_fetch" }
}
}
void test_sub_fetch (unsigned *pi, unsigned x)
{
int relaxed = retval (__ATOMIC_RELAXED);
__atomic_sub_fetch (pi++, x, relaxed);
int consume = retval (__ATOMIC_CONSUME);
__atomic_sub_fetch (pi++, x, consume);
int acquire = retval (__ATOMIC_ACQUIRE);
__atomic_sub_fetch (pi++, x, acquire);
int release = retval (__ATOMIC_RELEASE);
__atomic_sub_fetch (pi++, x, release);
int acq_rel = retval (__ATOMIC_ACQ_REL);
__atomic_sub_fetch (pi++, x, acq_rel);
int seq_cst = retval (__ATOMIC_SEQ_CST);
__atomic_sub_fetch (pi++, x, seq_cst);
int invalid;
if (x & 1)
{
invalid = retval (123);
__atomic_sub_fetch (pi++, x, invalid); // { dg-warning "invalid memory model 123 for '\(unsigned int \)?__atomic_sub_fetch" }
}
else
{
invalid = retval (456);
__atomic_sub_fetch (pi++, x, invalid); // { dg-warning "invalid memory model 456 for '\(unsigned int \)?__atomic_sub_fetch" }
}
}

gcc/testsuite/c-c++-common/pr83059.c

@@ -1,10 +1,13 @@
/* PR c++/83059 */
/* PR c++/83059 - ICE on invalid C++ code: in tree_to_uhwi, at tree.c:6633 */
/* { dg-do compile } */
void
foo (int *p, int *q, int *r)
{
__atomic_compare_exchange (p, q, r, 0, 0, -1); /* { dg-warning "invalid memory model argument 6" } */
/* { dg-warning "unknown architecture specifi" "" { target *-*-* } .-1 } */
/* { dg-warning "failure memory model cannot be stronger than success memory model" "" { target *-*-* } .-2 } */
}
/* The test triggers several distinct instances of the warning. Prune
them out; they're not relevant to its main purpose -- to verify
there's no ICE.
{ dg-prune-output "-Winvalid-memory-model" } */

gcc/testsuite/g++.dg/warn/Winvalid-memory-model-2.C (new file)

@@ -0,0 +1,79 @@
/* PR middle-end/99612 - Missing warning on incorrect memory order without
-Wsystem-headers
Verify warnings for atomic functions with optimization.
{ dg-do compile { target c++11 } }
{ dg-options "-O1" } */
#include <atomic>
static const std::memory_order relaxed = std::memory_order_relaxed;
static const std::memory_order consume = std::memory_order_consume;
static const std::memory_order acquire = std::memory_order_acquire;
static const std::memory_order release = std::memory_order_release;
static const std::memory_order acq_rel = std::memory_order_acq_rel;
static const std::memory_order seq_cst = std::memory_order_seq_cst;
extern std::atomic<int> eai;
void test_load (int *pi)
{
*pi++ = eai.load (relaxed);
*pi++ = eai.load (consume);
*pi++ = eai.load (acquire);
*pi++ = eai.load (release); // warning
*pi++ = eai.load (acq_rel); // warning
*pi++ = eai.load (seq_cst);
}
/* { dg-regexp " *inlined from \[^\n\r\]+.C:23:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:24:.*" "" { target *-*-* } 0 }
{ dg-warning "__atomic_load\[^\n\r\]* \\\[-Winvalid-memory-model" "warning" { target *-*-* } 0 } */
void test_store (int *pi)
{
eai.store (*pi++, relaxed);
eai.store (*pi++, consume); // warning
eai.store (*pi++, acquire); // warning
eai.store (*pi++, release);
eai.store (*pi++, acq_rel); // warning
eai.store (*pi++, seq_cst);
}
/* { dg-regexp " *inlined from \[^\n\r\]+.C:36:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:37:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:39:.*" "" { target *-*-* } 0 }
{ dg-warning "__atomic_store\[^\n\r]* \\\[-Winvalid-memory-model" "warning" { target *-*-* } 0 } */
void test_exchange (const int *pi)
{
eai.exchange (*pi++, relaxed);
eai.exchange (*pi++, consume);
eai.exchange (*pi++, acquire);
eai.exchange (*pi++, release);
eai.exchange (*pi++, acq_rel);
eai.exchange (*pi++, seq_cst);
}
void test_compare_exchange (int *pi, int *pj)
{
#define cmpxchg(x, y, z, o1, o2) \
std::atomic_compare_exchange_weak_explicit (x, y, z, o1, o2)
cmpxchg (&eai, pi++, *pj++, relaxed, relaxed);
cmpxchg (&eai, pi++, *pj++, relaxed, consume); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, acquire); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, release); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, acq_rel); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, seq_cst); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, relaxed);
/* { dg-regexp " *inlined from \[^\n\r\]+.C:66:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:67:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:68:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:69:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:70:.*" "" { target *-*-* } 0 }
{ dg-warning "__atomic_compare_exchange\[^\n\r\]* \\\[-Winvalid-memory-model" "cmpxchg 1" { target *-*-* } 0 } */
}

gcc/testsuite/g++.dg/warn/Winvalid-memory-model.C (new file)

@@ -0,0 +1,84 @@
/* PR middle-end/99612 - Missing warning on incorrect memory order without
-Wsystem-headers
Verify warnings for basic atomic functions with no optimization.
{ dg-do compile { target c++11 } }
{ dg-options "-O0 -Wall" } */
#include <atomic>
static const std::memory_order relaxed = std::memory_order_relaxed;
static const std::memory_order consume = std::memory_order_consume;
static const std::memory_order acquire = std::memory_order_acquire;
static const std::memory_order release = std::memory_order_release;
static const std::memory_order acq_rel = std::memory_order_acq_rel;
static const std::memory_order seq_cst = std::memory_order_seq_cst;
extern std::atomic<int> eai;
void test_load (int *pi)
{
*pi++ = eai.load (relaxed);
*pi++ = eai.load (consume);
*pi++ = eai.load (acquire);
*pi++ = eai.load (release); // warning
*pi++ = eai.load (acq_rel); // warning
*pi++ = eai.load (seq_cst);
}
/* { dg-regexp " *inlined from \[^\n\r\]+.C:23:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:24:.*" "" { target *-*-* } 0 }
{ dg-warning "__atomic_load\[^\n\r\]* \\\[-Winvalid-memory-model" "warning" { target *-*-* } 0 } */
void test_store (int *pi)
{
eai.store (*pi++, relaxed);
eai.store (*pi++, consume); // warning
eai.store (*pi++, acquire); // warning
eai.store (*pi++, release);
eai.store (*pi++, acq_rel); // warning
eai.store (*pi++, seq_cst);
}
/* { dg-regexp " *inlined from \[^\n\r\]+.C:36:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:37:.*" "" { target *-*-* } 0 }
{ dg-regexp " *inlined from \[^\n\r\]+.C:39:.*" "" { target *-*-* } 0 }
{ dg-warning "__atomic_store\[^\n\r]* \\\[-Winvalid-memory-model" "warning" { target *-*-* } 0 } */
void test_exchange (const int *pi)
{
eai.exchange (*pi++, relaxed);
eai.exchange (*pi++, consume);
eai.exchange (*pi++, acquire);
eai.exchange (*pi++, release);
eai.exchange (*pi++, acq_rel);
eai.exchange (*pi++, seq_cst);
}
/* The following tests fail because std::atomic_compare_exchange_weak_explicit
is not declared with attribute always_inline (like the member functions
above are). */
void test_compare_exchange (int *pi, int *pj)
{
#define cmpxchg(x, y, z, o1, o2) \
std::atomic_compare_exchange_weak_explicit (x, y, z, o1, o2)
cmpxchg (&eai, pi++, *pj++, relaxed, relaxed);
cmpxchg (&eai, pi++, *pj++, relaxed, consume); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, acquire); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, release); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, acq_rel); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, seq_cst); // warning
cmpxchg (&eai, pi++, *pj++, relaxed, relaxed);
/* HACK: xfail doesn't seem to work for the dg-regexp directives below,
so disable them by prepending an X to their names...
{ Xdg-regexp " *inlined from \[^\n\r\]+.C:66:.*" "" { xfail *-*-* } 0 }
{ Xdg-regexp " *inlined from \[^\n\r\]+.C:67:.*" "" { xfail *-*-* } 0 }
{ Xdg-regexp " *inlined from \[^\n\r\]+.C:68:.*" "" { xfail *-*-* } 0 }
{ Xdg-regexp " *inlined from \[^\n\r\]+.C:69:.*" "" { xfail *-*-* } 0 }
{ Xdg-regexp " *inlined from \[^\n\r\]+.C:70:.*" "" { xfail *-*-* } 0 }
{ dg-warning "__atomic_compare_exchange\[^\n\r\]* \\\[-Winvalid-memory-model" "cmpxchg 1" { xfail *-*-* } 0 } */
}

gcc/testsuite/gcc.dg/atomic-invalid-2.c

@@ -38,13 +38,13 @@ exchange (atomic_int *i)
{
int r;
atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_release); /* { dg-warning "invalid failure memory" } */
atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_acq_rel); /* { dg-warning "invalid failure memory" } */
atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_relaxed, memory_order_consume); /* { dg-warning "failure memory model cannot be stronger" } */
atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_release); /* { dg-warning "invalid failure memory model 'memory_order_release'" } */
atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_seq_cst, memory_order_acq_rel); /* { dg-warning "invalid failure memory model 'memory_order_acq_rel'" } */
atomic_compare_exchange_strong_explicit (i, &r, 0, memory_order_relaxed, memory_order_consume); /* { dg-warning "failure memory model 'memory_order_consume' cannot be stronger than success memory model 'memory_order_relaxed'" } */
atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_seq_cst, memory_order_release); /* { dg-warning "invalid failure memory" } */
atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_seq_cst, memory_order_acq_rel); /* { dg-warning "invalid failure memory" } */
atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_relaxed, memory_order_consume); /* { dg-warning "failure memory model cannot be stronger" } */
atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_seq_cst, memory_order_release); /* { dg-warning "invalid failure memory model 'memory_order_release'" } */
atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_seq_cst, memory_order_acq_rel); /* { dg-warning "invalid failure memory model 'memory_order_acq_rel'" } */
atomic_compare_exchange_weak_explicit (i, &r, 0, memory_order_relaxed, memory_order_consume); /* { dg-warning "failure memory model 'memory_order_consume' cannot be stronger than success memory model 'memory_order_relaxed'" } */
}
/* atomic_flag_clear():

gcc/testsuite/gcc.dg/atomic-invalid.c

@@ -13,7 +13,7 @@ bool x;
int
main ()
{
__atomic_compare_exchange_n (&i, &e, 1, 0, __ATOMIC_RELAXED, __ATOMIC_SEQ_CST); /* { dg-warning "failure memory model cannot be stronger" } */
__atomic_compare_exchange_n (&i, &e, 1, 0, __ATOMIC_RELAXED, __ATOMIC_SEQ_CST); /* { dg-warning "failure memory model 'memory_order_seq_cst' cannot be stronger" } */
__atomic_compare_exchange_n (&i, &e, 1, 0, __ATOMIC_SEQ_CST, __ATOMIC_RELEASE); /* { dg-warning "invalid failure memory" } */
__atomic_compare_exchange_n (&i, &e, 1, 1, __ATOMIC_SEQ_CST, __ATOMIC_ACQ_REL); /* { dg-warning "invalid failure memory" } */