re PR target/65697 (__atomic memory barriers not strong enough for __sync builtins)

2015-05-12  Andrew MacLeod  <amacleod@redhat.com>

	PR target/65697
	* coretypes.h (MEMMODEL_SYNC, MEMMODEL_BASE_MASK): New macros.
	(enum memmodel): Add SYNC_{ACQUIRE,RELEASE,SEQ_CST}.
	* tree.h (memmodel_from_int, memmodel_base, is_mm_relaxed,
	is_mm_consume, is_mm_acquire, is_mm_release, is_mm_acq_rel,
	is_mm_seq_cst, is_mm_sync): New accessor functions.
	* builtins.c (expand_builtin_sync_operation,
	expand_builtin_compare_and_swap): Use MEMMODEL_SYNC_SEQ_CST.
	(expand_builtin_sync_lock_release): Use MEMMODEL_SYNC_RELEASE.
	(get_memmodel, expand_builtin_atomic_compare_exchange,
	expand_builtin_atomic_load, expand_builtin_atomic_store,
	expand_builtin_atomic_clear): Use new accessor routines.
	(expand_builtin_sync_synchronize): Use MEMMODEL_SYNC_SEQ_CST.
	* optabs.c (expand_compare_and_swap_loop): Use MEMMODEL_SYNC_SEQ_CST.
	(maybe_emit_sync_lock_test_and_set): Use new accessors and
	MEMMODEL_SYNC_ACQUIRE.
	(expand_sync_lock_test_and_set): Use MEMMODEL_SYNC_ACQUIRE.
	(expand_mem_thread_fence, expand_mem_signal_fence, expand_atomic_load,
	expand_atomic_store): Use new accessors.
	* emit-rtl.c (need_atomic_barrier_p): Add additional enum cases.
	* tsan.c (instrument_builtin_call): Update the check for memory models
	beyond the final enum value to use MEMMODEL_LAST.
	* c-family/c-common.c (get_atomic_generic_size): Use new
	memmodel_base accessor.
	* config/aarch64/aarch64.c (aarch64_expand_compare_and_swap): Use new
	accessors.
	* config/aarch64/atomics.md (atomic_load<mode>, atomic_store<mode>,
	aarch64_load_exclusive<mode>, aarch64_store_exclusive<mode>,
	mem_thread_fence, *dmb): Likewise.
	* config/alpha/alpha.c (alpha_split_compare_and_swap,
	alpha_split_compare_and_swap_12): Likewise.
	* config/arm/arm.c (arm_expand_compare_and_swap,
	arm_split_compare_and_swap, arm_split_atomic_op): Likewise.
	* config/arm/sync.md (atomic_load<mode>, atomic_store<mode>,
	atomic_loaddi): Likewise.
	* config/i386/i386.c (ix86_destroy_cost_data, ix86_memmodel_check):
	Likewise.
	* config/i386/sync.md (mem_thread_fence, atomic_store<mode>): Likewise.
	* config/ia64/ia64.c (ia64_expand_atomic_op): Add new memmodel cases and
	use new accessors.
	* config/ia64/sync.md (mem_thread_fence, atomic_load<mode>,
	atomic_store<mode>, atomic_compare_and_swap<mode>,
	atomic_exchange<mode>): Use new accessors.
	* config/mips/mips.c (mips_process_sync_loop): Likewise.
	* config/pa/pa.md (atomic_loaddi, atomic_storedi): Likewise.
	* config/rs6000/rs6000.c (rs6000_pre_atomic_barrier,
	rs6000_post_atomic_barrier): Add new cases.
	(rs6000_expand_atomic_compare_and_swap): Use new accessors.
	* config/rs6000/sync.md (mem_thread_fence): Add new cases.
	(atomic_load<mode>): Add new cases and use new accessors.
	(store_quadpti): Add new cases.
	* config/s390/s390.md (mem_thread_fence, atomic_store<mode>): Use new
	accessors.
	* config/sparc/sparc.c (sparc_emit_membar_for_model): Use new accessors.
	* doc/extend.texi: Update docs to indicate 16 bits are used for memory
	model, not 8.

From-SVN: r223096
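
The user-visible split, sketched in plain C (illustrative only, not part of the commit): the legacy __sync builtins now expand with the new MEMMODEL_SYNC_* models, while the __atomic builtins keep the plain C++11 models, so a target pattern can emit a stronger barrier for the former without weakening the latter.

static char flag;

void
sync_user (void)
{
  while (__sync_lock_test_and_set (&flag, 1)) /* now MEMMODEL_SYNC_ACQUIRE */
    ;
  __sync_lock_release (&flag);                /* now MEMMODEL_SYNC_RELEASE */
  __sync_synchronize ();                      /* now MEMMODEL_SYNC_SEQ_CST */
}

void
atomic_user (void)
{
  /* Unchanged: still the plain MEMMODEL_ACQUIRE / MEMMODEL_RELEASE.  */
  while (__atomic_test_and_set (&flag, __ATOMIC_ACQUIRE))
    ;
  __atomic_clear (&flag, __ATOMIC_RELEASE);
}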
gcc/builtins.c

@@ -5271,7 +5271,7 @@ expand_builtin_sync_operation (machine_mode mode, tree exp,
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SEQ_CST,
return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SYNC_SEQ_CST,
after);
}
@@ -5301,8 +5301,8 @@ expand_builtin_compare_and_swap (machine_mode mode, tree exp,
poval = &target;
}
if (!expand_atomic_compare_and_swap (pbool, poval, mem, old_val, new_val,
false, MEMMODEL_SEQ_CST,
MEMMODEL_SEQ_CST))
false, MEMMODEL_SYNC_SEQ_CST,
MEMMODEL_SYNC_SEQ_CST))
return NULL_RTX;
return target;
@@ -5337,7 +5337,7 @@ expand_builtin_sync_lock_release (machine_mode mode, tree exp)
/* Expand the operands. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
expand_atomic_store (mem, const0_rtx, MEMMODEL_RELEASE, true);
expand_atomic_store (mem, const0_rtx, MEMMODEL_SYNC_RELEASE, true);
}
/* Given an integer representing an ``enum memmodel'', verify its
@@ -5366,7 +5366,8 @@ get_memmodel (tree exp)
return MEMMODEL_SEQ_CST;
}
if ((INTVAL (op) & MEMMODEL_MASK) >= MEMMODEL_LAST)
/* Should never see a user-explicit SYNC memory model, so >= LAST works. */
if (memmodel_base (val) >= MEMMODEL_LAST)
{
warning (OPT_Winvalid_memory_model,
"invalid memory model argument to builtin");
@@ -5433,8 +5434,7 @@ expand_builtin_atomic_compare_exchange (machine_mode mode, tree exp,
success = MEMMODEL_SEQ_CST;
}
if ((failure & MEMMODEL_MASK) == MEMMODEL_RELEASE
|| (failure & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
if (is_mm_release (failure) || is_mm_acq_rel (failure))
{
warning (OPT_Winvalid_memory_model,
"invalid failure memory model for "
@@ -5496,8 +5496,7 @@ expand_builtin_atomic_load (machine_mode mode, tree exp, rtx target)
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
if ((model & MEMMODEL_MASK) == MEMMODEL_RELEASE
|| (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
if (is_mm_release (model) || is_mm_acq_rel (model))
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_load%>");
@@ -5526,9 +5525,8 @@ expand_builtin_atomic_store (machine_mode mode, tree exp)
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 2));
if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED
&& (model & MEMMODEL_MASK) != MEMMODEL_SEQ_CST
&& (model & MEMMODEL_MASK) != MEMMODEL_RELEASE)
if (!(is_mm_relaxed (model) || is_mm_seq_cst (model)
|| is_mm_release (model)))
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
@@ -5635,9 +5633,7 @@ expand_builtin_atomic_clear (tree exp)
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
if ((model & MEMMODEL_MASK) == MEMMODEL_CONSUME
|| (model & MEMMODEL_MASK) == MEMMODEL_ACQUIRE
|| (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
if (is_mm_consume (model) || is_mm_acquire (model) || is_mm_acq_rel (model))
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
@@ -5833,7 +5829,7 @@ expand_builtin_atomic_signal_fence (tree exp)
static void
expand_builtin_sync_synchronize (void)
{
expand_mem_thread_fence (MEMMODEL_SEQ_CST);
expand_mem_thread_fence (MEMMODEL_SYNC_SEQ_CST);
}
static rtx

gcc/c-family/c-common.c

@@ -10792,7 +10792,7 @@ get_atomic_generic_size (location_t loc, tree function,
if (TREE_CODE (p) == INTEGER_CST)
{
int i = tree_to_uhwi (p);
if (i < 0 || (i & MEMMODEL_MASK) >= MEMMODEL_LAST)
if (i < 0 || (memmodel_base (i) >= MEMMODEL_LAST))
{
warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model argument %d of %qE", x + 1,

gcc/config/aarch64/aarch64.c

@@ -9203,8 +9203,8 @@ aarch64_expand_compare_and_swap (rtx operands[])
unlikely event of fail being ACQUIRE and succ being RELEASE we need to
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
if (INTVAL (mod_f) == MEMMODEL_ACQUIRE
&& INTVAL (mod_s) == MEMMODEL_RELEASE)
if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
&& is_mm_release (memmodel_from_int (INTVAL (mod_s))))
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)

gcc/config/aarch64/atomics.md

@@ -260,10 +260,8 @@
UNSPECV_LDA))]
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_RELEASE)
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldr<atomic_sfx>\t%<w>0, %1";
else
return "ldar<atomic_sfx>\t%<w>0, %1";
@@ -278,10 +276,8 @@
UNSPECV_STL))]
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_ACQUIRE)
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return "str<atomic_sfx>\t%<w>1, %0";
else
return "stlr<atomic_sfx>\t%<w>1, %0";
@@ -297,10 +293,8 @@
UNSPECV_LX)))]
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_RELEASE)
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldxr<atomic_sfx>\t%w0, %1";
else
return "ldaxr<atomic_sfx>\t%w0, %1";
@@ -315,10 +309,8 @@
UNSPECV_LX))]
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_RELEASE)
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldxr\t%<w>0, %1";
else
return "ldaxr\t%<w>0, %1";
@@ -335,10 +327,8 @@
UNSPECV_SX))]
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[3]);
if (model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_ACQUIRE)
enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
else
return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
@@ -349,8 +339,8 @@
[(match_operand:SI 0 "const_int_operand" "")]
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[0]);
if (model != MEMMODEL_RELAXED && model != MEMMODEL_CONSUME)
enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
if (!(is_mm_relaxed (model) || is_mm_consume (model)))
emit_insn (gen_dmb (operands[0]));
DONE;
}
@@ -373,8 +363,8 @@
UNSPEC_MB))]
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[1]);
if (model == MEMMODEL_ACQUIRE)
enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
if (is_mm_acquire (model))
return "dmb\\tishld";
else
return "dmb\\tish";

gcc/config/alpha/alpha.c

@@ -4493,8 +4493,8 @@ alpha_split_compare_and_swap (rtx operands[])
oldval = operands[3];
newval = operands[4];
is_weak = (operands[5] != const0_rtx);
mod_s = (enum memmodel) INTVAL (operands[6]);
mod_f = (enum memmodel) INTVAL (operands[7]);
mod_s = memmodel_from_int (INTVAL (operands[6]));
mod_f = memmodel_from_int (INTVAL (operands[7]));
mode = GET_MODE (mem);
alpha_pre_atomic_barrier (mod_s);
@@ -4532,12 +4532,12 @@ alpha_split_compare_and_swap (rtx operands[])
emit_unlikely_jump (x, label1);
}
if (mod_f != MEMMODEL_RELAXED)
if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
if (mod_f == MEMMODEL_RELAXED)
if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
}
@@ -4598,8 +4598,8 @@ alpha_split_compare_and_swap_12 (rtx operands[])
newval = operands[4];
align = operands[5];
is_weak = (operands[6] != const0_rtx);
mod_s = (enum memmodel) INTVAL (operands[7]);
mod_f = (enum memmodel) INTVAL (operands[8]);
mod_s = memmodel_from_int (INTVAL (operands[7]));
mod_f = memmodel_from_int (INTVAL (operands[8]));
scratch = operands[9];
mode = GET_MODE (orig_mem);
addr = XEXP (orig_mem, 0);
@@ -4651,12 +4651,12 @@ alpha_split_compare_and_swap_12 (rtx operands[])
emit_unlikely_jump (x, label1);
}
if (mod_f != MEMMODEL_RELAXED)
if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
if (mod_f == MEMMODEL_RELAXED)
if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
}

gcc/config/arm/arm.c

@@ -27461,8 +27461,8 @@ arm_expand_compare_and_swap (rtx operands[])
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
if (TARGET_HAVE_LDACQ
&& INTVAL (mod_f) == MEMMODEL_ACQUIRE
&& INTVAL (mod_s) == MEMMODEL_RELEASE)
&& is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
&& is_mm_release (memmodel_from_int (INTVAL (mod_s))))
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)
@@ -27535,20 +27535,18 @@ arm_split_compare_and_swap (rtx operands[])
oldval = operands[2];
newval = operands[3];
is_weak = (operands[4] != const0_rtx);
mod_s = (enum memmodel) INTVAL (operands[5]);
mod_f = (enum memmodel) INTVAL (operands[6]);
mod_s = memmodel_from_int (INTVAL (operands[5]));
mod_f = memmodel_from_int (INTVAL (operands[6]));
scratch = operands[7];
mode = GET_MODE (mem);
bool use_acquire = TARGET_HAVE_LDACQ
&& !(mod_s == MEMMODEL_RELAXED
|| mod_s == MEMMODEL_CONSUME
|| mod_s == MEMMODEL_RELEASE);
&& !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
|| is_mm_release (mod_s));
bool use_release = TARGET_HAVE_LDACQ
&& !(mod_s == MEMMODEL_RELAXED
|| mod_s == MEMMODEL_CONSUME
|| mod_s == MEMMODEL_ACQUIRE);
&& !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
|| is_mm_acquire (mod_s));
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
@@ -27586,14 +27584,14 @@ arm_split_compare_and_swap (rtx operands[])
emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
}
if (mod_f != MEMMODEL_RELAXED)
if (!is_mm_relaxed (mod_f))
emit_label (label2);
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
arm_post_atomic_barrier (mod_s);
if (mod_f == MEMMODEL_RELAXED)
if (is_mm_relaxed (mod_f))
emit_label (label2);
}
@@ -27601,21 +27599,19 @@ void
arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
rtx value, rtx model_rtx, rtx cond)
{
enum memmodel model = (enum memmodel) INTVAL (model_rtx);
enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
machine_mode mode = GET_MODE (mem);
machine_mode wmode = (mode == DImode ? DImode : SImode);
rtx_code_label *label;
rtx x;
bool use_acquire = TARGET_HAVE_LDACQ
&& !(model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_RELEASE);
&& !(is_mm_relaxed (model) || is_mm_consume (model)
|| is_mm_release (model));
bool use_release = TARGET_HAVE_LDACQ
&& !(model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_ACQUIRE);
&& !(is_mm_relaxed (model) || is_mm_consume (model)
|| is_mm_acquire (model));
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))

gcc/config/arm/sync.md

@@ -73,10 +73,8 @@
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_RELEASE)
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return \"ldr<sync_sfx>\\t%0, %1\";
else
return \"lda<sync_sfx>\\t%0, %1\";
@@ -91,10 +89,8 @@
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (model == MEMMODEL_RELAXED
|| model == MEMMODEL_CONSUME
|| model == MEMMODEL_ACQUIRE)
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return \"str<sync_sfx>\t%1, %0\";
else
return \"stl<sync_sfx>\t%1, %0\";
@@ -110,10 +106,10 @@
(match_operand:SI 2 "const_int_operand")] ;; model
"TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
if (model == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})

gcc/config/i386/i386.c

@@ -51222,7 +51222,7 @@ ix86_destroy_cost_data (void *data)
static unsigned HOST_WIDE_INT
ix86_memmodel_check (unsigned HOST_WIDE_INT val)
{
unsigned HOST_WIDE_INT model = val & MEMMODEL_MASK;
enum memmodel model = memmodel_from_int (val);
bool strong;
if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
@@ -51233,14 +51233,14 @@ ix86_memmodel_check (unsigned HOST_WIDE_INT val)
"Unknown architecture specific memory model");
return MEMMODEL_SEQ_CST;
}
strong = (model == MEMMODEL_ACQ_REL || model == MEMMODEL_SEQ_CST);
if (val & IX86_HLE_ACQUIRE && !(model == MEMMODEL_ACQUIRE || strong))
strong = (is_mm_acq_rel (model) || is_mm_seq_cst (model));
if (val & IX86_HLE_ACQUIRE && !(is_mm_acquire (model) || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_ACQUIRE not used with ACQUIRE or stronger memory model");
return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
}
if (val & IX86_HLE_RELEASE && !(model == MEMMODEL_RELEASE || strong))
if (val & IX86_HLE_RELEASE && !(is_mm_release (model) || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_RELEASE not used with RELEASE or stronger memory model");

gcc/config/i386/sync.md

@@ -105,11 +105,11 @@
[(match_operand:SI 0 "const_int_operand")] ;; model
""
{
enum memmodel model = (enum memmodel) (INTVAL (operands[0]) & MEMMODEL_MASK);
enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
/* Unless this is a SEQ_CST fence, the i386 memory model is strong
enough not to require barriers of any kind. */
if (model == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
{
rtx (*mfence_insn)(rtx);
rtx mem;
@@ -217,7 +217,7 @@
UNSPEC_STA))]
""
{
enum memmodel model = (enum memmodel) (INTVAL (operands[2]) & MEMMODEL_MASK);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (<MODE>mode == DImode && !TARGET_64BIT)
{
@@ -233,7 +233,7 @@
operands[1] = force_reg (<MODE>mode, operands[1]);
/* For seq-cst stores, when we lack MFENCE, use XCHG. */
if (model == MEMMODEL_SEQ_CST && !(TARGET_64BIT || TARGET_SSE2))
if (is_mm_seq_cst (model) && !(TARGET_64BIT || TARGET_SSE2))
{
emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
operands[0], operands[1],
@@ -246,7 +246,7 @@
operands[2]));
}
/* ... followed by an MFENCE, if required. */
if (model == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})

gcc/config/ia64/ia64.c

@@ -2386,10 +2386,12 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
{
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_memory_barrier ());
/* FALLTHRU */
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_CONSUME:
if (mode == SImode)
icode = CODE_FOR_fetchadd_acq_si;
@@ -2397,6 +2399,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
icode = CODE_FOR_fetchadd_acq_di;
break;
case MEMMODEL_RELEASE:
case MEMMODEL_SYNC_RELEASE:
if (mode == SImode)
icode = CODE_FOR_fetchadd_rel_si;
else
@@ -2423,8 +2426,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
front half of the full barrier. The end half is the cmpxchg.rel.
For relaxed and release memory models, we don't need this. But we
also don't bother trying to prevent it either. */
gcc_assert (model == MEMMODEL_RELAXED
|| model == MEMMODEL_RELEASE
gcc_assert (is_mm_relaxed (model) || is_mm_release (model)
|| MEM_VOLATILE_P (mem));
old_reg = gen_reg_rtx (DImode);
@@ -2468,6 +2470,7 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
{
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_CONSUME:
switch (mode)
{
@@ -2481,8 +2484,10 @@ ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
break;
case MEMMODEL_RELEASE:
case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
switch (mode)
{
case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;

gcc/config/ia64/sync.md

@@ -33,7 +33,7 @@
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (memmodel_from_int (INTVAL (operands[0]))))
emit_insn (gen_memory_barrier ());
DONE;
})
@@ -60,11 +60,11 @@
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
/* Unless the memory model is relaxed, we want to emit ld.acq, which
will happen automatically for volatile memories. */
gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[1]));
gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[1]));
emit_move_insn (operands[0], operands[1]);
DONE;
})
@@ -75,17 +75,17 @@
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
/* Unless the memory model is relaxed, we want to emit st.rel, which
will happen automatically for volatile memories. */
gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[0]));
gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[0]));
emit_move_insn (operands[0], operands[1]);
/* Sequentially consistent stores need a subsequent MF. See
http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001952.html
for a discussion of why a MF is needed here, but not for atomic_load. */
if (model == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
emit_insn (gen_memory_barrier ());
DONE;
})
@@ -101,7 +101,8 @@
(match_operand:SI 7 "const_int_operand" "")] ;; fail model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[6]);
/* No need to distinguish __sync from __atomic, so get base value. */
enum memmodel model = memmodel_base (INTVAL (operands[6]));
rtx ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
rtx dval, eval;
@@ -200,7 +201,8 @@
(match_operand:SI 3 "const_int_operand" "")] ;; succ model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[3]);
/* No need to distinguish __sync from __atomic, so get base value. */
enum memmodel model = memmodel_base (INTVAL (operands[3]));
switch (model)
{

gcc/config/mips/mips.c

@@ -13106,7 +13106,7 @@ mips_process_sync_loop (rtx_insn *insn, rtx *operands)
model = MEMMODEL_ACQUIRE;
break;
default:
model = (enum memmodel) INTVAL (operands[memmodel_attr]);
model = memmodel_from_int (INTVAL (operands[memmodel_attr]));
}
mips_multi_start ();

gcc/config/pa/pa.md

@@ -707,12 +707,12 @@
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
operands[1] = force_reg (SImode, XEXP (operands[1], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1], operands[2]));
if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
@@ -734,12 +734,12 @@
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
operands[0] = force_reg (SImode, XEXP (operands[0], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1], operands[2]));
if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})

gcc/config/rs6000/rs6000.c

@@ -20516,12 +20516,15 @@ rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
case MEMMODEL_RELAXED:
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
case MEMMODEL_SYNC_ACQUIRE:
break;
case MEMMODEL_RELEASE:
case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
@@ -20538,10 +20541,13 @@ rs6000_post_atomic_barrier (enum memmodel model)
case MEMMODEL_RELAXED:
case MEMMODEL_CONSUME:
case MEMMODEL_RELEASE:
case MEMMODEL_SYNC_RELEASE:
break;
case MEMMODEL_ACQUIRE:
case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_isync ());
break;
default:
@@ -20642,8 +20648,8 @@ rs6000_expand_atomic_compare_and_swap (rtx operands[])
oldval = operands[3];
newval = operands[4];
is_weak = (INTVAL (operands[5]) != 0);
mod_s = (enum memmodel) INTVAL (operands[6]);
mod_f = (enum memmodel) INTVAL (operands[7]);
mod_s = memmodel_from_int (INTVAL (operands[6]));
mod_f = memmodel_from_int (INTVAL (operands[7]));
orig_mode = mode = GET_MODE (mem);
mask = shift = NULL_RTX;
@@ -20731,12 +20737,12 @@ rs6000_expand_atomic_compare_and_swap (rtx operands[])
emit_unlikely_jump (x, label1);
}
if (mod_f != MEMMODEL_RELAXED)
if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
rs6000_post_atomic_barrier (mod_s);
if (mod_f == MEMMODEL_RELAXED)
if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
if (shift)

gcc/config/rs6000/sync.md

@@ -41,18 +41,21 @@
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[0]);
enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_RELEASE:
case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
@@ -144,9 +147,9 @@
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (model == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
emit_insn (gen_hwsync ());
if (<MODE>mode != TImode)
@@ -182,7 +185,9 @@
break;
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_loadsync_<mode> (operands[0]));
break;
default:
@@ -209,15 +214,17 @@
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_RELEASE:
case MEMMODEL_SYNC_RELEASE:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:

gcc/config/s390/s390.md

@@ -9225,7 +9225,7 @@
{
/* Unless this is a SEQ_CST fence, the s390 memory model is strong
enough not to require barriers of any kind. */
if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (memmodel_from_int (INTVAL (operands[0]))))
{
rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
MEM_VOLATILE_P (mem) = 1;
@@ -9306,7 +9306,7 @@
(match_operand:SI 2 "const_int_operand")] ;; model
""
{
enum memmodel model = (enum memmodel) INTVAL (operands[2]);
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (MEM_ALIGN (operands[0]) < GET_MODE_BITSIZE (GET_MODE (operands[0])))
FAIL;
@@ -9317,7 +9317,7 @@
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1]));
else
emit_move_insn (operands[0], operands[1]);
if (model == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})

gcc/config/sparc/sparc.c

@@ -11631,9 +11631,8 @@ sparc_emit_membar_for_model (enum memmodel model,
if (before_after & 1)
{
if (model == MEMMODEL_RELEASE
|| model == MEMMODEL_ACQ_REL
|| model == MEMMODEL_SEQ_CST)
if (is_mm_release (model) || is_mm_acq_rel (model)
|| is_mm_seq_cst (model))
{
if (load_store & 1)
mm |= LoadLoad | StoreLoad;
@@ -11643,9 +11642,8 @@
}
if (before_after & 2)
{
if (model == MEMMODEL_ACQUIRE
|| model == MEMMODEL_ACQ_REL
|| model == MEMMODEL_SEQ_CST)
if (is_mm_acquire (model) || is_mm_acq_rel (model)
|| is_mm_seq_cst (model))
{
if (load_store & 1)
mm |= LoadLoad | LoadStore;

gcc/coretypes.h

@@ -263,6 +263,18 @@ enum function_class {
function_c11_misc
};
/* Suppose that higher bits are target dependent. */
#define MEMMODEL_MASK ((1<<16)-1)
/* Legacy sync operations set this upper flag in the memory model. This allows
targets that need to do something stronger for sync operations to
differentiate with their target patterns and issue a more appropriate insn
sequence. See bugzilla 65697 for background. */
#define MEMMODEL_SYNC (1<<15)
/* Memory model without SYNC bit for targets/operations that do not care. */
#define MEMMODEL_BASE_MASK (MEMMODEL_SYNC-1)
/* Memory model types for the __atomic* builtins.
This must match the order in libstdc++-v3/include/bits/atomic_base.h. */
enum memmodel
@@ -273,12 +285,12 @@ enum memmodel
MEMMODEL_RELEASE = 3,
MEMMODEL_ACQ_REL = 4,
MEMMODEL_SEQ_CST = 5,
MEMMODEL_LAST = 6
MEMMODEL_LAST = 6,
MEMMODEL_SYNC_ACQUIRE = MEMMODEL_ACQUIRE | MEMMODEL_SYNC,
MEMMODEL_SYNC_RELEASE = MEMMODEL_RELEASE | MEMMODEL_SYNC,
MEMMODEL_SYNC_SEQ_CST = MEMMODEL_SEQ_CST | MEMMODEL_SYNC
};
/* Suppose that higher bits are target dependent. */
#define MEMMODEL_MASK ((1<<16)-1)
/* Support for user-provided GGC and PCH markers. The first parameter
is a pointer to a pointer, the second a cookie. */
typedef void (*gt_pointer_operator) (void *, void *);
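
In numbers (a standalone sketch, not part of the patch; the enumerators are local stand-ins for the MEMMODEL_* values above): MEMMODEL_SYNC_SEQ_CST is 5 | 0x8000 == 0x8005, and masking with MEMMODEL_BASE_MASK (0x7fff) recovers the base model, so existing range checks keep working.

#include <assert.h>

enum {
  SEQ_CST      = 5,              /* MEMMODEL_SEQ_CST */
  LAST         = 6,              /* MEMMODEL_LAST */
  SYNC         = 1 << 15,        /* MEMMODEL_SYNC */
  BASE_MASK    = SYNC - 1,       /* MEMMODEL_BASE_MASK, 0x7fff */
  SYNC_SEQ_CST = SEQ_CST | SYNC  /* MEMMODEL_SYNC_SEQ_CST, 0x8005 */
};

int
main (void)
{
  /* Masking with BASE_MASK strips the SYNC bit...  */
  assert ((SYNC_SEQ_CST & BASE_MASK) == SEQ_CST);
  /* ...so a check of the form "memmodel_base (m) >= MEMMODEL_LAST" still
     accepts the SYNC variants without listing them explicitly.  */
  assert ((SYNC_SEQ_CST & BASE_MASK) < LAST);
  return 0;
}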

gcc/doc/extend.texi

@@ -8946,9 +8946,9 @@ functions map any run-time value to @code{__ATOMIC_SEQ_CST} rather
than invoke a runtime library call or inline a switch statement. This is
standard compliant, safe, and the simplest approach for now.
The memory model parameter is a signed int, but only the lower 8 bits are
The memory model parameter is a signed int, but only the lower 16 bits are
reserved for the memory model. The remainder of the signed int is reserved
for future use and should be 0. Use of the predefined atomic values
for target use and should be 0. Use of the predefined atomic values
ensures proper usage.
@deftypefn {Built-in Function} @var{type} __atomic_load_n (@var{type} *ptr, int memmodel)
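
As an example of the "target use" the revised text refers to: on x86, GCC layers its HLE hints above the low 16 model bits via __ATOMIC_HLE_ACQUIRE and __ATOMIC_HLE_RELEASE. A short sketch (assumes an HLE-capable x86 target; compile with -mhle):

static int lock_var;

void
hle_acquire (void)
{
  /* The HLE hint occupies bits above the 16-bit memory model field.  */
  while (__atomic_exchange_n (&lock_var, 1,
			      __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE))
    ;
}

void
hle_release (void)
{
  __atomic_store_n (&lock_var, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
}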

gcc/emit-rtl.c

@@ -6296,11 +6296,14 @@ need_atomic_barrier_p (enum memmodel model, bool pre)
case MEMMODEL_CONSUME:
return false;
case MEMMODEL_RELEASE:
case MEMMODEL_SYNC_RELEASE:
return pre;
case MEMMODEL_ACQUIRE:
case MEMMODEL_SYNC_ACQUIRE:
return !pre;
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
case MEMMODEL_SYNC_SEQ_CST:
return true;
default:
gcc_unreachable ();

gcc/optabs.c

@@ -7188,7 +7188,7 @@ expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
success = NULL_RTX;
oldval = cmp_reg;
if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
new_reg, false, MEMMODEL_SEQ_CST,
new_reg, false, MEMMODEL_SYNC_SEQ_CST,
MEMMODEL_RELAXED))
return false;
@@ -7249,9 +7249,7 @@ maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
exists, and the memory model is stronger than acquire, add a release
barrier before the instruction. */
if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST
|| (model & MEMMODEL_MASK) == MEMMODEL_RELEASE
|| (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
expand_mem_thread_fence (model);
if (icode != CODE_FOR_nothing)
@@ -7358,11 +7356,12 @@ expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
rtx ret;
/* Try an atomic_exchange first. */
ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE);
ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
if (ret)
return ret;
ret = maybe_emit_sync_lock_test_and_set (target, mem, val, MEMMODEL_ACQUIRE);
ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
MEMMODEL_SYNC_ACQUIRE);
if (ret)
return ret;
@@ -7373,7 +7372,7 @@ expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
/* If there are no other options, try atomic_test_and_set if the value
being stored is 1. */
if (val == const1_rtx)
ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_ACQUIRE);
ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
return ret;
}
@@ -7630,7 +7629,7 @@ expand_mem_thread_fence (enum memmodel model)
{
if (HAVE_mem_thread_fence)
emit_insn (gen_mem_thread_fence (GEN_INT (model)));
else if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED)
else if (!is_mm_relaxed (model))
{
if (HAVE_memory_barrier)
emit_insn (gen_memory_barrier ());
@@ -7654,7 +7653,7 @@ expand_mem_signal_fence (enum memmodel model)
{
if (HAVE_mem_signal_fence)
emit_insn (gen_mem_signal_fence (GEN_INT (model)));
else if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED)
else if (!is_mm_relaxed (model))
{
/* By default targets are coherent between a thread and the signal
handler running on the same thread. Thus this really becomes a
@@ -7709,7 +7708,7 @@ expand_atomic_load (rtx target, rtx mem, enum memmodel model)
target = gen_reg_rtx (mode);
/* For SEQ_CST, emit a barrier before the load. */
if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
emit_move_insn (target, mem);
@@ -7755,7 +7754,7 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
if (maybe_expand_insn (icode, 2, ops))
{
/* lock_release is only a release barrier. */
if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
return const0_rtx;
}
@@ -7782,7 +7781,7 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
emit_move_insn (mem, val);
/* For SEQ_CST, also emit a barrier after the store. */
if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
return const0_rtx;

gcc/tree.h

@@ -4384,6 +4384,69 @@ extern void assign_assembler_name_if_neeeded (tree);
extern void warn_deprecated_use (tree, tree);
extern void cache_integer_cst (tree);
/* Return the memory model from a host integer. */
static inline enum memmodel
memmodel_from_int (unsigned HOST_WIDE_INT val)
{
return (enum memmodel) (val & MEMMODEL_MASK);
}
/* Return the base memory model from a host integer. */
static inline enum memmodel
memmodel_base (unsigned HOST_WIDE_INT val)
{
return (enum memmodel) (val & MEMMODEL_BASE_MASK);
}
/* Return TRUE if the memory model is RELAXED. */
static inline bool
is_mm_relaxed (enum memmodel model)
{
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELAXED;
}
/* Return TRUE if the memory model is CONSUME. */
static inline bool
is_mm_consume (enum memmodel model)
{
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_CONSUME;
}
/* Return TRUE if the memory model is ACQUIRE. */
static inline bool
is_mm_acquire (enum memmodel model)
{
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQUIRE;
}
/* Return TRUE if the memory model is RELEASE. */
static inline bool
is_mm_release (enum memmodel model)
{
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELEASE;
}
/* Return TRUE if the memory model is ACQ_REL. */
static inline bool
is_mm_acq_rel (enum memmodel model)
{
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQ_REL;
}
/* Return TRUE if the memory model is SEQ_CST. */
static inline bool
is_mm_seq_cst (enum memmodel model)
{
return (model & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST;
}
/* Return TRUE if the memory model is a SYNC variant. */
static inline bool
is_mm_sync (enum memmodel model)
{
return (model & MEMMODEL_SYNC);
}
/* Compare and hash for any structure which begins with a canonical
pointer. Assumes all pointers are interchangeable, which is sort
of already assumed by gcc elsewhere IIRC. */
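
A hypothetical sketch of how a backend might consume these accessors when deciding on barrier strength; the function name is illustrative and not part of this patch:

static bool
target_wants_full_barrier (rtx model_rtx)
{
  enum memmodel model = memmodel_from_int (INTVAL (model_rtx));

  /* Legacy __sync expansions carry the SYNC bit, so a target whose
     __sync primitives were documented as full barriers can strengthen
     just those paths without penalizing __atomic code.  */
  if (is_mm_sync (model))
    return true;

  return is_mm_seq_cst (model);
}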

gcc/tsan.c

@@ -535,7 +535,7 @@ instrument_builtin_call (gimple_stmt_iterator *gsi)
case fetch_op:
last_arg = gimple_call_arg (stmt, num - 1);
if (!tree_fits_uhwi_p (last_arg)
|| tree_to_uhwi (last_arg) > MEMMODEL_SEQ_CST)
|| memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
return;
gimple_call_set_fndecl (stmt, decl);
update_stmt (stmt);
@@ -600,10 +600,10 @@ instrument_builtin_call (gimple_stmt_iterator *gsi)
for (j = 0; j < 6; j++)
args[j] = gimple_call_arg (stmt, j);
if (!tree_fits_uhwi_p (args[4])
|| tree_to_uhwi (args[4]) > MEMMODEL_SEQ_CST)
|| memmodel_base (tree_to_uhwi (args[4])) >= MEMMODEL_LAST)
return;
if (!tree_fits_uhwi_p (args[5])
|| tree_to_uhwi (args[5]) > MEMMODEL_SEQ_CST)
|| memmodel_base (tree_to_uhwi (args[5])) >= MEMMODEL_LAST)
return;
update_gimple_call (gsi, decl, 5, args[0], args[1], args[2],
args[4], args[5]);