Unify implementations of __builtin_mem_*_fence and __sync_synchronize.

	* builtins.c (expand_builtin_mem_thread_fence): Remove.
	(expand_builtin_mem_signal_fence): Remove.
	(expand_builtin_atomic_thread_fence): Use expand_mem_thread_fence.
	(expand_builtin_sync_synchronize): Likewise.
	(expand_builtin_atomic_signal_fence): Use expand_mem_signal_fence.
	* optabs.c (expand_asm_memory_barrier): Split out from
	expand_builtin_mem_signal_fence.
	(expand_mem_thread_fence): New, a combination of code from
	expand_builtin_mem_thread_fence and expand_builtin_sync_synchronize.
	(expand_mem_signal_fence): Moved and renamed from
	expand_builtin_mem_signal_fence.
	(expand_atomic_exchange): Use expand_mem_thread_fence.
	(expand_atomic_load, expand_atomic_store): Likewise.
	* expr.h, optabs.h: Update decls.
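
For reference, a sketch of the user-visible builtins whose expansion
paths now meet in the new optabs.c helpers:

	__atomic_thread_fence (__ATOMIC_SEQ_CST);   /* -> expand_mem_thread_fence */
	__atomic_signal_fence (__ATOMIC_SEQ_CST);   /* -> expand_mem_signal_fence */
	__sync_synchronize ();                      /* -> expand_mem_thread_fence */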

From-SVN: r181451
Author: Richard Henderson <rth@redhat.com>
Date:   2011-11-17 11:29:04 -08:00
commit c39169c82c, parent cfb9952179
5 changed files with 103 additions and 94 deletions


@@ -1,3 +1,20 @@
2011-11-17  Richard Henderson  <rth@redhat.com>

	* builtins.c (expand_builtin_mem_thread_fence): Remove.
	(expand_builtin_mem_signal_fence): Remove.
	(expand_builtin_atomic_thread_fence): Use expand_mem_thread_fence.
	(expand_builtin_sync_synchronize): Likewise.
	(expand_builtin_atomic_signal_fence): Use expand_mem_signal_fence.
	* optabs.c (expand_asm_memory_barrier): Split out from
	expand_builtin_mem_signal_fence.
	(expand_mem_thread_fence): New, a combination of code from
	expand_builtin_mem_thread_fence and expand_builtin_sync_synchronize.
	(expand_mem_signal_fence): Moved and renamed from
	expand_builtin_mem_signal_fence.
	(expand_atomic_exchange): Use expand_mem_thread_fence.
	(expand_atomic_load, expand_atomic_store): Likewise.
	* expr.h, optabs.h: Update decls.

2011-11-17  Bin Cheng  <bin.cheng@arm.com>

	PR rtl-optimization/50663


@@ -5672,23 +5672,6 @@ expand_builtin_atomic_is_lock_free (tree exp)
return NULL_RTX;
}
/* This routine will either emit the mem_thread_fence pattern or issue a
sync_synchronize to generate a fence for memory model MEMMODEL. */
#ifndef HAVE_mem_thread_fence
# define HAVE_mem_thread_fence 0
# define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
#endif
void
expand_builtin_mem_thread_fence (enum memmodel model)
{
if (HAVE_mem_thread_fence)
emit_insn (gen_mem_thread_fence (GEN_INT (model)));
else if (model != MEMMODEL_RELAXED)
expand_builtin_sync_synchronize ();
}
/* Expand the __atomic_thread_fence intrinsic:
void __atomic_thread_fence (enum memmodel)
EXP is the CALL_EXPR. */
@@ -5696,46 +5679,8 @@ expand_builtin_mem_thread_fence (enum memmodel model)
static void
expand_builtin_atomic_thread_fence (tree exp)
{
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 0));
expand_builtin_mem_thread_fence (model);
}
/* This routine will either emit the mem_signal_fence pattern or issue a
sync_synchronize to generate a fence for memory model MEMMODEL. */
#ifndef HAVE_mem_signal_fence
# define HAVE_mem_signal_fence 0
# define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
#endif
static void
expand_builtin_mem_signal_fence (enum memmodel model)
{
if (HAVE_mem_signal_fence)
emit_insn (gen_mem_signal_fence (GEN_INT (model)));
else if (model != MEMMODEL_RELAXED)
{
rtx asm_op, clob;
/* By default targets are coherent between a thread and the signal
handler running on the same thread. Thus this really becomes a
compiler barrier, in that stores must not be sunk past
(or raised above) a given point. */
/* Generate asm volatile("" : : : "memory") as the memory barrier. */
asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
rtvec_alloc (0), rtvec_alloc (0),
rtvec_alloc (0), UNKNOWN_LOCATION);
MEM_VOLATILE_P (asm_op) = 1;
clob = gen_rtx_SCRATCH (VOIDmode);
clob = gen_rtx_MEM (BLKmode, clob);
clob = gen_rtx_CLOBBER (VOIDmode, clob);
emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
expand_mem_thread_fence (model);
}
/* Expand the __atomic_signal_fence intrinsic:
@@ -5745,10 +5690,8 @@ expand_builtin_mem_signal_fence (enum memmodel model)
static void
expand_builtin_atomic_signal_fence (tree exp)
{
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 0));
expand_builtin_mem_signal_fence (model);
enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
expand_mem_signal_fence (model);
}
/* Expand the __sync_synchronize intrinsic. */
@@ -5756,31 +5699,7 @@ expand_builtin_atomic_signal_fence (tree exp)
static void
expand_builtin_sync_synchronize (void)
{
gimple x;
VEC (tree, gc) *v_clobbers;
#ifdef HAVE_memory_barrier
if (HAVE_memory_barrier)
{
emit_insn (gen_memory_barrier ());
return;
}
#endif
if (synchronize_libfunc != NULL_RTX)
{
emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
return;
}
/* If no explicit memory barrier instruction is available, create an
empty asm stmt with a memory clobber. */
v_clobbers = VEC_alloc (tree, gc, 1);
VEC_quick_push (tree, v_clobbers,
tree_cons (NULL, build_string (6, "memory"), NULL));
x = gimple_build_asm_vec ("", NULL, NULL, v_clobbers, NULL);
gimple_asm_set_volatile (x, true);
expand_asm_stmt (x);
expand_mem_thread_fence (MEMMODEL_SEQ_CST);
}
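
With this change __sync_synchronize is simply a sequentially
consistent thread fence, so the following two statements should now
expand to the same code (a minimal sketch):

	__sync_synchronize ();
	__atomic_thread_fence (__ATOMIC_SEQ_CST);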


@@ -254,7 +254,6 @@ extern void expand_builtin_setjmp_receiver (rtx);
extern rtx expand_builtin_saveregs (void);
extern void expand_builtin_trap (void);
extern rtx builtin_strncpy_read_str (void *, HOST_WIDE_INT, enum machine_mode);
extern void expand_builtin_mem_thread_fence (enum memmodel);
/* Functions from expr.c: */


@@ -7378,7 +7378,7 @@ expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model,
if (model == MEMMODEL_SEQ_CST
|| model == MEMMODEL_RELEASE
|| model == MEMMODEL_ACQ_REL)
expand_builtin_mem_thread_fence (model);
expand_mem_thread_fence (model);
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], mem);
@@ -7403,7 +7403,7 @@ expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model,
if (model == MEMMODEL_SEQ_CST
|| model == MEMMODEL_RELEASE
|| model == MEMMODEL_ACQ_REL)
expand_builtin_mem_thread_fence (model);
expand_mem_thread_fence (model);
addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
return emit_library_call_value (libfunc, target, LCT_NORMAL,
@@ -7556,6 +7556,76 @@ expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
return true;
}
/* Generate asm volatile("" : : : "memory") as the memory barrier. */
static void
expand_asm_memory_barrier (void)
{
rtx asm_op, clob;
asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
rtvec_alloc (0), rtvec_alloc (0),
rtvec_alloc (0), UNKNOWN_LOCATION);
MEM_VOLATILE_P (asm_op) = 1;
clob = gen_rtx_SCRATCH (VOIDmode);
clob = gen_rtx_MEM (BLKmode, clob);
clob = gen_rtx_CLOBBER (VOIDmode, clob);
emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
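/* Roughly, the PARALLEL emitted above is the RTL form of the C-level
   compiler barrier

	asm volatile ("" : : : "memory");

   it produces no machine instructions, but the volatile asm together
   with the BLKmode memory clobber keeps the compiler from moving
   memory accesses across this point.  */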
/* This routine will either emit the mem_thread_fence pattern or issue a
sync_synchronize to generate a fence for memory model MEMMODEL. */
#ifndef HAVE_mem_thread_fence
# define HAVE_mem_thread_fence 0
# define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
#endif
#ifndef HAVE_memory_barrier
# define HAVE_memory_barrier 0
# define gen_memory_barrier() (gcc_unreachable (), NULL_RTX)
#endif
void
expand_mem_thread_fence (enum memmodel model)
{
if (HAVE_mem_thread_fence)
emit_insn (gen_mem_thread_fence (GEN_INT (model)));
else if (model != MEMMODEL_RELAXED)
{
if (HAVE_memory_barrier)
emit_insn (gen_memory_barrier ());
else if (synchronize_libfunc != NULL_RTX)
emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
else
expand_asm_memory_barrier ();
}
}
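/* A sketch of the fallback chain above: the target's mem_thread_fence
   pattern if present, else the older memory_barrier pattern, else an
   out-of-line synchronize libfunc, else the asm compiler barrier.
   Note that on a target with no fence pattern a relaxed fence such as

	__atomic_thread_fence (__ATOMIC_RELAXED);

   emits nothing at all, since every fallback is guarded by
   model != MEMMODEL_RELAXED.  */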
/* This routine will either emit the mem_signal_fence pattern or issue a
sync_synchronize to generate a fence for memory model MEMMODEL. */
#ifndef HAVE_mem_signal_fence
# define HAVE_mem_signal_fence 0
# define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
#endif
void
expand_mem_signal_fence (enum memmodel model)
{
if (HAVE_mem_signal_fence)
emit_insn (gen_mem_signal_fence (GEN_INT (model)));
else if (model != MEMMODEL_RELAXED)
{
/* By default targets are coherent between a thread and the signal
handler running on the same thread. Thus this really becomes a
compiler barrier, in that stores must not be sunk past
(or raised above) a given point. */
expand_asm_memory_barrier ();
}
}
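/* A sketch of the use case described in the comment above; the names
   here are hypothetical.  The handler runs on the same thread as the
   interrupted code, so only compiler reordering must be prevented and
   the fence typically costs no instructions:

	volatile sig_atomic_t ready;
	int payload;

	void handler (int sig)
	{
	  payload = 42;
	  __atomic_signal_fence (__ATOMIC_RELEASE);
	  ready = 1;
	}
*/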
/* This function expands the atomic load operation:
return the atomically loaded value in MEM.
@@ -7598,13 +7668,13 @@ expand_atomic_load (rtx target, rtx mem, enum memmodel model)
target = gen_reg_rtx (mode);
/* Emit the appropriate barrier before the load. */
expand_builtin_mem_thread_fence (model);
expand_mem_thread_fence (model);
emit_move_insn (target, mem);
/* For SEQ_CST, also emit a barrier after the load. */
if (model == MEMMODEL_SEQ_CST)
expand_builtin_mem_thread_fence (model);
expand_mem_thread_fence (model);
return target;
}
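/* Thus, as a sketch, on a target taking this fallback path a
   sequentially consistent load such as

	v = __atomic_load_n (&x, __ATOMIC_SEQ_CST);

   expands to fence; plain move; fence, per the code above.  */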
@@ -7645,7 +7715,7 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
/* lock_release is only a release barrier. */
if (model == MEMMODEL_SEQ_CST)
expand_builtin_mem_thread_fence (model);
expand_mem_thread_fence (model);
return const0_rtx;
}
}
@@ -7665,13 +7735,13 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
/* If there is no mem_store, default to a move with barriers */
if (model == MEMMODEL_SEQ_CST || model == MEMMODEL_RELEASE)
expand_builtin_mem_thread_fence (model);
expand_mem_thread_fence (model);
emit_move_insn (mem, val);
/* For SEQ_CST, also emit a barrier after the store. */
if (model == MEMMODEL_SEQ_CST)
expand_builtin_mem_thread_fence (model);
expand_mem_thread_fence (model);
return const0_rtx;
}
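/* Similarly, a sketch of the fallback path above for

	__atomic_store_n (&x, v, __ATOMIC_SEQ_CST);

   is fence; plain move; fence, with the release-only model emitting
   just the leading fence.  */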


@@ -978,6 +978,10 @@ extern bool can_atomic_exchange_p (enum machine_mode, bool);
extern bool expand_atomic_compare_and_swap (rtx *, rtx *, rtx, rtx, rtx, bool,
enum memmodel, enum memmodel);
/* Generate memory barriers. */
extern void expand_mem_thread_fence (enum memmodel);
extern void expand_mem_signal_fence (enum memmodel);
/* Check whether an operation represented by the code CODE is a
convert operation that is supported by the target platform in
vector form */