From 53de1ea800db54b47290d578c43892799b66c8dc Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 31 Oct 2018 23:11:22 +0000
Subject: [PATCH] aarch64: Remove early clobber from ATOMIC_LDOP scratch

	* config/aarch64/atomics.md (aarch64_atomic_<atomic_ldop><mode>_lse):
	The scratch register need not be early-clobber.  Document the reason
	why we cannot use ST<OP>.

From-SVN: r265703
---
 gcc/ChangeLog                 |  6 ++++++
 gcc/config/aarch64/atomics.md | 14 +++++++++++++-
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 83611659f72..8510ddd7851 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2018-10-31  Richard Henderson
+
+	* config/aarch64/atomics.md (aarch64_atomic_<atomic_ldop><mode>_lse):
+	scratch register need not be early-clobber.  Document the reason
+	why we cannot use ST<OP>.
+
 2018-10-31  Joseph Myers
 
 	PR bootstrap/82856
diff --git a/gcc/config/aarch64/atomics.md b/gcc/config/aarch64/atomics.md
index 2198649b1be..00f7af4e4ac 100644
--- a/gcc/config/aarch64/atomics.md
+++ b/gcc/config/aarch64/atomics.md
@@ -263,6 +263,18 @@
   }
 )
 
+;; It is tempting to want to use ST<OP> for relaxed and release
+;; memory models here.  However, that is incompatible with the
+;; C++ memory model for the following case:
+;;
+;;	atomic_fetch_add(ptr, 1, memory_order_relaxed);
+;;	atomic_thread_fence(memory_order_acquire);
+;;
+;; The problem is that the architecture says that ST<OP> (and LD<OP>
+;; insns where the destination is XZR) are not regarded as a read.
+;; However we also implement the acquire memory barrier with DMB LD,
+;; and so the ST<OP> is not blocked by the barrier.
+
 (define_insn "aarch64_atomic_<atomic_ldop><mode>_lse"
   [(set (match_operand:ALLI 0 "aarch64_sync_memory_operand" "+Q")
 	(unspec_volatile:ALLI
@@ -270,7 +282,7 @@
 	   (match_operand:ALLI 1 "register_operand" "r")
 	   (match_operand:SI 2 "const_int_operand")]
 	  ATOMIC_LDOP))
-   (clobber (match_scratch:ALLI 3 "=&r"))]
+   (clobber (match_scratch:ALLI 3 "=r"))]
   "TARGET_LSE"
  {
    enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
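
As background for the new comment: the cited C++ pattern relies on fence-to-atomic
synchronization, where an acquire fence upgrades an earlier relaxed read-modify-write
so that it synchronizes with a release operation in another thread.  A minimal
stand-alone C++ sketch of that pattern follows (illustrative only, not part of the
patch; the writer/reader split and the counter/data names are invented for the
example).  If the relaxed fetch_add were emitted as a write-only ST<OP>, which the
architecture does not regard as a read, the DMB LD used for the acquire fence would
not order it against the later load, and the assertion below could fail; keeping the
LD<OP> form with a live destination register preserves the guarantee.

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int> counter{0};
int data;				/* plain, non-atomic payload */

void writer ()
{
  data = 42;				/* ordinary store */
  /* Release RMW: publishes DATA to whoever observes the increment.  */
  counter.fetch_add (1, std::memory_order_release);
}

void reader ()
{
  /* Relaxed RMW followed by an acquire fence: per the C++ memory model the
     fence synchronizes with the release operation whose value the RMW read,
     so DATA must be visible after the fence.  */
  int old = counter.fetch_add (1, std::memory_order_relaxed);
  std::atomic_thread_fence (std::memory_order_acquire);
  if (old >= 1)
    assert (data == 42);
}

int main ()
{
  std::thread t1 (writer), t2 (reader);
  t1.join ();
  t2.join ();
  return 0;
}

Nothing in the reader consumes the value returned by its fetch_add beyond the
branch, which is exactly why a write-only atomic looks tempting for the relaxed
case and why the comment documents that it cannot be used.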