diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index da8a0ef4c62..1beb8d58321 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,8 @@
+2017-03-22  Jakub Jelinek  <jakub@redhat.com>
+
+	PR sanitizer/78158
+	* tsan/tsan_interface_atomic.cc: Cherry-pick upstream r298378.
+
 2017-02-16  Andreas Tobler  <andreast@gcc.gnu.org>
 
 	PR sanitizer/79562
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 5c5c34f3b87..deb4206a624 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -448,10 +448,27 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
 
 // C/C++
 
+static morder convert_morder(morder mo) {
+  if (flags()->force_seq_cst_atomics)
+    return (morder)mo_seq_cst;
+
+  // Filter out additional memory order flags:
+  // MEMMODEL_SYNC        = 1 << 15
+  // __ATOMIC_HLE_ACQUIRE = 1 << 16
+  // __ATOMIC_HLE_RELEASE = 1 << 17
+  //
+  // HLE is an optimization, and we pretend that elision always fails.
+  // MEMMODEL_SYNC is used when lowering __sync_ atomics,
+  // since we use __sync_ atomics for actual atomic operations,
+  // we can safely ignore it as well. It also subtly affects semantics,
+  // but we don't model the difference.
+  return (morder)(mo & 0x7fff);
+}
+
 #define SCOPED_ATOMIC(func, ...) \
     const uptr callpc = (uptr)__builtin_return_address(0); \
     uptr pc = StackTrace::GetCurrentPc(); \
-    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+    mo = convert_morder(mo); \
     ThreadState *const thr = cur_thread(); \
     if (thr->ignore_interceptors) \
       return NoTsanAtomic##func(__VA_ARGS__); \
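
For readers unfamiliar with the flag encoding, the following standalone sketch (an illustration, not part of the patch; the kMemmodelSync/kAtomicHle* constant names and strip_flags helper are made up here) demonstrates the masking that convert_morder() performs: GCC may OR MEMMODEL_SYNC or the HLE hint bits into the memory order argument of the __tsan_atomic* entry points, and masking with 0x7fff recovers the base order that TSan should dispatch on.

// Standalone sketch, illustrative names only.
#include <cassert>

typedef enum {
  mo_relaxed, mo_consume, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst
} morder;

// Flag values as quoted in the patch comment.
static const int kMemmodelSync     = 1 << 15;  // MEMMODEL_SYNC
static const int kAtomicHleAcquire = 1 << 16;  // __ATOMIC_HLE_ACQUIRE
static const int kAtomicHleRelease = 1 << 17;  // __ATOMIC_HLE_RELEASE

// Same masking as convert_morder(): keep only the low 15 bits,
// i.e. the base memory order, dropping the hint/lowering flags.
static morder strip_flags(int mo) {
  return (morder)(mo & 0x7fff);
}

int main() {
  // An HLE store such as
  //   __atomic_store_n(p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE)
  // reaches the runtime with bit 17 set; masking yields plain mo_release,
  // i.e. TSan pretends the elision always fails.
  assert(strip_flags(mo_release | kAtomicHleRelease) == mo_release);

  // A __sync_ builtin is lowered with MEMMODEL_SYNC (bit 15) OR-ed into
  // a seq_cst order; masking yields plain mo_seq_cst.
  assert(strip_flags(mo_seq_cst | kMemmodelSync) == mo_seq_cst);

  (void)kAtomicHleAcquire;  // unused in this sketch
  return 0;
}

Without the mask, the out-of-range enum values produced by these flag bits fell through the morder switch statements, which is what PR sanitizer/78158 reports.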