//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H

namespace __sanitizer {

// MIPS32 does not support atomic operations wider than 4 bytes. To address
// this lack of functionality, the sanitizer library provides helper methods
// which use an internal spin lock mechanism to emulate atomic operations
// when the size is 8 bytes.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
static void __spin_lock(volatile int *lock) {
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
  int lock;
  char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0};
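
// Illustrative sanity check (not in the original source): the aligned,
// padded struct above should occupy exactly one 32-byte MIPS cache line,
// so nothing else can share a line with the lock word. Under C++11 this
// could be verified at compile time:
//
//   static_assert(sizeof(lock) == 32, "lock must fill one cache line");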

template <class T>
T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
  T ret;

  __spin_lock(&lock.lock);

  ret = *ptr;
  *ptr = ret + val;

  __spin_unlock(&lock.lock);

  return ret;
}

template <class T>
T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
  T ret;
  __spin_lock(&lock.lock);

  ret = *ptr;
  if (ret == oldval) *ptr = newval;

  __spin_unlock(&lock.lock);

  return ret;
}
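
// Usage sketch (illustrative only, not part of the runtime API): on MIPS O32
// these helpers stand in for the missing 8-byte __sync builtins, serializing
// all 64-bit accesses through the single global spin lock above:
//
//   volatile u64 counter = 0;
//   u64 old  = __mips_sync_fetch_and_add<u64>(&counter, 5);           // 0
//   u64 seen = __mips_sync_val_compare_and_swap<u64>(&counter, 5, 7); // 5
//   // counter is now 7, because the expected value 5 matched.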
#endif

// No cheap pause/yield instruction is assumed on these platforms, so this is
// only a compiler barrier; the iteration count cnt is ignored.
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that the processor respects data dependencies
      // (and that the compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the load.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
    // 64-bit load on a 32-bit platform.
    // Gross, but simple and reliable: a fetch-and-add of zero returns the
    // current value while guaranteeing an atomic read.
    // Assume that the location is not in read-only memory.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
    typename T::Type volatile *val_ptr =
        const_cast<typename T::Type volatile *>(&a->val_dont_use);
    v = __mips_sync_fetch_and_add<u64>(
        reinterpret_cast<u64 volatile *>(val_ptr), 0);
#else
    v = __sync_fetch_and_add(
        const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
#endif
  }
  return v;
}
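
// Usage sketch (illustrative): atomic_load is instantiated through the
// atomic_* wrapper types declared in sanitizer_atomic.h, e.g.
// atomic_uint64_t, whose val_dont_use member is only touched through these
// accessors:
//
//   atomic_uint64_t flag;
//   u64 x = atomic_load(&flag, memory_order_acquire);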

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on a 32-bit platform.
    // Gross, but simple and reliable: keep CASing the freshly observed
    // value to v until the swap succeeds.
    typename T::Type cmp = a->val_dont_use;
    typename T::Type cur;
    for (;;) {
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
      typename T::Type volatile *val_ptr =
          const_cast<typename T::Type volatile *>(&a->val_dont_use);
      cur = __mips_sync_val_compare_and_swap<u64>(
          reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
#else
      cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
#endif
      if (cur == cmp)
        break;  // The CAS succeeded; v has been stored.
      cmp = cur;
    }
  }
}
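
// Illustrative trace of the CAS loop above (hypothetical values): with
// *a == 3 and v == 9, __sync_val_compare_and_swap(&..., 3, 9) returns 3,
// which equals cmp, so the swap succeeded and the loop exits after one
// iteration. If another thread changed the value between the initial read
// and the CAS, cur != cmp and the loop retries with cmp = cur.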

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H