696d846a56
libsanitizer/ 2015-10-20 Maxim Ostapenko <m.ostapenko@partner.samsung.com> * All source files: Merge from upstream r250806. * configure.ac (link_sanitizer_common): Add -lrt flag. * configure.tgt: Enable TSAN and LSAN for aarch64-linux targets. Set CXX_ABI_NEEDED=true for darwin. * asan/Makefile.am (asan_files): Add new files. (DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0. * asan/Makefile.in: Regenerate. * ubsan/Makefile.am (ubsan_files): Add new files. (DEFS): Add DCAN_SANITIZE_UB=1. (libubsan_la_LIBADD): Add -lc++abi if CXX_ABI_NEEDED is true. * ubsan/Makefile.in: Regenerate. * tsan/Makefile.am (tsan_files): Add new files. (DEFS): Add DCAN_SANITIZE_UB=0. * tsan/Makefile.in: Regenerate. * sanitizer_common/Makefile.am (sanitizer_common_files): Add new files. * sanitizer_common/Makefile.in: Regenerate. * asan/libtool-version: Bump the libasan SONAME. From-SVN: r229111
71 lines · 2.2 KiB · C++
//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() nor realloc().
// All allocations are forever.
//
//===----------------------------------------------------------------------===//
|
#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
|
|
#define SANITIZER_PERSISTENT_ALLOCATOR_H
|
|
|
|
#include "sanitizer_internal_defs.h"
|
|
#include "sanitizer_mutex.h"
|
|
#include "sanitizer_atomic.h"
|
|
#include "sanitizer_common.h"
|
|
|
|
namespace __sanitizer {
|
|
|
|
// Bump-pointer allocator whose allocations live for the lifetime of the
// process: there is no free() and no realloc().
// Thread-safe: the fast path is lock-free; mapping a new region is
// serialized by mtx.
class PersistentAllocator {
 public:
  // Returns a pointer to 'size' bytes of never-freed storage.
  void *alloc(uptr size);

 private:
  // Lock-free fast path: bumps region_pos; returns null when the current
  // region is exhausted or none is mapped yet.
  void *tryAlloc(uptr size);
  StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
  atomic_uintptr_t region_pos; // Region allocator for Node's.
  atomic_uintptr_t region_end; // First byte past the current region.
};
|
|
|
|
// Optimistic lock-free allocation: try to carve 'size' bytes out of the
// current region by bumping region_pos with a CAS. Returns null when no
// region is mapped yet (region_pos == 0) or the region lacks room.
inline void *PersistentAllocator::tryAlloc(uptr size) {
  while (true) {
    uptr pos = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    uptr new_pos = pos + size;
    if (pos == 0 || new_pos > end)
      return nullptr;
    // Weak CAS may fail spuriously; the loop simply retries with the
    // freshly-loaded position.
    if (atomic_compare_exchange_weak(&region_pos, &pos, new_pos,
                                     memory_order_acquire))
      return (void *)pos;
  }
}
|
|
|
|
// Returns 'size' bytes of never-freed storage. Fast path is the lock-free
// bump in tryAlloc(); on failure we take the mutex, retry, and if still
// out of room map a fresh region (64K, or 'size' if that is larger).
inline void *PersistentAllocator::alloc(uptr size) {
  // First, try to allocate optimistically, without the lock.
  if (void *res = tryAlloc(size))
    return res;
  // Slow path: serialize region replacement behind the spin mutex.
  SpinMutexLock lock(&mtx);
  while (true) {
    if (void *res = tryAlloc(size))
      return res;
    // Publish an empty region so concurrent tryAlloc() calls fail fast
    // while we map the new block.
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr block_size = 64 * 1024;
    if (block_size < size)
      block_size = size;
    uptr mem = (uptr)MmapOrDie(block_size, "stack depot");
    // Order matters: region_end must be visible before region_pos becomes
    // nonzero, since tryAlloc() reads pos first and treats 0 as "no region".
    atomic_store(&region_end, mem + block_size, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
|
|
|
|
// Process-wide PersistentAllocator instance; defined in a .cc elsewhere in
// sanitizer_common.
extern PersistentAllocator thePersistentAllocator;

// Convenience wrapper: allocate 'sz' never-freed bytes from the global
// persistent allocator.
inline void *PersistentAlloc(uptr sz) {
  return thePersistentAllocator.alloc(sz);
}
|
|
|
|
} // namespace __sanitizer
|
|
|
|
#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
|