//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
|
|
|
|
//
|
2019-08-14 10:47:11 +02:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2014-09-23 19:59:53 +02:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// A fast memory allocator that does not support free() nor realloc().
|
|
|
|
// All allocations are forever.
|
|
|
|
//===----------------------------------------------------------------------===//
|
2015-10-21 09:32:45 +02:00
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
|
|
|
|
#define SANITIZER_PERSISTENT_ALLOCATOR_H
|
|
|
|
|
|
|
|
#include "sanitizer_internal_defs.h"
|
|
|
|
#include "sanitizer_mutex.h"
|
|
|
|
#include "sanitizer_atomic.h"
|
|
|
|
#include "sanitizer_common.h"
|
|
|
|
|
|
|
|
namespace __sanitizer {
|
|
|
|
|
2021-11-04 09:20:14 +01:00
|
|
|
// A thread-safe bump allocator for objects of type T. Memory is obtained
// from the OS in large superblocks and handed out in chunks; individual
// chunks are never freed (all allocations live until TestOnlyUnmap).
template <typename T>
class PersistentAllocator {
 public:
  // Returns a pointer to `count` contiguous, uninitialized T objects.
  // Lock-free on the fast path; takes `mtx` only when a new superblock
  // must be mapped. Never returns nullptr (dies on OOM via MmapOrDie).
  T *alloc(uptr count = 1);
  // Total bytes of OS memory mapped so far (including BlockInfo footers).
  uptr allocated() const { return atomic_load_relaxed(&mapped_size); }

  // Unmaps every superblock and zeroes the allocator state. As the name
  // says: only safe in tests, when no other thread can touch the object
  // and no previously returned pointer will be used again.
  void TestOnlyUnmap();

 private:
  // Lock-free attempt to carve `count` objects out of the current region;
  // returns nullptr if the region is absent or exhausted.
  T *tryAlloc(uptr count);
  // Slow path: under `mtx`, maps a fresh superblock and retries.
  T *refillAndAlloc(uptr count);
  mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
  atomic_uintptr_t region_pos;  // Region allocator for Node's.
  atomic_uintptr_t region_end;  // One past the last usable byte of the region.
  atomic_uintptr_t mapped_size; // Running total for allocated().

  // Bookkeeping footer stored at the tail of each superblock; links all
  // superblocks into a singly linked list so TestOnlyUnmap can find them.
  struct BlockInfo {
    const BlockInfo *next;  // Previously mapped superblock (or nullptr).
    uptr ptr;               // Start address of the mapping.
    uptr size;              // Size of the mapping in bytes.
  };
  const BlockInfo *curr;  // Head of the superblock list (most recent first).
};
|
|
|
|
|
2021-11-04 09:20:14 +01:00
|
|
|
template <typename T>
|
|
|
|
inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
|
2014-09-23 19:59:53 +02:00
|
|
|
// Optimisic lock-free allocation, essentially try to bump the region ptr.
|
|
|
|
for (;;) {
|
|
|
|
uptr cmp = atomic_load(®ion_pos, memory_order_acquire);
|
|
|
|
uptr end = atomic_load(®ion_end, memory_order_acquire);
|
2021-11-04 09:20:14 +01:00
|
|
|
uptr size = count * sizeof(T);
|
|
|
|
if (cmp == 0 || cmp + size > end)
|
|
|
|
return nullptr;
|
2014-09-23 19:59:53 +02:00
|
|
|
if (atomic_compare_exchange_weak(®ion_pos, &cmp, cmp + size,
|
|
|
|
memory_order_acquire))
|
2021-11-04 09:20:14 +01:00
|
|
|
return reinterpret_cast<T *>(cmp);
|
2014-09-23 19:59:53 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-04 09:20:14 +01:00
|
|
|
template <typename T>
|
|
|
|
inline T *PersistentAllocator<T>::alloc(uptr count) {
|
2014-09-23 19:59:53 +02:00
|
|
|
// First, try to allocate optimisitically.
|
2021-11-04 09:20:14 +01:00
|
|
|
T *s = tryAlloc(count);
|
|
|
|
if (LIKELY(s))
|
|
|
|
return s;
|
|
|
|
return refillAndAlloc(count);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Slow path of alloc(): serializes superblock creation behind `mtx`.
// NOTE(review): the statement order below is load-bearing — do not reorder.
template <typename T>
inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
  // If failed, lock, retry and alloc new superblock.
  SpinMutexLock l(&mtx);
  for (;;) {
    // Another thread may have refilled the region while we waited for the
    // lock, so retry the lock-free path first.
    T *s = tryAlloc(count);
    if (s)
      return s;
    // Disable the lock-free fast path (tryAlloc treats pos == 0 as "no
    // region") while the new superblock is being set up.
    atomic_store(&region_pos, 0, memory_order_relaxed);
    // Request enough for the allocation plus the BlockInfo footer, with a
    // 64 KiB floor, rounded up to whole pages.
    uptr size = count * sizeof(T) + sizeof(BlockInfo);
    uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    // Place the bookkeeping footer at the very end of the new mapping and
    // link it onto the superblock list (consumed only by TestOnlyUnmap).
    BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
    new_block->next = curr;
    new_block->ptr = mem;
    new_block->size = allocsz;
    curr = new_block;

    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);

    // Exclude the footer from the range handed out to callers.
    allocsz -= sizeof(BlockInfo);
    // Publish end before pos, both with release semantics: a thread in
    // tryAlloc that observes the new region_pos must also observe a
    // region_end that belongs to the same (new) region.
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
|
|
|
|
|
2021-11-04 09:20:14 +01:00
|
|
|
template <typename T>
|
|
|
|
void PersistentAllocator<T>::TestOnlyUnmap() {
|
|
|
|
while (curr) {
|
|
|
|
uptr mem = curr->ptr;
|
|
|
|
uptr allocsz = curr->size;
|
|
|
|
curr = curr->next;
|
|
|
|
UnmapOrDie((void *)mem, allocsz);
|
|
|
|
}
|
|
|
|
internal_memset(this, 0, sizeof(*this));
|
2014-09-23 19:59:53 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace __sanitizer
|
|
|
|
|
2015-10-21 09:32:45 +02:00
|
|
|
#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
|