gcc/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h
Jakub Jelinek 5d3805fca3 ubsan.c (ubsan_expand_null_ifn): Use _v1 suffixed type mismatch builtins...
* ubsan.c (ubsan_expand_null_ifn): Use _v1 suffixed type mismatch
	builtins, store max (log2 (align), 0) into uchar field instead of
	align into uptr field.
	(ubsan_expand_objsize_ifn): Use _v1 suffixed type mismatch builtins,
	store uchar 0 field instead of uptr 0 field.
	(instrument_nonnull_return): Use _v1 suffixed nonnull return builtin,
	instead of passing one address of struct with 2 locations pass
	two addresses of structs with 1 location each.
	* sanitizer.def (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH,
	BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT): Removed.
	(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1,
	BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1_ABORT,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1_ABORT): New builtins.

	* c-c++-common/ubsan/float-cast-overflow-1.c: Drop value keyword
	from expected output regexps.
	* c-c++-common/ubsan/float-cast-overflow-2.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-3.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-4.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-5.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-6.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-8.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-9.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-10.c: Likewise.
	* g++.dg/ubsan/float-cast-overflow-bf.C: Likewise.
	* gcc.dg/ubsan/float-cast-overflow-bf.c: Likewise.
	* g++.dg/asan/default-options-1.C (__asan_default_options): Add
	used attribute.
	* g++.dg/asan/asan_test.C: Run with ASAN_OPTIONS=handle_segv=2
	in the environment.

	* All source files: Merge from upstream 315899.
	* asan/Makefile.am (nodist_saninclude_HEADERS): Add
	include/sanitizer/tsan_interface.h.
	* asan/libtool-version: Bump the libasan SONAME.
	* lsan/Makefile.am (sanitizer_lsan_files): Add lsan_common_mac.cc.
	(lsan_files): Add lsan_linux.cc, lsan_mac.cc and lsan_malloc_mac.cc.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add
	sancov_flags.cc, sanitizer_allocator_checks.cc,
	sanitizer_coverage_libcdep_new.cc, sanitizer_errno.cc,
	sanitizer_file.cc, sanitizer_mac_libcdep.cc and
	sanitizer_stoptheworld_mac.cc.  Remove sanitizer_coverage_libcdep.cc
	and sanitizer_coverage_mapping_libcdep.cc.
	* tsan/Makefile.am (tsan_files): Add tsan_external.cc.
	* ubsan/Makefile.am (DEFS): Add -DUBSAN_CAN_USE_CXXABI=1.
	(ubsan_files): Add ubsan_init_standalone.cc and
	ubsan_signals_standalone.cc.
	* ubsan/libtool-version: Bump the libubsan SONAME.
	* asan/Makefile.in: Regenerate.
	* lsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.in: Regenerate.
	* tsan/Makefile.in: Regenerate.
	* ubsan/Makefile.in: Regenerate.

From-SVN: r253887
2017-10-19 13:23:59 +02:00


//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};

// Cache used by SizeClassAllocator64.
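// Chunks are stored as compact pointers (offsets from the start of the
// per-class region) rather than raw pointers.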
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

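  // Pops one chunk of the given size class off the per-thread cache,
  // refilling the cache from the shared allocator when it is empty.
  // Returns nullptr if the refill fails.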
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

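  // Pushes the chunk back onto the per-thread cache; when the cache is full,
  // half of it is first returned to the shared allocator.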
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

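  // Returns all cached chunks of every size class to the shared allocator.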
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

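  // Lazily sets up per-class capacities and sizes; a non-zero
  // per_class_[1].max_count serves as the "already initialized" marker
  // (class id 0 is never used).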
  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
      c->class_size = Allocator::ClassIdToSize(i);
    }
  }

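  // Fetches max_count / 2 chunks of this size class from the shared allocator
  // in a single call.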
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

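  // Returns the 'count' most recently cached chunks of this size class to the
  // shared allocator.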
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
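// Unlike the 64-bit cache, chunks here are raw pointers and are moved to and
// from the shared allocator in TransferBatch objects.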
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

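  // Pops one chunk of the given size class off the per-thread cache,
  // refilling the cache with a fresh TransferBatch when it is empty.
  // Returns nullptr if the refill fails.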
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

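  // Pushes the chunk back onto the per-thread cache; when the cache is full,
  // half of it is first drained back to the shared allocator.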
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

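  // Returns all cached chunks of every size class to the shared allocator.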
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

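  // Lazily sets up per-class capacities, sizes and batch class ids; a non-zero
  // per_class_[1].max_count serves as the "already initialized" marker.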
  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      uptr max_cached = TransferBatch::MaxCached(i);
      c->max_count = 2 * max_cached;
      c->class_size = Allocator::ClassIdToSize(i);
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (c->class_size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
  }

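  // Pulls one TransferBatch worth of chunks from the shared allocator and
  // copies them into the per-class cache.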
  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

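  // Packs up to half of the per-class cache into a TransferBatch and hands it
  // back to the shared allocator.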
  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b))
      DieOnFailure::OnOOM();
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};