//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "sanitizer_stackdepot.h"
|
2014-09-23 19:59:53 +02:00
|
|
|
|
2012-11-12 16:53:47 +01:00
|
|
|
#include "sanitizer_common.h"
|
2019-08-14 10:47:11 +02:00
|
|
|
#include "sanitizer_hash.h"
|
2014-09-23 19:59:53 +02:00
|
|
|
#include "sanitizer_stackdepotbase.h"
|
2012-11-12 16:53:47 +01:00
|
|
|
|
|
|
|
namespace __sanitizer {
|
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
struct StackDepotNode {
|
|
|
|
StackDepotNode *link;
|
2012-11-12 16:53:47 +01:00
|
|
|
u32 id;
|
2014-09-23 19:59:53 +02:00
|
|
|
atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
|
2015-10-21 09:32:45 +02:00
|
|
|
u32 size;
|
|
|
|
u32 tag;
|
2012-11-12 16:53:47 +01:00
|
|
|
uptr stack[1]; // [size]
|
|
|
|
|
2019-08-14 10:47:11 +02:00
|
|
|
static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
|
2014-09-23 19:59:53 +02:00
|
|
|
// Lower kTabSizeLog bits are equal for all items in one bucket.
|
|
|
|
// We use these bits to store the per-stack use counter.
|
|
|
|
static const u32 kUseCountBits = kTabSizeLog;
|
|
|
|
static const u32 kMaxUseCount = 1 << kUseCountBits;
|
|
|
|
static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
|
|
|
|
static const u32 kHashMask = ~kUseCountMask;
|
|
|
|
|
2014-11-13 21:41:38 +01:00
|
|
|
typedef StackTrace args_type;
|
2014-09-23 19:59:53 +02:00
|
|
|
bool eq(u32 hash, const args_type &args) const {
|
|
|
|
u32 hash_bits =
|
|
|
|
atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
|
2015-10-21 09:32:45 +02:00
|
|
|
if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
|
|
|
|
return false;
|
2014-09-23 19:59:53 +02:00
|
|
|
uptr i = 0;
|
|
|
|
for (; i < size; i++) {
|
2014-11-13 21:41:38 +01:00
|
|
|
if (stack[i] != args.trace[i]) return false;
|
2014-09-23 19:59:53 +02:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
static uptr storage_size(const args_type &args) {
|
|
|
|
return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
|
|
|
|
}
|
2014-11-13 21:41:38 +01:00
|
|
|
static u32 hash(const args_type &args) {
|
2019-08-14 10:47:11 +02:00
|
|
|
MurMur2HashBuilder H(args.size * sizeof(uptr));
|
|
|
|
for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
|
|
|
|
return H.get();
|
2014-11-13 21:41:38 +01:00
|
|
|
}
|
|
|
|
static bool is_valid(const args_type &args) {
|
|
|
|
return args.size > 0 && args.trace;
|
|
|
|
}
|
2014-09-23 19:59:53 +02:00
|
|
|
void store(const args_type &args, u32 hash) {
|
|
|
|
atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
|
|
|
|
size = args.size;
|
2015-10-21 09:32:45 +02:00
|
|
|
tag = args.tag;
|
2014-11-13 21:41:38 +01:00
|
|
|
internal_memcpy(stack, args.trace, size * sizeof(uptr));
|
2014-09-23 19:59:53 +02:00
|
|
|
}
|
|
|
|
args_type load() const {
|
2015-10-21 09:32:45 +02:00
|
|
|
return args_type(&stack[0], size, tag);
|
2014-09-23 19:59:53 +02:00
|
|
|
}
|
|
|
|
StackDepotHandle get_handle() { return StackDepotHandle(this); }
|
2012-11-12 16:53:47 +01:00
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
typedef StackDepotHandle handle_type;
|
|
|
|
};
|
2013-01-10 13:44:08 +01:00
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
// Keep the node's maximal use count in sync with the constant exposed to
// clients in the public header.
COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
|
2013-01-10 13:44:08 +01:00
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
// Returns the unique id of the interned stack this handle refers to.
u32 StackDepotHandle::id() { return node_->id; }
|
|
|
|
int StackDepotHandle::use_count() {
|
|
|
|
return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
|
|
|
|
StackDepotNode::kUseCountMask;
|
2012-11-12 16:53:47 +01:00
|
|
|
}
|
2014-09-23 19:59:53 +02:00
|
|
|
void StackDepotHandle::inc_use_count_unsafe() {
|
|
|
|
u32 prev =
|
|
|
|
atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
|
|
|
|
StackDepotNode::kUseCountMask;
|
|
|
|
CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
|
2012-11-12 16:53:47 +01:00
|
|
|
}
|
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
// The single global depot instance behind all the free functions below.
static StackDepot theDepot;
|
|
|
|
|
|
|
|
// Exposes the global depot's statistics (see StackDepotStats).
StackDepotStats *StackDepotGetStats() { return theDepot.GetStats(); }
|
|
|
|
|
2014-11-13 21:41:38 +01:00
|
|
|
// Interns |stack| in the global depot and returns its id, or 0 when the
// depot hands back an invalid handle.
u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  if (!h.valid())
    return 0;
  return h.id();
}
|
|
|
|
|
2014-11-13 21:41:38 +01:00
|
|
|
// Same as StackDepotPut, but returns the full handle instead of just the id.
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}
|
|
|
|
|
2014-11-13 21:41:38 +01:00
|
|
|
// Retrieves the trace previously interned under |id| from the global depot.
StackTrace StackDepotGet(u32 id) { return theDepot.Get(id); }
|
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
// Acquires every lock of the global depot.
void StackDepotLockAll() { theDepot.LockAll(); }
|
|
|
|
|
2014-09-23 19:59:53 +02:00
|
|
|
// Releases every lock of the global depot; pairs with StackDepotLockAll.
void StackDepotUnlockAll() { theDepot.UnlockAll(); }
|
|
|
|
|
2013-11-04 22:33:31 +01:00
|
|
|
// Strict weak ordering of IdDescPair by id; used both to sort the reverse
// map and as the predicate for the binary search in Get().
bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}
|
|
|
|
|
2018-10-31 12:14:23 +01:00
|
|
|
// Builds an id -> node index by walking every bucket of the global depot,
// then sorts the (id, node) pairs so Get() can binary-search them.
StackDepotReverseMap::StackDepotReverseMap() {
  map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
  for (int bucket = 0; bucket < StackDepot::kTabSize; bucket++) {
    atomic_uintptr_t *slot = &theDepot.tab[bucket];
    uptr head = atomic_load(slot, memory_order_consume);
    // The low bit of the bucket pointer is the depot's reserved bit (see
    // the StackDepot typedef); mask it off before chasing the chain.
    for (StackDepotNode *node = (StackDepotNode *)(head & ~1); node;
         node = node->link) {
      IdDescPair pair = {node->id, node};
      map_.push_back(pair);
    }
  }
  Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
}
|
|
|
|
|
2014-11-13 21:41:38 +01:00
|
|
|
// Looks up a previously interned trace by id.  Returns an empty
// StackTrace when the map is empty or the id is not present.
StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, nullptr};
  uptr idx =
      InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
  // InternalLowerBound returns map_.size() when |id| is greater than every
  // stored id.  The previous guard (idx > map_.size()) could never be true
  // for a lower-bound result, so map_[idx] was read one past the end of
  // the array in that case; test for the end index exactly instead.
  if (idx == map_.size() || map_[idx].id != id)
    return StackTrace();
  return map_[idx].desc->load();
}
|
|
|
|
|
2015-10-21 09:32:45 +02:00
|
|
|
} // namespace __sanitizer
|