//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;
const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
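// With kTracePartSizeBits = 13 this yields 8K events per part,
// 2M / 8K = 256 parts, and 2M events in the whole trace
// (these are counts of 8-byte events, not bytes).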

// Must fit into 3 bits.
enum EventType {
  EventTypeMop,
  EventTypeFuncEnter,
  EventTypeFuncExit,
  EventTypeLock,
  EventTypeUnlock,
  EventTypeRLock,
  EventTypeRUnlock
};

// Represents a thread event (from most significant bit):
//   u64 typ  : 3;   // EventType.
//   u64 addr : 61;  // Associated pc.
typedef u64 Event;

const uptr kEventPCBits = 61;
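
// Illustrative sketch (not part of the original header; the helper names are
// hypothetical): packing and unpacking a v1 event word per the layout
// documented above, with the EventType in the top 3 bits and the PC in the
// low kEventPCBits bits.
inline Event MakeEvent(EventType typ, uptr pc) {
  return ((u64)typ << kEventPCBits) | ((u64)pc & ((1ull << kEventPCBits) - 1));
}
inline EventType EventGetType(Event ev) {
  return (EventType)(ev >> kEventPCBits);
}
inline uptr EventGetPC(Event ev) {
  return (uptr)(ev & ((1ull << kEventPCBits) - 1));
}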

struct TraceHeader {
#if !SANITIZER_GO
  BufferedStackTrace stack0;  // Start stack for the trace.
#else
  VarSizeStackTrace stack0;
#endif
  u64 epoch0;  // Start epoch for the trace.
  MutexSet mset0;

  TraceHeader() : stack0(), epoch0() {}
};

struct Trace {
  Mutex mtx;
#if !SANITIZER_GO
  // Must be last to catch overflow as a paging fault.
  // The Go shadow stack is dynamically allocated.
  uptr shadow_stack[kShadowStackSize];
#endif
  // Must be the last field, because we unmap the unused part in
  // CreateThreadContext.
  TraceHeader headers[kTraceParts];

  Trace() : mtx(MutexTypeTrace) {}
};

namespace v3 {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events, used for type dispatch.
struct Event {
  // We use a variable-length type encoding to give more bits to the event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise, type denotes the event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");
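
// Illustrative sketch (not part of the original header; DispatchEvent and the
// handler methods are hypothetical): how replay can dispatch on the
// variable-length encoding. The order of the checks matters: is_access first,
// then is_func, and only then is the 3-bit type field meaningful.
struct EventAccess;
struct EventFunc;
struct EventAccessExt;
struct EventAccessRange;  // (all defined below)
template <typename Handler>
void DispatchEvent(const Event* ev, Handler& h) {
  if (ev->is_access)
    h.OnAccess(reinterpret_cast<const EventAccess*>(ev));
  else if (ev->is_func)
    h.OnFunc(reinterpret_cast<const EventFunc*>(ev));
  else if (ev->type == EventType::kAccessExt)
    h.OnAccessExt(reinterpret_cast<const EventAccessExt*>(ev));
  else if (ev->type == EventType::kAccessRange)
    h.OnAccessRange(reinterpret_cast<const EventAccessRange*>(ev));
  // Similarly: kLock/kRLock -> EventLock, kUnlock -> EventUnlock,
  // kTime -> EventTime.
}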

// Nop event, used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};

// A compressed memory access event; it can represent only accesses whose PC
// is close enough to the previous access's PC. Otherwise we fall back to
// EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
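
// Illustrative sketch (not part of the original header; the helper names are
// hypothetical, and the biased representation is an assumption): one way to
// store a signed delta in the unsigned pc_delta field is to bias it by
// 2^(kPCBits-1) on encode, falling back to EventAccessExt when it does not
// fit, and to undo the bias on decode relative to the previous access's PC.
inline bool EncodePCDelta(uptr pc, uptr prev_pc, u64* delta) {
  u64 biased = pc - prev_pc + (1ull << (EventAccess::kPCBits - 1));
  if (biased >= (1ull << EventAccess::kPCBits))
    return false;  // too far from the previous PC
  *delta = biased;
  return true;
}
inline uptr DecodePC(u64 pc_delta, uptr prev_pc) {
  return prev_pc + (uptr)pc_delta - ((uptr)1 << (EventAccess::kPCBits - 1));
}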

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");

// Extended memory access with full PC.
struct EventAccessExt {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : 11;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
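
// Illustrative sketch (not part of the original header; the helper name is
// hypothetical): the range size is split across the size_lo/size_hi fields to
// pack the event into 16 bytes, so it is reassembled on replay as
// (size_hi << kSizeLoBits) | size_lo.
inline uptr EventRangeSize(const EventAccessRange& ev) {
  return ((uptr)ev.size_hi << EventAccessRange::kSizeLoBits) |
         (uptr)ev.size_lo;
}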

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : 3;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
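
// Illustrative sketch (not part of the original header; the helper name is
// hypothetical): the same low/high split used for range sizes is applied to
// the lock's stack ID.
inline StackID EventLockStackID(const EventLock& ev) {
  return (StackID)(((u64)ev.stack_hi << EventLock::kStackIDLoBits) |
                   ev.stack_lo);
}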

// Mutex unlock.
struct EventUnlock {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : 15;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : 64 - 5 - sizeof(Sid) * kByteBits - kEpochBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to the Trace containing this part
  INode trace_parts;       // in Trace::parts
};

struct TracePart : TraceHeader {
  static constexpr uptr kByteSize = 256 << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event-pointer overflow check by comparing
  // a pointer into TracePart::events against the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle that are filtered out
  // separately). This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
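
// Illustrative sketch (not part of the original header; simplified from the
// fast-path check described in the comment above, and an assumption about its
// exact form): because each TracePart is page-aligned and events is the last
// field, the array ends exactly on a page boundary, so a trace position with
// all kAlignment bits clear may be at the end of the part. Positions near the
// start of interior pages match too; those false positives must be filtered
// out on the slow path.
inline bool TracePosMayBeAtPartEnd(Event* pos) {
  return ((uptr)pos & TracePart::kAlignment) == 0;
}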

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  Event* final_pos =
      nullptr;  // final position in the last part for finished threads

  Trace() : mtx(MutexTypeTrace) {}
};

}  // namespace v3

}  // namespace __tsan

#endif  // TSAN_TRACE_H