Commit c973189 (parent f7347df)

tsan: add new trace

Add structures for the new trace format, functions that serialize and add
events to the trace, and trace replaying logic.

Differential Revision: https://reviews.llvm.org/D107911

8 files changed: 868 additions, 1 deletion
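
The event structures that the serializers below populate (Event, EventAccess, EventAccessExt, EventAccessRange, EventFunc, EventLock, EventUnlock, EventTime) are added by this commit in a header that is not reproduced in this excerpt. As orientation only, here is a minimal sketch of the kind of bit-packed 64-bit layout those names suggest; the field names follow the member accesses in tsan_rtl.cpp below, but the exact bit widths and enumerator order are illustrative assumptions, not the committed definitions.

// Sketch only: widths and enumerator order are assumptions for illustration.
#include <cstdint>
using u64 = uint64_t;  // stand-in for the sanitizer_common typedef

enum class EventType : u64 {
  kAccessExt, kAccessRange, kLock, kRLock, kUnlock, kTime,
};

// Generic 8-byte event header; every event starts with these tag bits.
struct Event {
  u64 is_access : 1;  // 1 for the compact memory-access event
  u64 is_func : 1;    // 1 for function entry/exit events
  EventType type : 3;
  u64 _ : 59;
};

// Compact memory-access event used on the fast path (is_access == 1).
struct EventAccess {
  static constexpr u64 kPCBits = 15;  // assumed width of the PC-delta field
  u64 is_access : 1;
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;             // log2 of the access size (1/2/4/8 bytes)
  u64 pc_delta : kPCBits;       // biased delta from the previous access PC
  u64 addr : 64 - 5 - kPCBits;  // compressed address (see CompressAddr)
};
static_assert(sizeof(Event) == 8 && sizeof(EventAccess) == 8, "8-byte events");

TraceMutexLock in tsan_rtl.cpp below distinguishes kLock from kRLock, and TryTraceMemoryAccess falls back to EventAccessExt with the full PC when the delta does not fit, which is why both a compact and an extended access event exist.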

compiler-rt/lib/tsan/rtl/tsan_defs.h

Lines changed: 5 additions & 0 deletions

@@ -51,13 +51,18 @@ typedef __m128i m128;
 
 namespace __tsan {
 
+constexpr uptr kByteBits = 8;
+
 // Thread slot ID.
 enum class Sid : u8 {};
 constexpr uptr kThreadSlotCount = 256;
+constexpr Sid kFreeSid = static_cast<Sid>(255);
 
 // Abstract time unit, vector clock element.
 enum class Epoch : u16 {};
+constexpr uptr kEpochBits = 14;
 constexpr Epoch kEpochZero = static_cast<Epoch>(0);
+constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
 
 const int kClkBits = 42;
 const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
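
These constants pin down the ranges the new trace encoding relies on: a Sid indexes one of 256 thread slots (with 255 reserved as kFreeSid) and an Epoch is confined to 14 bits, so kEpochOver = 1 << 14 = 16384 is the first out-of-range value. A standalone sanity sketch of those relationships, using assumed stand-ins for the sanitizer_common integer typedefs:

#include <cstdint>

// Stand-ins for the sanitizer_common typedefs (assumption for this sketch).
using u8 = uint8_t;
using u16 = uint16_t;
using uptr = uintptr_t;

enum class Sid : u8 {};
enum class Epoch : u16 {};

constexpr uptr kByteBits = 8;
constexpr uptr kThreadSlotCount = 256;
constexpr Sid kFreeSid = static_cast<Sid>(255);
constexpr uptr kEpochBits = 14;
constexpr Epoch kEpochZero = static_cast<Epoch>(0);
constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);

// One slot per possible u8 value, with the last value marking a free slot.
static_assert(kThreadSlotCount == (uptr{1} << kByteBits), "256 slots");
static_assert(static_cast<uptr>(kFreeSid) == kThreadSlotCount - 1, "last sid");
// Valid epochs are [kEpochZero, kEpochOver); 1 << 14 still fits the u16 base.
static_assert(static_cast<uptr>(kEpochOver) == 16384u, "first invalid epoch");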

compiler-rt/lib/tsan/rtl/tsan_rtl.cpp

Lines changed: 182 additions & 0 deletions

@@ -555,6 +555,188 @@ StackID CurrentStackId(ThreadState *thr, uptr pc) {
   return id;
 }
 
+namespace v3 {
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
+                                             uptr addr, uptr size,
+                                             AccessType typ) {
+  DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
+  if (!kCollectHistory)
+    return true;
+  EventAccess *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
+  uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
+  thr->trace_prev_pc = pc;
+  if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
+    ev->is_access = 1;
+    ev->is_read = !!(typ & kAccessRead);
+    ev->is_atomic = !!(typ & kAccessAtomic);
+    ev->size_log = size_log;
+    ev->pc_delta = pc_delta;
+    DCHECK_EQ(ev->pc_delta, pc_delta);
+    ev->addr = CompressAddr(addr);
+    TraceRelease(thr, ev);
+    return true;
+  }
+  auto *evex = reinterpret_cast<EventAccessExt *>(ev);
+  evex->is_access = 0;
+  evex->is_func = 0;
+  evex->type = EventType::kAccessExt;
+  evex->is_read = !!(typ & kAccessRead);
+  evex->is_atomic = !!(typ & kAccessAtomic);
+  evex->size_log = size_log;
+  evex->addr = CompressAddr(addr);
+  evex->pc = pc;
+  TraceRelease(thr, evex);
+  return true;
+}
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
+                                                  uptr addr, uptr size,
+                                                  AccessType typ) {
+  if (!kCollectHistory)
+    return true;
+  EventAccessRange *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  thr->trace_prev_pc = pc;
+  ev->is_access = 0;
+  ev->is_func = 0;
+  ev->type = EventType::kAccessRange;
+  ev->is_read = !!(typ & kAccessRead);
+  ev->is_free = !!(typ & kAccessFree);
+  ev->size_lo = size;
+  ev->pc = CompressAddr(pc);
+  ev->addr = CompressAddr(addr);
+  ev->size_hi = size >> EventAccessRange::kSizeLoBits;
+  TraceRelease(thr, ev);
+  return true;
+}
+
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                            AccessType typ) {
+  if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
+    return;
+  TraceSwitchPart(thr);
+  UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
+  DCHECK(res);
+}
+
+void TraceFunc(ThreadState *thr, uptr pc) {
+  if (LIKELY(TryTraceFunc(thr, pc)))
+    return;
+  TraceSwitchPart(thr);
+  UNUSED bool res = TryTraceFunc(thr, pc);
+  DCHECK(res);
+}
+
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+                    StackID stk) {
+  DCHECK(type == EventType::kLock || type == EventType::kRLock);
+  if (!kCollectHistory)
+    return;
+  EventLock ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = type;
+  ev.pc = CompressAddr(pc);
+  ev.stack_lo = stk;
+  ev.stack_hi = stk >> EventLock::kStackIDLoBits;
+  ev._ = 0;
+  ev.addr = CompressAddr(addr);
+  TraceEvent(thr, ev);
+}
+
+void TraceMutexUnlock(ThreadState *thr, uptr addr) {
+  if (!kCollectHistory)
+    return;
+  EventUnlock ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = EventType::kUnlock;
+  ev._ = 0;
+  ev.addr = CompressAddr(addr);
+  TraceEvent(thr, ev);
+}
+
+void TraceTime(ThreadState *thr) {
+  if (!kCollectHistory)
+    return;
+  EventTime ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = EventType::kTime;
+  ev.sid = static_cast<u64>(thr->sid);
+  ev.epoch = static_cast<u64>(thr->epoch);
+  ev._ = 0;
+  TraceEvent(thr, ev);
+}
+
+NOINLINE
+void TraceSwitchPart(ThreadState *thr) {
+  Trace *trace = &thr->tctx->trace;
+  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
+  auto *part = trace->parts.Back();
+  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
+  if (part) {
+    // We can get here when we still have space in the current trace part.
+    // The fast-path check in TraceAcquire has false positives in the middle of
+    // the part. Check if we are indeed at the end of the current part or not,
+    // and fill any gaps with NopEvent's.
+    Event *end = &part->events[TracePart::kSize];
+    DCHECK_GE(pos, &part->events[0]);
+    DCHECK_LE(pos, end);
+    if (pos + 1 < end) {
+      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
+          TracePart::kAlignment)
+        *pos++ = NopEvent;
+      *pos++ = NopEvent;
+      DCHECK_LE(pos + 2, end);
+      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
+      // Ensure we setup trace so that the next TraceAcquire
+      // won't detect trace part end.
+      Event *ev;
+      CHECK(TraceAcquire(thr, &ev));
+      return;
+    }
+    // We are indeed at the end.
+    for (; pos < end; pos++) *pos = NopEvent;
+  }
+#if !SANITIZER_GO
+  if (ctx->after_multithreaded_fork) {
+    // We just need to survive till exec.
+    CHECK(part);
+    atomic_store_relaxed(&thr->trace_pos,
+                         reinterpret_cast<uptr>(&part->events[0]));
+    return;
+  }
+#endif
+  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
+  part->trace = trace;
+  thr->trace_prev_pc = 0;
+  {
+    Lock lock(&trace->mtx);
+    trace->parts.PushBack(part);
+    atomic_store_relaxed(&thr->trace_pos,
+                         reinterpret_cast<uptr>(&part->events[0]));
+  }
+  // Make this part self-sufficient by restoring the current stack
+  // and mutex set in the beginning of the trace.
+  TraceTime(thr);
+  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
+    CHECK(TryTraceFunc(thr, *pos));
+  for (uptr i = 0; i < thr->mset.Size(); i++) {
+    MutexSet::Desc d = thr->mset.Get(i);
+    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
+                   d.addr, d.stack_id);
+  }
+}
+
+}  // namespace v3
+
 void TraceSwitch(ThreadState *thr) {
 #if !SANITIZER_GO
   if (ctx->after_multithreaded_fork)
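
The only non-obvious arithmetic in TryTraceMemoryAccess above is the PC encoding: the compact event stores a delta from the previous access rather than the full PC, biased by 1 << (kPCBits - 1) so that both backward and forward jumps map into the unsigned pc_delta field; anything outside that window takes the EventAccessExt path with the full PC. A small standalone illustration of the same biasing trick (kPCBits value assumed, helper names hypothetical):

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;
using u64 = uint64_t;

// Assumed width of EventAccess::pc_delta; the committed value lives with the
// trace structures, which are not part of this excerpt.
constexpr u64 kPCBits = 15;

// Returns true and writes the biased delta if |pc| is close enough to
// |prev_pc| to fit in kPCBits bits (the compact-event fast path).
bool EncodePcDelta(uptr prev_pc, uptr pc, u64 *delta) {
  u64 d = pc - prev_pc + (u64{1} << (kPCBits - 1));  // bias the signed delta
  if (d >= (u64{1} << kPCBits))
    return false;  // too far: the runtime falls back to EventAccessExt
  *delta = d;
  return true;
}

uptr DecodePc(uptr prev_pc, u64 delta) {
  return prev_pc + delta - (u64{1} << (kPCBits - 1));  // undo the bias on replay
}

int main() {
  uptr prev = 0x401230, cur = 0x401258;  // 0x28 bytes forward
  u64 delta;
  if (EncodePcDelta(prev, cur, &delta))
    std::printf("delta=%llu decoded=%#llx\n", (unsigned long long)delta,
                (unsigned long long)DecodePc(prev, delta));
  return 0;
}

thr->trace_prev_pc, added to ThreadState in tsan_rtl.h below, carries the previous PC between accesses, and TraceSwitchPart resets it to 0 when a new part is started so that each part can be decoded on its own.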

compiler-rt/lib/tsan/rtl/tsan_rtl.h

Lines changed: 93 additions & 0 deletions

@@ -444,6 +444,13 @@ struct ThreadState {
 
   const ReportDesc *current_report;
 
+  // Current position in tctx->trace.Back()->events (Event*).
+  atomic_uintptr_t trace_pos;
+  // PC of the last memory access, used to compute PC deltas in the trace.
+  uptr trace_prev_pc;
+  Sid sid;
+  Epoch epoch;
+
   explicit ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                        unsigned reuse_count, uptr stk_addr, uptr stk_size,
                        uptr tls_addr, uptr tls_size);
@@ -486,6 +493,8 @@ class ThreadContext final : public ThreadContextBase {
   u64 epoch0;
   u64 epoch1;
 
+  v3::Trace trace;
+
   // Override superclass callbacks.
   void OnDead() override;
   void OnJoined(void *arg) override;
@@ -549,6 +558,8 @@ struct Context {
   ClockAlloc clock_alloc;
 
   Flags flags;
+
+  Mutex slot_mtx;
 };
 
 extern Context *ctx;  // The one and the only global runtime context.
@@ -892,6 +903,88 @@ void LazyInitialize(ThreadState *thr) {
 #endif
 }
 
+namespace v3 {
+
+void TraceSwitchPart(ThreadState *thr);
+bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
+                  uptr size, AccessType typ, VarSizeStackTrace *pstk,
+                  MutexSet *pmset, uptr *ptag);
+
+template <typename EventT>
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
+                                                   EventT **ev) {
+  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+#if SANITIZER_DEBUG
+  // TraceSwitch acquires these mutexes,
+  // so we lock them here to detect deadlocks more reliably.
+  { Lock lock(&ctx->slot_mtx); }
+  { Lock lock(&thr->tctx->trace.mtx); }
+  TracePart *current = thr->tctx->trace.parts.Back();
+  if (current) {
+    DCHECK_GE(pos, &current->events[0]);
+    DCHECK_LE(pos, &current->events[TracePart::kSize]);
+  } else {
+    DCHECK_EQ(pos, nullptr);
+  }
+#endif
+  // TracePart is allocated with mmap and is at least 4K aligned.
+  // So the following check is a faster way to check for part end.
+  // It may have false positives in the middle of the trace,
+  // they are filtered out in TraceSwitch.
+  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
+    return false;
+  *ev = reinterpret_cast<EventT *>(pos);
+  return true;
+}
+
+template <typename EventT>
+ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
+  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
+  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
+}
+
+template <typename EventT>
+void TraceEvent(ThreadState *thr, EventT ev) {
+  EventT *evp;
+  if (!TraceAcquire(thr, &evp)) {
+    TraceSwitchPart(thr);
+    UNUSED bool res = TraceAcquire(thr, &evp);
+    DCHECK(res);
+  }
+  *evp = ev;
+  TraceRelease(thr, evp);
+}
+
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
+                                                   uptr pc = 0) {
+  if (!kCollectHistory)
+    return true;
+  EventFunc *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  ev->is_access = 0;
+  ev->is_func = 1;
+  ev->pc = pc;
+  TraceRelease(thr, ev);
+  return true;
+}
+
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                          AccessType typ);
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                               AccessType typ);
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                            AccessType typ);
+void TraceFunc(ThreadState *thr, uptr pc = 0);
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+                    StackID stk);
+void TraceMutexUnlock(ThreadState *thr, uptr addr);
+void TraceTime(ThreadState *thr);
+
+}  // namespace v3
+
 }  // namespace __tsan
 
 #endif  // TSAN_RTL_H
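
RestoreStack, declared in the v3 namespace above, is the entry point of the trace replaying logic mentioned in the commit message; its implementation lives in another file of this commit and is not shown in this excerpt. Purely as a reading aid, here is a hypothetical sketch of how a replayer can walk one part of such a trace, dispatching on the tag bits of the event layout sketched after the commit header (the real RestoreStack will differ in structure and detail):

// Hypothetical replay loop over one TracePart's events; Event/EventType as in
// the earlier sketch, all handling reduced to comments.
void ReplayPart(const Event *begin, const Event *end) {
  for (const Event *pos = begin; pos != end; pos++) {
    const Event &ev = *pos;
    if (ev.is_access) {
      // Compact EventAccess: rebuild the PC from the biased delta, decompress
      // the address and check it against the access being looked up.
      continue;
    }
    if (ev.is_func) {
      // EventFunc: a non-zero pc presumably pushes a frame onto the replayed
      // shadow stack, pc == 0 pops one (TraceFunc's default argument is 0).
      continue;
    }
    switch (ev.type) {
      case EventType::kAccessExt:   break;  // access with the full PC
      case EventType::kAccessRange: break;  // range access, incl. frees
      case EventType::kLock:
      case EventType::kRLock:       break;  // add the mutex to the replayed set
      case EventType::kUnlock:      break;  // remove it again
      case EventType::kTime:        break;  // new (sid, epoch) pair takes effect
    }
  }
}

A real replayer also has to recognize and skip the NopEvent padding that TraceSwitchPart writes at the end of a part.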
