@@ -555,6 +555,188 @@ StackID CurrentStackId(ThreadState *thr, uptr pc) {
   return id;
 }

+namespace v3 {
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState *thr, uptr pc,
+                                             uptr addr, uptr size,
+                                             AccessType typ) {
+  DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
+  if (!kCollectHistory)
+    return true;
+  EventAccess *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
+  uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
+  thr->trace_prev_pc = pc;
+  if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
+    ev->is_access = 1;
+    ev->is_read = !!(typ & kAccessRead);
+    ev->is_atomic = !!(typ & kAccessAtomic);
+    ev->size_log = size_log;
+    ev->pc_delta = pc_delta;
+    DCHECK_EQ(ev->pc_delta, pc_delta);
+    ev->addr = CompressAddr(addr);
+    TraceRelease(thr, ev);
+    return true;
+  }
+  auto *evex = reinterpret_cast<EventAccessExt *>(ev);
+  evex->is_access = 0;
+  evex->is_func = 0;
+  evex->type = EventType::kAccessExt;
+  evex->is_read = !!(typ & kAccessRead);
+  evex->is_atomic = !!(typ & kAccessAtomic);
+  evex->size_log = size_log;
+  evex->addr = CompressAddr(addr);
+  evex->pc = pc;
+  TraceRelease(thr, evex);
+  return true;
+}
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc,
+                                                  uptr addr, uptr size,
+                                                  AccessType typ) {
+  if (!kCollectHistory)
+    return true;
+  EventAccessRange *ev;
+  if (UNLIKELY(!TraceAcquire(thr, &ev)))
+    return false;
+  thr->trace_prev_pc = pc;
+  ev->is_access = 0;
+  ev->is_func = 0;
+  ev->type = EventType::kAccessRange;
+  ev->is_read = !!(typ & kAccessRead);
+  ev->is_free = !!(typ & kAccessFree);
+  ev->size_lo = size;
+  ev->pc = CompressAddr(pc);
+  ev->addr = CompressAddr(addr);
+  ev->size_hi = size >> EventAccessRange::kSizeLoBits;
+  TraceRelease(thr, ev);
+  return true;
+}
+
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                            AccessType typ) {
+  if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
+    return;
+  TraceSwitchPart(thr);
+  UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
+  DCHECK(res);
+}
+
+void TraceFunc(ThreadState *thr, uptr pc) {
+  if (LIKELY(TryTraceFunc(thr, pc)))
+    return;
+  TraceSwitchPart(thr);
+  UNUSED bool res = TryTraceFunc(thr, pc);
+  DCHECK(res);
+}
+
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+                    StackID stk) {
+  DCHECK(type == EventType::kLock || type == EventType::kRLock);
+  if (!kCollectHistory)
+    return;
+  EventLock ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = type;
+  ev.pc = CompressAddr(pc);
+  ev.stack_lo = stk;
+  ev.stack_hi = stk >> EventLock::kStackIDLoBits;
+  ev._ = 0;
+  ev.addr = CompressAddr(addr);
+  TraceEvent(thr, ev);
+}
+
+void TraceMutexUnlock(ThreadState *thr, uptr addr) {
+  if (!kCollectHistory)
+    return;
+  EventUnlock ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = EventType::kUnlock;
+  ev._ = 0;
+  ev.addr = CompressAddr(addr);
+  TraceEvent(thr, ev);
+}
+
+void TraceTime(ThreadState *thr) {
+  if (!kCollectHistory)
+    return;
+  EventTime ev;
+  ev.is_access = 0;
+  ev.is_func = 0;
+  ev.type = EventType::kTime;
+  ev.sid = static_cast<u64>(thr->sid);
+  ev.epoch = static_cast<u64>(thr->epoch);
+  ev._ = 0;
+  TraceEvent(thr, ev);
+}
+
+NOINLINE
+void TraceSwitchPart(ThreadState *thr) {
+  Trace *trace = &thr->tctx->trace;
+  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
+  auto *part = trace->parts.Back();
+  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
+  if (part) {
+    // We can get here when we still have space in the current trace part.
+    // The fast-path check in TraceAcquire has false positives in the middle of
+    // the part. Check if we are indeed at the end of the current part or not,
+    // and fill any gaps with NopEvent's.
+    Event *end = &part->events[TracePart::kSize];
+    DCHECK_GE(pos, &part->events[0]);
+    DCHECK_LE(pos, end);
+    if (pos + 1 < end) {
+      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
+          TracePart::kAlignment)
+        *pos++ = NopEvent;
+      *pos++ = NopEvent;
+      DCHECK_LE(pos + 2, end);
+      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
+      // Ensure we setup trace so that the next TraceAcquire
+      // won't detect trace part end.
+      Event *ev;
+      CHECK(TraceAcquire(thr, &ev));
+      return;
+    }
+    // We are indeed at the end.
+    for (; pos < end; pos++) *pos = NopEvent;
+  }
+#if !SANITIZER_GO
+  if (ctx->after_multithreaded_fork) {
+    // We just need to survive till exec.
+    CHECK(part);
+    atomic_store_relaxed(&thr->trace_pos,
+                         reinterpret_cast<uptr>(&part->events[0]));
+    return;
+  }
+#endif
+  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
+  part->trace = trace;
+  thr->trace_prev_pc = 0;
+  {
+    Lock lock(&trace->mtx);
+    trace->parts.PushBack(part);
+    atomic_store_relaxed(&thr->trace_pos,
+                         reinterpret_cast<uptr>(&part->events[0]));
+  }
+  // Make this part self-sufficient by restoring the current stack
+  // and mutex set in the beginning of the trace.
+  TraceTime(thr);
+  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
+    CHECK(TryTraceFunc(thr, *pos));
+  for (uptr i = 0; i < thr->mset.Size(); i++) {
+    MutexSet::Desc d = thr->mset.Get(i);
+    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
+                   d.addr, d.stack_id);
+  }
+}
+
+}  // namespace v3
+
 void TraceSwitch(ThreadState *thr) {
 #if !SANITIZER_GO
   if (ctx->after_multithreaded_fork)
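The added functions share one shape: a Try* fast path that fails only when the current trace part is full, and a slow path that calls TraceSwitchPart and retries (TraceMemoryAccessRange and TraceFunc above show it explicitly). As a sketch of how a memory-access hook would be expected to drive TryTraceMemoryAccess under the same pattern; RecordAccess is a hypothetical caller name and is not part of this commit:

// Sketch only (not from the commit): hypothetical caller of
// TryTraceMemoryAccess, mirroring the retry shape used by
// TraceMemoryAccessRange in the hunk above.
void RecordAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ) {
  // Fast path: the compressed access event fits in the current trace part.
  if (LIKELY(TryTraceMemoryAccess(thr, pc, addr, size, typ)))
    return;
  // Slow path: open a fresh trace part; the retry is then expected to succeed.
  TraceSwitchPart(thr);
  UNUSED bool res = TryTraceMemoryAccess(thr, pc, addr, size, typ);
  DCHECK(res);
}

Keeping the slow path in the NOINLINE TraceSwitchPart (which takes trace->mtx and may call MmapOrDie) leaves the common case as a single TraceAcquire plus a few bit-field stores.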