Commit 5a2c311

[TSAN] Add optional support for distinguishing volatiles
Add support to optionally emit different instrumentation for accesses to volatile variables. While the default TSAN runtime will likely never require this feature, other runtimes for different environments that have subtly different memory models or assumptions may require distinguishing volatiles.

One such environment is OS kernels, where volatile is still used in various places for various reasons, and is often declared "safe enough" even in multi-threaded contexts. One example is the Linux kernel, which implements various synchronization primitives using volatile (READ_ONCE(), WRITE_ONCE()). Here the Kernel Concurrency Sanitizer (KCSAN) [1] is a runtime that uses TSAN instrumentation but otherwise implements a very different approach to race detection from TSAN's.

While explicit use of volatile is generally discouraged in the Linux kernel, the topic will likely come up again, and we will eventually need to distinguish volatile accesses [2]. The other use case is ignoring data races on specially marked variables in the kernel, for example bit flags (here we may hide 'volatile' behind a different name such as 'no_data_race').

[1] https://github.com/google/ktsan/wiki/KCSAN
[2] https://lkml.kernel.org/r/CANpmjNOfXNE-Zh3MNP=-gmnhvKbsfUfTtWkyg_=VqTxS4nnptQ@mail.gmail.com

Author: melver (Marco Elver)
Reviewed-in: https://reviews.llvm.org/D78554
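For context, the kernel's READ_ONCE()/WRITE_ONCE() primitives boil down to accesses through volatile-qualified lvalues, which is exactly the kind of access this mode lets a runtime tell apart from plain loads and stores. A minimal C++ sketch of the same idea (illustrative stand-ins only, not the kernel's actual macros):

// Illustrative equivalents of the kernel's READ_ONCE()/WRITE_ONCE():
// the access goes through a volatile-qualified lvalue, which is
// precisely what -tsan-distinguish-volatile routes to the
// __tsan_volatile_* entry points instead of the plain ones.
template <typename T>
T read_once(const T &x) {
  return *static_cast<const volatile T *>(&x);
}

template <typename T>
void write_once(T &x, T val) {
  *static_cast<volatile T *>(&x) = val;
}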
1 parent: ec16df7

File tree: 2 files changed, +216 −4 lines changed

llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp

Lines changed: 41 additions & 4 deletions
@@ -68,6 +68,10 @@ static cl::opt<bool> ClInstrumentAtomics(
 static cl::opt<bool> ClInstrumentMemIntrinsics(
     "tsan-instrument-memintrinsics", cl::init(true),
     cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
+static cl::opt<bool> ClDistinguishVolatile(
+    "tsan-distinguish-volatile", cl::init(false),
+    cl::desc("Emit special instrumentation for accesses to volatiles"),
+    cl::Hidden);
 
 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
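The new flag is hidden and off by default, so default TSAN pipelines emit exactly the instrumentation they did before. It is enabled explicitly via opt -tsan -tsan-distinguish-volatile, as the new test below does; from clang, a cl::opt like this should also be reachable as -mllvm -tsan-distinguish-volatile (that spelling is an assumption based on standard cl::opt plumbing, not something this patch touches).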
@@ -118,6 +122,10 @@ struct ThreadSanitizer {
   FunctionCallee TsanWrite[kNumberOfAccessSizes];
   FunctionCallee TsanUnalignedRead[kNumberOfAccessSizes];
   FunctionCallee TsanUnalignedWrite[kNumberOfAccessSizes];
+  FunctionCallee TsanVolatileRead[kNumberOfAccessSizes];
+  FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
+  FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
+  FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
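The four new arrays mirror the existing read/write pairs: one callee per access size. Elsewhere in the pass, kNumberOfAccessSizes covers the power-of-two sizes 1 to 16 bytes, indexed by log2 of the byte size; a standalone sketch of that mapping (not a quote from the patch):

// One array slot per power-of-two access size: 1, 2, 4, 8, 16 bytes.
static constexpr unsigned kNumberOfAccessSizes = 5;

// Index = log2(byte size), so e.g. a 4-byte access uses slot 2.
static unsigned accessSizeIndex(unsigned ByteSize) {
  unsigned Idx = 0;
  while ((1u << Idx) < ByteSize)
    ++Idx;
  return Idx; // 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4
}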
@@ -236,6 +244,24 @@ void ThreadSanitizer::initialize(Module &M) {
     TsanUnalignedWrite[i] = M.getOrInsertFunction(
         UnalignedWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
 
+    SmallString<64> VolatileReadName("__tsan_volatile_read" + ByteSizeStr);
+    TsanVolatileRead[i] = M.getOrInsertFunction(
+        VolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+    SmallString<64> VolatileWriteName("__tsan_volatile_write" + ByteSizeStr);
+    TsanVolatileWrite[i] = M.getOrInsertFunction(
+        VolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+    SmallString<64> UnalignedVolatileReadName("__tsan_unaligned_volatile_read" +
+                                              ByteSizeStr);
+    TsanUnalignedVolatileRead[i] = M.getOrInsertFunction(
+        UnalignedVolatileReadName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+    SmallString<64> UnalignedVolatileWriteName(
+        "__tsan_unaligned_volatile_write" + ByteSizeStr);
+    TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
+        UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
     Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
     Type *PtrTy = Ty->getPointerTo();
     SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
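Each getOrInsertFunction call above declares its entry point as void(i8*), which pins down the C-level contract a runtime must export. For the 4-byte flavor, the signatures follow directly from the IR types above (the extern "C" grouping here is just presentation):

// Entry points called by instrumented code for 4-byte volatile accesses;
// each receives the accessed address and returns nothing, matching the
// IRB.getVoidTy() / IRB.getInt8PtrTy() declarations above.
extern "C" {
void __tsan_volatile_read4(void *addr);
void __tsan_volatile_write4(void *addr);
void __tsan_unaligned_volatile_read4(void *addr);
void __tsan_unaligned_volatile_write4(void *addr);
}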
@@ -565,13 +591,24 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
   const unsigned Alignment = IsWrite
                                  ? cast<StoreInst>(I)->getAlignment()
                                  : cast<LoadInst>(I)->getAlignment();
+  const bool IsVolatile =
+      ClDistinguishVolatile && (IsWrite ? cast<StoreInst>(I)->isVolatile()
+                                        : cast<LoadInst>(I)->isVolatile());
   Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
   const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
   FunctionCallee OnAccessFunc = nullptr;
-  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
-    OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
-  else
-    OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
+  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
+    if (IsVolatile)
+      OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
+    else
+      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
+  } else {
+    if (IsVolatile)
+      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
+                             : TsanUnalignedVolatileRead[Idx];
+    else
+      OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
+  }
   IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
   if (IsWrite) NumInstrumentedWrites++;
   else NumInstrumentedReads++;
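The alignment predicate is unchanged; volatility only selects between the plain and _volatile flavor within each branch. A standalone model of the resulting dispatch, useful for predicting which entry point a given access hits (not LLVM code, just a mirror of the branch logic; TypeSizeBits is the access size in bits, as in the pass):

#include <string>

// Returns the runtime entry-point name chosen for a given access.
std::string accessFunc(unsigned Alignment, unsigned TypeSizeBits,
                       bool IsWrite, bool IsVolatile) {
  // Same predicate as above: alignment of 0, at least 8, or a multiple
  // of the access size in bytes counts as aligned.
  bool Aligned = Alignment == 0 || Alignment >= 8 ||
                 (Alignment % (TypeSizeBits / 8)) == 0;
  std::string Name = "__tsan_";
  if (!Aligned)
    Name += "unaligned_";
  if (IsVolatile)
    Name += "volatile_";
  Name += IsWrite ? "write" : "read";
  return Name + std::to_string(TypeSizeBits / 8);
}

// For example, accessFunc(2, 32, /*IsWrite=*/false, /*IsVolatile=*/true)
// yields "__tsan_unaligned_volatile_read4", which the new test checks.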
Lines changed: 175 additions & 0 deletions (new test file)
@@ -0,0 +1,175 @@
; RUN: opt < %s -tsan -tsan-distinguish-volatile -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

define i16 @test_volatile_read2(i16* %a) sanitize_thread {
entry:
  %tmp1 = load volatile i16, i16* %a, align 2
  ret i16 %tmp1
}

; CHECK-LABEL: define i16 @test_volatile_read2(i16* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read2(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i16, i16* %a, align 2
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i16

define i32 @test_volatile_read4(i32* %a) sanitize_thread {
entry:
  %tmp1 = load volatile i32, i32* %a, align 4
  ret i32 %tmp1
}

; CHECK-LABEL: define i32 @test_volatile_read4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read4(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i32, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i32

define i64 @test_volatile_read8(i64* %a) sanitize_thread {
entry:
  %tmp1 = load volatile i64, i64* %a, align 8
  ret i64 %tmp1
}

; CHECK-LABEL: define i64 @test_volatile_read8(i64* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read8(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i64, i64* %a, align 8
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i64

define i128 @test_volatile_read16(i128* %a) sanitize_thread {
entry:
  %tmp1 = load volatile i128, i128* %a, align 16
  ret i128 %tmp1
}

; CHECK-LABEL: define i128 @test_volatile_read16(i128* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_read16(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i128, i128* %a, align 16
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i128

define void @test_volatile_write2(i16* %a) sanitize_thread {
entry:
  store volatile i16 1, i16* %a, align 2
  ret void
}

; CHECK-LABEL: define void @test_volatile_write2(i16* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i16* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write2(i8* %1)
; CHECK-NEXT: store volatile i16 1, i16* %a, align 2
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void

define void @test_volatile_write4(i32* %a) sanitize_thread {
entry:
  store volatile i32 1, i32* %a, align 4
  ret void
}

; CHECK-LABEL: define void @test_volatile_write4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write4(i8* %1)
; CHECK-NEXT: store volatile i32 1, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void

define void @test_volatile_write8(i64* %a) sanitize_thread {
entry:
  store volatile i64 1, i64* %a, align 8
  ret void
}

; CHECK-LABEL: define void @test_volatile_write8(i64* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i64* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write8(i8* %1)
; CHECK-NEXT: store volatile i64 1, i64* %a, align 8
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void

define void @test_volatile_write16(i128* %a) sanitize_thread {
entry:
  store volatile i128 1, i128* %a, align 16
  ret void
}

; CHECK-LABEL: define void @test_volatile_write16(i128* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i128* %a to i8*
; CHECK-NEXT: call void @__tsan_volatile_write16(i8* %1)
; CHECK-NEXT: store volatile i128 1, i128* %a, align 16
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void

; Check unaligned volatile accesses

define i32 @test_unaligned_read4(i32* %a) sanitize_thread {
entry:
  %tmp1 = load volatile i32, i32* %a, align 2
  ret i32 %tmp1
}

; CHECK-LABEL: define i32 @test_unaligned_read4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_unaligned_volatile_read4(i8* %1)
; CHECK-NEXT: %tmp1 = load volatile i32, i32* %a, align 2
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i32

define void @test_unaligned_write4(i32* %a) sanitize_thread {
entry:
  store volatile i32 1, i32* %a, align 1
  ret void
}

; CHECK-LABEL: define void @test_unaligned_write4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_unaligned_volatile_write4(i8* %1)
; CHECK-NEXT: store volatile i32 1, i32* %a, align 1
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void

; Check that regular aligned accesses are unaffected

define i32 @test_read4(i32* %a) sanitize_thread {
entry:
  %tmp1 = load i32, i32* %a, align 4
  ret i32 %tmp1
}

; CHECK-LABEL: define i32 @test_read4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_read4(i8* %1)
; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret i32

define void @test_write4(i32* %a) sanitize_thread {
entry:
  store i32 1, i32* %a, align 4
  ret void
}

; CHECK-LABEL: define void @test_write4(i32* %a)
; CHECK: call void @__tsan_func_entry(i8* %0)
; CHECK-NEXT: %1 = bitcast i32* %a to i8*
; CHECK-NEXT: call void @__tsan_write4(i8* %1)
; CHECK-NEXT: store i32 1, i32* %a, align 4
; CHECK-NEXT: call void @__tsan_func_exit()
; CHECK: ret void
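The new file is a standard FileCheck test, so it runs under llvm-lit with the rest of the ThreadSanitizer instrumentation tests; by hand, the RUN line at the top is the recipe: substitute the test file's path for %s in opt < %s -tsan -tsan-distinguish-volatile -S | FileCheck %s.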
