
Commit 831910c

tsan: new MemoryAccess interface
Currently we have a MemoryAccess function that accepts "bool kAccessIsWrite, bool kIsAtomic" and 4 wrappers: MemoryRead/MemoryWrite/MemoryReadAtomic/MemoryWriteAtomic. Such a scheme with bool flags is not particularly scalable/extendable. Because of that we did not have Read/Write wrappers for UnalignedMemoryAccess, and "true, false" or "false, true" at call sites is not very readable. Moreover, the new tsan runtime will introduce more flags (e.g. move "freed" and "vptr access" to memory access flags). We can't have 16 wrappers, and each flag also takes a whole 64-bit register for non-inlined calls.

Introduce an AccessType enum that contains a bit mask of read/write, atomic/non-atomic, and later free/non-free, vptr/non-vptr. Such a scheme is more scalable, more readable, more efficient (it does not consume multiple registers for these flags during calls), and also covers the unaligned and range variations of the memory access functions.

Also switch from size log to just size. The new tsan runtime won't have the limitation of supporting only 1/2/4/8 access sizes, so we don't need the logarithms.

Also add an inline thunk that converts the new interface to the old one. For inlined calls it should not add any overhead because all flags/sizes can be computed at compile time.

Reviewed By: vitalybuka, melver

Differential Revision: https://reviews.llvm.org/D107276
1 parent 0156f91
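The flags and the compatibility thunk described above live in headers that are part of the 10-file change but are not reproduced in this excerpt. As a reading aid for the hunks below, here is a minimal sketch of the new interface; the enumerator values and the thunk body are assumptions inferred from the commit message and the call sites, not a copy of the actual runtime header changes:

// Sketch only: bit-mask access flags. Only kAccessRead, kAccessWrite and
// kAccessAtomic are exercised by the hunks below; the free/vptr flags are
// described as coming later. Concrete values are an assumption.
enum AccessType : int {
  kAccessWrite = 0,        // plain write access (assumed default/zero value)
  kAccessRead = 1 << 0,    // plain read access
  kAccessAtomic = 1 << 1,  // combined as kAccessRead | kAccessAtomic etc.
};

// Sketch of the inline thunk onto the old interface: map the byte size back to
// a size log and unpack the flags into the old bool parameters. For inlined
// calls, size and typ are compile-time constants, so this folds away entirely.
ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ) {
  int size_log;
  switch (size) {
    case 1: size_log = kSizeLog1; break;
    case 2: size_log = kSizeLog2; break;
    case 4: size_log = kSizeLog4; break;
    default: size_log = kSizeLog8; break;  // only 1/2/4/8 supported for now
  }
  bool is_write = !(typ & kAccessRead);
  bool is_atomic = typ & kAccessAtomic;
  // Old entry point: MemoryAccess(thr, pc, addr, kAccessSizeLog,
  // kAccessIsWrite, kIsAtomic).
  MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
}

Under these assumptions, a call such as MemoryAccess(thr, pc, (uptr)a, 8, kAccessRead | kAccessAtomic) lowers to the same code as the old MemoryReadAtomic(thr, pc, (uptr)a, kSizeLog8) wrapper, while keeping direction, atomicity and size in a single readable argument.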

10 files changed (+118, -96 lines)

compiler-rt/lib/tsan/go/tsan_go.cpp

Lines changed: 4 additions & 4 deletions
@@ -167,25 +167,25 @@ void __tsan_map_shadow(uptr addr, uptr size) {
 }
 
 void __tsan_read(ThreadState *thr, void *addr, void *pc) {
-  MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
 }
 
 void __tsan_read_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
   if (callpc != 0)
     FuncEntry(thr, callpc);
-  MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
   if (callpc != 0)
     FuncExit(thr);
 }
 
 void __tsan_write(ThreadState *thr, void *addr, void *pc) {
-  MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
 }
 
 void __tsan_write_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
   if (callpc != 0)
     FuncEntry(thr, callpc);
-  MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
   if (callpc != 0)
     FuncExit(thr);
 }

compiler-rt/lib/tsan/rtl/tsan_external.cpp

Lines changed: 5 additions & 7 deletions
@@ -57,16 +57,14 @@ uptr TagFromShadowStackFrame(uptr pc) {
 
 #if !SANITIZER_GO
 
-typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
+void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessType typ) {
   CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
   ThreadState *thr = cur_thread();
   if (caller_pc) FuncEntry(thr, caller_pc);
   InsertShadowStackFrameForTag(thr, (uptr)tag);
   bool in_ignored_lib;
-  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
-    access(thr, CALLERPC, (uptr)addr, kSizeLog1);
-  }
+  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+    MemoryAccess(thr, CALLERPC, (uptr)addr, 1, typ);
   FuncExit(thr);
   if (caller_pc) FuncExit(thr);
 }
@@ -111,12 +109,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessRead);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessWrite);
 }
 }  // extern "C"
 

compiler-rt/lib/tsan/rtl/tsan_fd.cpp

Lines changed: 7 additions & 7 deletions
@@ -115,7 +115,7 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
     MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
   } else {
     // See the dup-related comment in FdClose.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
 }
 
@@ -163,7 +163,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Acquire(thr, pc, (uptr)s);
 }
@@ -174,7 +174,7 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Release(thr, pc, (uptr)s);
 }
@@ -184,7 +184,7 @@ void FdAccess(ThreadState *thr, uptr pc, int fd) {
   if (bogusfd(fd))
     return;
   FdDesc *d = fddesc(thr, pc, fd);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
 }
 
 void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
@@ -194,7 +194,7 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
   FdDesc *d = fddesc(thr, pc, fd);
   if (write) {
     // To catch races between fd usage and close.
-    MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
   } else {
     // This path is used only by dup2/dup3 calls.
     // We do read instead of write because there is a number of legitimate
@@ -204,7 +204,7 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
     // 2. Some daemons dup /dev/null in place of stdin/stdout.
     // On the other hand we have not seen cases when write here catches real
     // bugs.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
   // We need to clear it, because if we do not intercept any call out there
   // that creates fd, we will hit false postives.
@@ -228,7 +228,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
     return;
   // Ignore the case when user dups not yet connected socket.
   FdDesc *od = fddesc(thr, pc, oldfd);
-  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
   FdClose(thr, pc, newfd, write);
   init(thr, pc, newfd, ref(od->sync), write);
 }

compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp

Lines changed: 4 additions & 4 deletions
@@ -1445,24 +1445,24 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_init)(b, a, count);
   return res;
 }
 
 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_destroy)(b);
   return res;
 }
 
 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
   Release(thr, pc, (uptr)b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   int res = REAL(pthread_barrier_wait)(b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
     Acquire(thr, pc, (uptr)b);
   }

compiler-rt/lib/tsan/rtl/tsan_interface.cpp

Lines changed: 24 additions & 16 deletions
@@ -30,57 +30,65 @@ void __tsan_flush_memory() {
 }
 
 void __tsan_read16(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
 }
 
 void __tsan_write16(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
 }
 
 void __tsan_read16_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessRead);
 }
 
 void __tsan_write16_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessWrite);
 }
 
 // __tsan_unaligned_read/write calls are emitted by compiler.
 
 void __tsan_unaligned_read2(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
 }
 
 void __tsan_unaligned_read4(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
 }
 
 void __tsan_unaligned_read8(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
 }
 
 void __tsan_unaligned_read16(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, kAccessRead);
 }
 
 void __tsan_unaligned_write2(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
 }
 
 void __tsan_unaligned_write4(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
 }
 
 void __tsan_unaligned_write8(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
 }
 
 void __tsan_unaligned_write16(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, kAccessWrite);
 }
 
 // __sanitizer_unaligned_load/store are for user instrumentation.

compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp

Lines changed: 12 additions & 11 deletions
@@ -161,16 +161,16 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
 }
 #endif
 
-template<typename T>
-static int SizeLog() {
+template <typename T>
+static int AccessSize() {
   if (sizeof(T) <= 1)
-    return kSizeLog1;
+    return 1;
   else if (sizeof(T) <= 2)
-    return kSizeLog2;
+    return 2;
   else if (sizeof(T) <= 4)
-    return kSizeLog4;
+    return 4;
   else
-    return kSizeLog8;
+    return 8;
   // For 16-byte atomics we also use 8-byte memory access,
   // this leads to false negatives only in very obscure cases.
 }
@@ -224,7 +224,8 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo)) {
-    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+                 kAccessRead | kAccessAtomic);
     return NoTsanAtomicLoad(a, mo);
   }
   // Don't create sync object if it does not exist yet. For example, an atomic
@@ -238,7 +239,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
     // of the value and the clock we acquire.
     v = NoTsanAtomicLoad(a, mo);
   }
-  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
   return v;
 }
 
@@ -258,7 +259,7 @@ template <typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                         morder mo) {
   CHECK(IsStoreOrder(mo));
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   // Strictly saying even relaxed store cuts off release sequence,
@@ -279,7 +280,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
 
 template <typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   if (LIKELY(mo == mo_relaxed))
     return F(a, v);
   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
@@ -404,7 +405,7 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
   // (mo_relaxed) when those are used.
   CHECK(IsLoadOrder(fmo));
 
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
     T cc = *c;
     T pr = func_cas(a, cc, v);
