Skip to content

Commit 33d2395

Browse files
committed
[MSAN] Instrument libatomic load/store calls
These calls are not intercepted by compiler-rt, nor is libatomic.a itself built with instrumentation. This patch uses the existing libcall mechanism to detect calls to atomic_load and atomic_store, and instruments them much like the preexisting instrumentation for atomic instructions. Calls to _load are given at least Acquire ordering, and calls to _store at least Release ordering. Because the requested ordering is only known at runtime, MSan injects a lookup table (implemented as a constant vector with extractelement) to upgrade it. Differential Revision: https://reviews.llvm.org/D83337
1 parent a5e0194 commit 33d2395

File tree

3 files changed

+219
-0
lines changed

3 files changed

+219
-0
lines changed

compiler-rt/test/msan/libatomic.c

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_STORE -O0 %s -o %t && %run %t 2>&1
2+
// RUN: %clangxx_msan -fsanitize-memory-track-origins=0 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK
3+
// RUN: %clangxx_msan -fsanitize-memory-track-origins=2 -latomic -DTEST_LOAD -O0 %s -o %t && not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SHADOW
4+
5+
#include <sanitizer/msan_interface.h>
6+
#include <stdatomic.h>
7+
8+
typedef struct __attribute((packed)) {
9+
uint8_t val[3];
10+
} i24;
11+
12+
void copy(i24 *dst, i24 *src);
13+
14+
int main() {
  i24 uninit;      // deliberately left uninitialized: its shadow is poisoned
  i24 init = {0};  // fully initialized: its shadow is clean

  __msan_check_mem_is_initialized(&init, 3);  // sanity: init starts clean
  copy(&init, &uninit);  // under TEST_LOAD this copies uninit's poisoned shadow into init
  __msan_check_mem_is_initialized(&init, 3);  // reports under TEST_LOAD (see CHECK lines in copy)
}
22+
23+
void copy(i24 *dst, i24 *src) {
#ifdef TEST_LOAD
  __atomic_load(src, dst, __ATOMIC_RELAXED);  // MSan copies *src's shadow to *dst; relaxed is upgraded to at-least-acquire

  // CHECK: MemorySanitizer: use-of-uninitialized-value
  // CHECK: #0 {{0x[a-f0-9]+}} in main{{.*}}libatomic.c:[[@LINE-8]]

  // CHECK-SHADOW: Uninitialized value was stored to memory at
  // CHECK-SHADOW: #0 {{0x[a-f0-9]+}} in copy{{.*}}libatomic.c:[[@LINE-6]]
#endif
#ifdef TEST_STORE
  // Store always writes a clean shadow
  __atomic_store(src, dst, __ATOMIC_RELAXED);
#endif
}

llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

Lines changed: 113 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -572,6 +572,9 @@ class MemorySanitizer {
572572
/// uninitialized value and returns an updated origin id encoding this info.
573573
FunctionCallee MsanChainOriginFn;
574574

575+
/// Run-time helper that paints an origin over a region.
576+
FunctionCallee MsanSetOriginFn;
577+
575578
/// MSan runtime replacements for memmove, memcpy and memset.
576579
FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;
577580

@@ -850,6 +853,9 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
850853
// instrumentation.
851854
MsanChainOriginFn = M.getOrInsertFunction(
852855
"__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
856+
MsanSetOriginFn =
857+
M.getOrInsertFunction("__msan_set_origin", IRB.getVoidTy(),
858+
IRB.getInt8PtrTy(), IntptrTy, IRB.getInt32Ty());
853859
MemmoveFn = M.getOrInsertFunction(
854860
"__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
855861
IRB.getInt8PtrTy(), IntptrTy);
@@ -1769,6 +1775,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
17691775
llvm_unreachable("Unknown ordering");
17701776
}
17711777

1778+
/// Build a constant lookup table mapping each C ABI atomic ordering to the
/// weakest ordering that still includes Release semantics. Used to upgrade
/// the runtime-provided ordering argument of a libatomic store call so the
/// shadow operations emitted before it cannot be reordered after it.
/// The table is returned as a <6 x i32> constant vector; the caller indexes
/// it with extractelement using the original ordering value.
Value *makeAddReleaseOrderingTable(IRBuilder<> &IRB) {
  constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
  uint32_t Table[NumOrderings] = {};

  // relaxed/release -> release; any acquire flavor -> acq_rel; seq_cst stays.
  Table[(int)AtomicOrderingCABI::relaxed] = (int)AtomicOrderingCABI::release;
  Table[(int)AtomicOrderingCABI::release] = (int)AtomicOrderingCABI::release;
  Table[(int)AtomicOrderingCABI::consume] = (int)AtomicOrderingCABI::acq_rel;
  Table[(int)AtomicOrderingCABI::acquire] = (int)AtomicOrderingCABI::acq_rel;
  Table[(int)AtomicOrderingCABI::acq_rel] = (int)AtomicOrderingCABI::acq_rel;
  Table[(int)AtomicOrderingCABI::seq_cst] = (int)AtomicOrderingCABI::seq_cst;

  return ConstantDataVector::get(IRB.getContext(),
                                 makeArrayRef(Table, NumOrderings));
}
1795+
17721796
AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
17731797
switch (a) {
17741798
case AtomicOrdering::NotAtomic:
@@ -1786,6 +1810,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
17861810
llvm_unreachable("Unknown ordering");
17871811
}
17881812

1813+
/// Build a constant lookup table mapping each C ABI atomic ordering to the
/// weakest ordering that still includes Acquire semantics. Used to upgrade
/// the runtime-provided ordering argument of a libatomic load call so the
/// shadow operations emitted after it cannot be reordered before it.
/// The table is returned as a <6 x i32> constant vector; the caller indexes
/// it with extractelement using the original ordering value.
Value *makeAddAcquireOrderingTable(IRBuilder<> &IRB) {
  constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
  uint32_t Table[NumOrderings] = {};

  // relaxed/consume/acquire -> acquire; any release flavor -> acq_rel;
  // seq_cst stays.
  Table[(int)AtomicOrderingCABI::relaxed] = (int)AtomicOrderingCABI::acquire;
  Table[(int)AtomicOrderingCABI::consume] = (int)AtomicOrderingCABI::acquire;
  Table[(int)AtomicOrderingCABI::acquire] = (int)AtomicOrderingCABI::acquire;
  Table[(int)AtomicOrderingCABI::release] = (int)AtomicOrderingCABI::acq_rel;
  Table[(int)AtomicOrderingCABI::acq_rel] = (int)AtomicOrderingCABI::acq_rel;
  Table[(int)AtomicOrderingCABI::seq_cst] = (int)AtomicOrderingCABI::seq_cst;

  return ConstantDataVector::get(IRB.getContext(),
                                 makeArrayRef(Table, NumOrderings));
}
1830+
17891831
// ------------------- Visitors.
17901832
using InstVisitor<MemorySanitizerVisitor>::visit;
17911833
void visit(Instruction &I) {
@@ -3404,6 +3446,60 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
34043446
}
34053447
}
34063448

3449+
// Instrument a libatomic __atomic_load(Size, SrcPtr, DstPtr, Ordering) call.
// libatomic.a is not instrumented and the call is not intercepted by
// compiler-rt, so shadow/origin propagation is injected around the call.
void visitLibAtomicLoad(CallBase &CB) {
  IRBuilder<> IRB(&CB);
  // Argument layout of the generic libcall:
  //   arg0 = byte size, arg1 = source ptr, arg2 = destination ptr,
  //   arg3 = C ABI atomic ordering (runtime value).
  Value *Size = CB.getArgOperand(0);
  Value *SrcPtr = CB.getArgOperand(1);
  Value *DstPtr = CB.getArgOperand(2);
  Value *Ordering = CB.getArgOperand(3);
  // Convert the call to have at least Acquire ordering to make sure
  // the shadow operations aren't reordered before it. The ordering is only
  // known at runtime, so it is remapped through a constant lookup table
  // (vector indexed with extractelement).
  Value *NewOrdering =
      IRB.CreateExtractElement(makeAddAcquireOrderingTable(IRB), Ordering);
  CB.setArgOperand(3, NewOrdering);

  // The shadow copy must be emitted *after* the load itself, so build at the
  // next instruction, reusing the call's debug location.
  IRBuilder<> NextIRB(CB.getNextNode());
  NextIRB.SetCurrentDebugLocation(CB.getDebugLoc());

  Value *SrcShadowPtr, *SrcOriginPtr;
  std::tie(SrcShadowPtr, SrcOriginPtr) =
      getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                         /*isStore*/ false);
  Value *DstShadowPtr =
      getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(), Align(1),
                         /*isStore*/ true)
          .first;

  // Propagate the source's shadow bytes to the destination's shadow.
  NextIRB.CreateMemCpy(DstShadowPtr, Align(1), SrcShadowPtr, Align(1), Size);
  if (MS.TrackOrigins) {
    // Chain the source origin and paint it over the whole destination region
    // via the __msan_set_origin runtime helper.
    Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
                                                 kMinOriginAlignment);
    Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
    NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
  }
}
3481+
3482+
// Instrument a libatomic __atomic_store(Size, SrcPtr, DstPtr, Ordering) call.
// Only the destination's shadow needs updating: an atomic store always
// publishes a fully-initialized (clean) value.
void visitLibAtomicStore(CallBase &CB) {
  IRBuilder<> IRB(&CB);
  Value *Size = CB.getArgOperand(0);
  Value *DstPtr = CB.getArgOperand(2);
  Value *Ordering = CB.getArgOperand(3);
  // Convert the call to have at least Release ordering to make sure
  // the shadow operations aren't reordered after it. The ordering is only
  // known at runtime, so it is remapped through a constant lookup table
  // (vector indexed with extractelement).
  Value *NewOrdering =
      IRB.CreateExtractElement(makeAddReleaseOrderingTable(IRB), Ordering);
  CB.setArgOperand(3, NewOrdering);

  // Shadow writes go *before* the call (IRB is positioned at the call), so
  // the Release ordering above keeps them from sinking past the store.
  Value *DstShadowPtr =
      getShadowOriginPtr(DstPtr, IRB, IRB.getInt8Ty(), Align(1),
                         /*isStore*/ true)
          .first;

  // Atomic store always paints clean shadow/origin. See file header.
  IRB.CreateMemSet(DstShadowPtr, getCleanShadow(IRB.getInt8Ty()), Size,
                   Align(1));
}
3502+
34073503
void visitCallBase(CallBase &CB) {
34083504
assert(!CB.getMetadata("nosanitize"));
34093505
if (CB.isInlineAsm()) {
@@ -3417,6 +3513,23 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
34173513
visitInstruction(CB);
34183514
return;
34193515
}
3516+
LibFunc LF;
3517+
if (TLI->getLibFunc(CB, LF)) {
3518+
// libatomic.a functions need to have special handling because there isn't
3519+
// a good way to intercept them or compile the library with
3520+
// instrumentation.
3521+
switch (LF) {
3522+
case LibFunc_atomic_load:
3523+
visitLibAtomicLoad(CB);
3524+
return;
3525+
case LibFunc_atomic_store:
3526+
visitLibAtomicStore(CB);
3527+
return;
3528+
default:
3529+
break;
3530+
}
3531+
}
3532+
34203533
if (auto *Call = dyn_cast<CallInst>(&CB)) {
34213534
assert(!isa<IntrinsicInst>(Call) && "intrinsics are handled elsewhere");
34223535

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s
2+
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=2 -S -passes=msan 2>&1 | FileCheck %s -check-prefixes=CHECK,CHECK-ORIGIN
3+
; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
4+
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
5+
target triple = "x86_64-unknown-linux-gnu"
6+
7+
declare void @__atomic_load(i64, i8*, i8*, i32)
8+
declare void @__atomic_store(i64, i8*, i8*, i32)
9+
10+
; An odd-sized (3-byte) atomic load lowered to the __atomic_load libcall.
; MSan must copy the source's shadow to the destination alloca, and (with
; origins) chain and repaint the origin. Note the relaxed ordering (i32 0)
; in the input is upgraded to acquire (i32 2) on the instrumented call.
define i24 @odd_sized_load(i24* %ptr) sanitize_memory {
; CHECK: @odd_sized_load(i24* {{.*}}[[PTR:%.+]])
; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
; CHECK-ORIGIN: @__msan_set_alloca_origin
; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*
; CHECK: call void @__atomic_load(i64 3, i8* [[PTR_I8]], i8* [[VAL_PTR_I8]], i32 2)

; Shadow (and origin) address computation for the source pointer.
; CHECK: ptrtoint i8* [[PTR_I8]]
; CHECK: xor
; CHECK: [[SPTR_I8:%.*]] = inttoptr
; CHECK-ORIGIN: add
; CHECK-ORIGIN: and
; CHECK-ORIGIN: [[OPTR:%.*]] = inttoptr

; Shadow (and origin) address computation for the destination pointer.
; CHECK: ptrtoint i8* [[VAL_PTR_I8]]
; CHECK: xor
; CHECK: [[VAL_SPTR_I8:%.*]] = inttoptr
; CHECK-ORIGIN: add
; CHECK-ORIGIN: and
; CHECK-ORIGIN: [[VAL_OPTR:%.*]] = inttoptr

; The 3 shadow bytes are copied as-is from source to destination.
; CHECK: call void @llvm.memcpy{{.*}}(i8* align 1 [[VAL_SPTR_I8]], i8* align 1 [[SPTR_I8]], i64 3

; CHECK-ORIGIN: [[ARG_ORIGIN:%.*]] = load i32, i32* [[OPTR]]
; CHECK-ORIGIN: [[VAL_ORIGIN:%.*]] = call i32 @__msan_chain_origin(i32 [[ARG_ORIGIN]])
; CHECK-ORIGIN: call void @__msan_set_origin(i8* [[VAL_PTR_I8]], i64 3, i32 [[VAL_ORIGIN]])

; CHECK: [[VAL:%.*]] = load i24, i24* [[VAL_PTR]]
; CHECK: ret i24 [[VAL]]
  %val_ptr = alloca i24, align 1
  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
  %ptr_i8 = bitcast i24* %ptr to i8*
  call void @__atomic_load(i64 3, i8* %ptr_i8, i8* %val_ptr_i8, i32 0)
  %val = load i24, i24* %val_ptr
  ret i24 %val
}
47+
48+
; An odd-sized (3-byte) atomic store lowered to the __atomic_store libcall.
; MSan clears the destination's shadow *before* the call (stores always
; publish clean shadow). Note the relaxed ordering (i32 0) in the input is
; upgraded to release (i32 3) on the instrumented call.
define void @odd_sized_store(i24* %ptr, i24 %val) sanitize_memory {
; CHECK: @odd_sized_store(i24* {{.*}}[[PTR:%.+]], i24 {{.*}}[[VAL:%.+]])
; CHECK: [[VAL_PTR:%.*]] = alloca i24, align 1
; CHECK: store i24 [[VAL]], i24* [[VAL_PTR]]
; CHECK: [[VAL_PTR_I8:%.*]] = bitcast i24* [[VAL_PTR]] to i8*
; CHECK: [[PTR_I8:%.*]] = bitcast i24* [[PTR]] to i8*

; Shadow address computation for the destination, then a 3-byte memset of 0
; (clean shadow) ahead of the store itself.
; CHECK: ptrtoint i8* [[PTR_I8]]
; CHECK: xor
; CHECK: [[SPTR_I8:%.*]] = inttoptr
; CHECK: call void @llvm.memset{{.*}}(i8* align 1 [[SPTR_I8]], i8 0, i64 3

; CHECK: call void @__atomic_store(i64 3, i8* [[VAL_PTR_I8]], i8* [[PTR_I8]], i32 3)
; CHECK: ret void
  %val_ptr = alloca i24, align 1
  store i24 %val, i24* %val_ptr
  %val_ptr_i8 = bitcast i24* %val_ptr to i8*
  %ptr_i8 = bitcast i24* %ptr to i8*
  call void @__atomic_store(i64 3, i8* %val_ptr_i8, i8* %ptr_i8, i32 0)
  ret void
}
69+

0 commit comments

Comments
 (0)