+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt < %s -passes=tsan -S | FileCheck %s
 ; Check that atomic memory operations on floating-point types are converted to calls into the ThreadSanitizer runtime.
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
 define float @load_float(ptr %fptr) {
+; CHECK-LABEL: define float @load_float(
+; CHECK-SAME: ptr [[FPTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    call void @__tsan_func_entry(ptr [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @__tsan_atomic32_load(ptr [[FPTR]], i32 0)
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32 [[TMP2]] to float
+; CHECK-NEXT:    [[V:%.*]] = load atomic float, ptr [[FPTR]] unordered, align 4
+; CHECK-NEXT:    call void @__tsan_func_exit()
+; CHECK-NEXT:    ret float [[TMP3]]
+;
   %v = load atomic float, ptr %fptr unordered, align 4
   ret float %v
-  ; CHECK-LABEL: load_float
-  ; CHECK: call i32 @__tsan_atomic32_load(ptr %{{.+}}, i32 0)
-  ; CHECK: bitcast i32 {{.+}} to float
 }
 
 define double @load_double(ptr %fptr) {
+; CHECK-LABEL: define double @load_double(
+; CHECK-SAME: ptr [[FPTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    call void @__tsan_func_entry(ptr [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @__tsan_atomic64_load(ptr [[FPTR]], i32 0)
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64 [[TMP2]] to double
+; CHECK-NEXT:    [[V:%.*]] = load atomic double, ptr [[FPTR]] unordered, align 8
+; CHECK-NEXT:    call void @__tsan_func_exit()
+; CHECK-NEXT:    ret double [[TMP3]]
+;
   %v = load atomic double, ptr %fptr unordered, align 8
   ret double %v
-  ; CHECK-LABEL: load_double
-  ; CHECK: call i64 @__tsan_atomic64_load(ptr %{{.+}}, i32 0)
-  ; CHECK: bitcast i64 {{.+}} to double
 }
 
 define fp128 @load_fp128(ptr %fptr) {
+; CHECK-LABEL: define fp128 @load_fp128(
+; CHECK-SAME: ptr [[FPTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    call void @__tsan_func_entry(ptr [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call i128 @__tsan_atomic128_load(ptr [[FPTR]], i32 0)
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i128 [[TMP2]] to fp128
+; CHECK-NEXT:    [[V:%.*]] = load atomic fp128, ptr [[FPTR]] unordered, align 16
+; CHECK-NEXT:    call void @__tsan_func_exit()
+; CHECK-NEXT:    ret fp128 [[TMP3]]
+;
   %v = load atomic fp128, ptr %fptr unordered, align 16
   ret fp128 %v
-  ; CHECK-LABEL: load_fp128
-  ; CHECK: call i128 @__tsan_atomic128_load(ptr %{{.+}}, i32 0)
-  ; CHECK: bitcast i128 {{.+}} to fp128
 }
 
 define void @store_float(ptr %fptr, float %v) {
+; CHECK-LABEL: define void @store_float(
+; CHECK-SAME: ptr [[FPTR:%.*]], float [[V:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    call void @__tsan_func_entry(ptr [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float [[V]] to i32
+; CHECK-NEXT:    call void @__tsan_atomic32_store(ptr [[FPTR]], i32 [[TMP2]], i32 0)
+; CHECK-NEXT:    call void @__tsan_func_exit()
+; CHECK-NEXT:    ret void
+;
   store atomic float %v, ptr %fptr unordered, align 4
   ret void
-  ; CHECK-LABEL: store_float
-  ; CHECK: bitcast float %v to i32
-  ; CHECK: call void @__tsan_atomic32_store(ptr %{{.+}}, i32 %{{.+}}, i32 0)
 }
 
 define void @store_double(ptr %fptr, double %v) {
+; CHECK-LABEL: define void @store_double(
+; CHECK-SAME: ptr [[FPTR:%.*]], double [[V:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    call void @__tsan_func_entry(ptr [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[V]] to i64
+; CHECK-NEXT:    call void @__tsan_atomic64_store(ptr [[FPTR]], i64 [[TMP2]], i32 0)
+; CHECK-NEXT:    call void @__tsan_func_exit()
+; CHECK-NEXT:    ret void
+;
   store atomic double %v, ptr %fptr unordered, align 8
   ret void
-  ; CHECK-LABEL: store_double
-  ; CHECK: bitcast double %v to i64
-  ; CHECK: call void @__tsan_atomic64_store(ptr %{{.+}}, i64 %{{.+}}, i32 0)
 }
 
 define void @store_fp128(ptr %fptr, fp128 %v) {
+; CHECK-LABEL: define void @store_fp128(
+; CHECK-SAME: ptr [[FPTR:%.*]], fp128 [[V:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @llvm.returnaddress(i32 0)
+; CHECK-NEXT:    call void @__tsan_func_entry(ptr [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast fp128 [[V]] to i128
+; CHECK-NEXT:    call void @__tsan_atomic128_store(ptr [[FPTR]], i128 [[TMP2]], i32 0)
+; CHECK-NEXT:    call void @__tsan_func_exit()
+; CHECK-NEXT:    ret void
+;
   store atomic fp128 %v, ptr %fptr unordered, align 16
   ret void
-  ; CHECK-LABEL: store_fp128
-  ; CHECK: bitcast fp128 %v to i128
-  ; CHECK: call void @__tsan_atomic128_store(ptr %{{.+}}, i128 %{{.+}}, i32 0)
 }
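For context, here is a minimal C-level sketch of source code that exercises the lowering this test checks. It is an illustrative assumption, not part of the test: the file name, function names, and parameters are hypothetical, and C11 memory_order_relaxed lowers to the monotonic ordering rather than the unordered ordering used in the IR above, though the instrumentation maps both to memory-order argument 0 in the runtime calls.

  /* example.c (hypothetical name); one way to inspect the instrumented IR: */
  /*   clang -O1 -fsanitize=thread -S -emit-llvm example.c                  */
  #include <stdatomic.h>

  float load_float(_Atomic float *fptr) {
    /* Emitted as an atomic float load; TSan replaces it with a call to     */
    /* __tsan_atomic32_load and bitcasts the i32 result back to float.      */
    return atomic_load_explicit(fptr, memory_order_relaxed);
  }

  void store_double(_Atomic double *fptr, double v) {
    /* Emitted as a bitcast of double to i64 followed by a call to          */
    /* __tsan_atomic64_store with memory-order argument 0.                  */
    atomic_store_explicit(fptr, v, memory_order_relaxed);
  }

The same pattern scales by width: float uses the atomic32 entry points, double the atomic64 ones, and fp128 the atomic128 ones, always round-tripping the value through the same-sized integer type.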