@@ -44,9 +44,9 @@ define void @g() {
; CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[T]], ptr [[A]], i32 0, i32 1
; CHECK-NEXT: [[SV1:%.*]] = call i32 @somevalue()
; CHECK-NEXT: [[SV2:%.*]] = call i32 @somevalue()
- ; CHECK-NEXT: store i32 [[SV1]], ptr [[A1_I8_INV]], align 4, !invariant.group !0
+ ; CHECK-NEXT: store i32 [[SV1]], ptr [[A1_I8_INV]], align 4, !invariant.group [[META0:![0-9]+]]
; CHECK-NEXT: store i32 [[SV2]], ptr [[A2]], align 4
- ; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[A1_I8_INV]], align 4, !invariant.group !0
+ ; CHECK-NEXT: [[V1:%.*]] = load i32, ptr [[A1_I8_INV]], align 4, !invariant.group [[META0]]
; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[A2]], align 4
; CHECK-NEXT: call void @h(i32 [[V1]])
; CHECK-NEXT: call void @h(i32 [[V2]])
@@ -79,6 +79,84 @@ define void @g() {
ret void
}

+ define void @store_and_launder() {
+ ; CHECK-LABEL: @store_and_launder(
+ ; CHECK-NEXT: ret void
+ ;
+ %valptr = alloca i32, align 4
+ store i32 0, ptr %valptr, align 4
+ %barr = call ptr @llvm.launder.invariant.group.p0(ptr %valptr)
+ ret void
+ }
+
+ define i32 @launder_and_load() {
+ ; CHECK-LABEL: @launder_and_load(
+ ; CHECK-NEXT: ret i32 undef
+ ;
+ %valptr = alloca i32, align 4
+ %barr = call ptr @llvm.launder.invariant.group.p0(ptr %valptr)
+ %v2 = load i32, ptr %valptr
+ ret i32 %v2
+ }
+
+ define void @launder_and_ptr_arith() {
+ ; CHECK-LABEL: @launder_and_ptr_arith(
+ ; CHECK-NEXT: ret void
+ ;
+ %valptr = alloca i32, align 4
+ %barr = call ptr @llvm.launder.invariant.group.p0(ptr %valptr)
+ %a2 = getelementptr inbounds i32, ptr %valptr, i32 0
+ ret void
+ }
+
+ define void @partial_use_of_alloca() {
+ ; CHECK-LABEL: @partial_use_of_alloca(
+ ; CHECK-NEXT: [[VALPTR:%.*]] = alloca i32, align 4
+ ; CHECK-NEXT: store i32 0, ptr [[VALPTR]], align 4
+ ; CHECK-NEXT: [[BARR:%.*]] = call ptr @llvm.launder.invariant.group.p0(ptr [[VALPTR]])
+ ; CHECK-NEXT: [[LOAD_VAL:%.*]] = load i32, ptr [[VALPTR]], align 4
+ ; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[LOAD_VAL]], 0
+ ; CHECK-NEXT: br i1 [[COND]], label [[USE_ALLOCA:%.*]], label [[END:%.*]]
+ ; CHECK: use_alloca:
+ ; CHECK-NEXT: call void @use(ptr nonnull [[VALPTR]])
+ ; CHECK-NEXT: br label [[END]]
+ ; CHECK: end:
+ ; CHECK-NEXT: ret void
+ ;
+ %valptr = alloca i32, align 4
+ store i32 0, ptr %valptr, align 4
+ %barr = call ptr @llvm.launder.invariant.group.p0(ptr %valptr)
+ %load_val = load i32, ptr %valptr, align 4
+ %cond = icmp eq i32 %load_val, 0
+ br i1 %cond, label %use_alloca, label %end
+
+ use_alloca:
+ call void @use(ptr nonnull %valptr)
+ br label %end
+
+ end:
+ ret void
+ }
+
+ define void @partial_promotion_of_alloca() {
+ ; CHECK-LABEL: @partial_promotion_of_alloca(
+ ; CHECK-NEXT: [[STRUCT_PTR_SROA_2:%.*]] = alloca i32, align 4
+ ; CHECK-NEXT: store volatile i32 0, ptr [[STRUCT_PTR_SROA_2]], align 4
+ ; CHECK-NEXT: [[STRUCT_PTR_SROA_2_0_STRUCT_PTR_SROA_2_4_LOAD_VAL:%.*]] = load volatile i32, ptr [[STRUCT_PTR_SROA_2]], align 4
+ ; CHECK-NEXT: ret void
+ ;
+ %struct_ptr = alloca %t, align 4
+ %field_ptr = getelementptr inbounds %t, ptr %struct_ptr, i32 0, i32 0
+ store i32 0, ptr %field_ptr, align 4
+ %volatile_field_ptr = getelementptr inbounds %t, ptr %struct_ptr, i32 0, i32 1
+ store volatile i32 0, ptr %volatile_field_ptr, align 4, !invariant.group !0
+ %barr = call ptr @llvm.launder.invariant.group.p0(ptr %struct_ptr)
+ %load_val = load volatile i32, ptr %volatile_field_ptr, align 4, !invariant.group !0
+ ret void
+ }
+
+ declare void @use(i32*)
+
!0 = !{}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}