@@ -125,6 +125,72 @@ entry:
125
125
ret void
126
126
}
127
127
128
define void @insert_store_nonconst_large_alignment(<4 x i32>* %q, i32 zeroext %s, i32 %idx) {
; CHECK-LABEL: @insert_store_nonconst_large_alignment(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX:%.*]], 4
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[Q:%.*]], i32 0, i32 [[IDX]]
; CHECK-NEXT: store i32 [[S:%.*]], i32* [[TMP0]], align 4
; CHECK-NEXT: ret void
;
entry:
  ; The assume bounds %idx so the insert/store pair can be scalarized.
  %cmp = icmp ult i32 %idx, 4
  call void @llvm.assume(i1 %cmp)
  %i = load <4 x i32>, <4 x i32>* %q, align 128
  %vecins = insertelement <4 x i32> %i, i32 %s, i32 %idx
  store <4 x i32> %vecins, <4 x i32>* %q, align 128
  ret void
}

146
define void @insert_store_nonconst_align_maximum_8(<8 x i64>* %q, i64 %s, i32 %idx) {
; CHECK-LABEL: @insert_store_nonconst_align_maximum_8(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX:%.*]], 2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds <8 x i64>, <8 x i64>* [[Q:%.*]], i32 0, i32 [[IDX]]
; CHECK-NEXT: store i64 [[S:%.*]], i64* [[TMP1]], align 4
; CHECK-NEXT: ret void
;
  ; Vector alignment is 8; the scalarized i64 store is capped at min(8, 4).
  %cmp = icmp ult i32 %idx, 2
  call void @llvm.assume(i1 %cmp)
  %i = load <8 x i64>, <8 x i64>* %q, align 8
  %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
  store <8 x i64> %vecins, <8 x i64>* %q, align 8
  ret void
}

162
define void @insert_store_nonconst_align_maximum_4(<8 x i64>* %q, i64 %s, i32 %idx) {
; CHECK-LABEL: @insert_store_nonconst_align_maximum_4(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX:%.*]], 2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds <8 x i64>, <8 x i64>* [[Q:%.*]], i32 0, i32 [[IDX]]
; CHECK-NEXT: store i64 [[S:%.*]], i64* [[TMP1]], align 4
; CHECK-NEXT: ret void
;
  ; Vector alignment is only 4, so the scalarized store keeps align 4.
  %cmp = icmp ult i32 %idx, 2
  call void @llvm.assume(i1 %cmp)
  %i = load <8 x i64>, <8 x i64>* %q, align 4
  %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
  store <8 x i64> %vecins, <8 x i64>* %q, align 4
  ret void
}

178
define void @insert_store_nonconst_align_larger(<8 x i64>* %q, i64 %s, i32 %idx) {
; CHECK-LABEL: @insert_store_nonconst_align_larger(
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX:%.*]], 2
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds <8 x i64>, <8 x i64>* [[Q:%.*]], i32 0, i32 [[IDX]]
; CHECK-NEXT: store i64 [[S:%.*]], i64* [[TMP1]], align 2
; CHECK-NEXT: ret void
;
  ; Store alignment (2) is weaker than the load's (4); the scalarized
  ; store must use the weaker of the two.
  %cmp = icmp ult i32 %idx, 2
  call void @llvm.assume(i1 %cmp)
  %i = load <8 x i64>, <8 x i64>* %q, align 4
  %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
  store <8 x i64> %vecins, <8 x i64>* %q, align 2
  ret void
}

128
194
define void @insert_store_nonconst_index_known_valid_by_assume (<16 x i8 >* %q , i8 zeroext %s , i32 %idx ) {
129
195
; CHECK-LABEL: @insert_store_nonconst_index_known_valid_by_assume(
130
196
; CHECK-NEXT: entry:
0 commit comments