@@ -69,7 +69,10 @@ body: |
; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT %copy(s32)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[ZEXT]](s64)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: %ctpop:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[CTPOP]](<8 x s8>)
+ ; CHECK-NEXT: [[UADDLV:%[0-9]+]]:_(<4 x s32>) = G_UADDLV [[CTPOP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[UADDLV]](<4 x s32>), [[C]](s64)
+ ; CHECK-NEXT: %ctpop:_(s32) = COPY [[EVEC]](s32)
; CHECK-NEXT: $w0 = COPY %ctpop(s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
;
@@ -98,8 +101,11 @@ body: |
; CHECK-NEXT: %copy:_(s64) = COPY $x0
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST %copy(s64)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[CTPOP]](<8 x s8>)
- ; CHECK-NEXT: %ctpop:_(s64) = G_ZEXT [[INT]](s32)
+ ; CHECK-NEXT: [[UADDLV:%[0-9]+]]:_(<4 x s32>) = G_UADDLV [[CTPOP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[UADDLV]](<4 x s32>), [[C]](s64)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
+ ; CHECK-NEXT: %ctpop:_(s64) = G_ZEXT [[COPY]](s32)
; CHECK-NEXT: $x0 = COPY %ctpop(s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0
;
@@ -131,12 +137,14 @@ body: |
; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x s8>) = G_BITCAST [[MV]](s128)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<16 x s8>) = G_CTPOP [[BITCAST]](<16 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[CTPOP]](<16 x s8>)
- ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[INT]](s32), [[C]](s32)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[UADDLV:%[0-9]+]]:_(<4 x s32>) = G_UADDLV [[CTPOP]]
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[UADDLV]](<4 x s32>), [[C]](s64)
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[C1]](s32)
; CHECK-NEXT: $x0 = COPY [[MV1]](s64)
- ; CHECK-NEXT: $x1 = COPY [[C1]](s64)
+ ; CHECK-NEXT: $x1 = COPY [[C]](s64)
; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $x1
;
; CHECK-CSSC-LABEL: name: s128_lower
@@ -177,9 +185,12 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[AND]](s64)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[CTPOP]](<8 x s8>)
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[INT]](s32)
- ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[UADDLV:%[0-9]+]]:_(<4 x s32>) = G_UADDLV [[CTPOP]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[UADDLV]](<4 x s32>), [[C1]](s64)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
;
; CHECK-CSSC-LABEL: name: widen_s16
@@ -216,9 +227,12 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[AND]](s64)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[CTPOP]](<8 x s8>)
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[INT]](s32)
- ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[UADDLV:%[0-9]+]]:_(<4 x s32>) = G_UADDLV [[CTPOP]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[UADDLV]](<4 x s32>), [[C1]](s64)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
;
; CHECK-CSSC-LABEL: name: widen_s8
@@ -255,9 +269,12 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[AND]](s64)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[CTPOP]](<8 x s8>)
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[INT]](s32)
- ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[UADDLV:%[0-9]+]]:_(<4 x s32>) = G_UADDLV [[CTPOP]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[UADDLV]](<4 x s32>), [[C1]](s64)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
;
; CHECK-CSSC-LABEL: name: widen_s3
@@ -293,9 +310,12 @@ body: |
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[AND]](s64)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[CTPOP]](<8 x s8>)
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[INT]](s32)
- ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+ ; CHECK-NEXT: [[UADDLV:%[0-9]+]]:_(<4 x s32>) = G_UADDLV [[CTPOP]]
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[UADDLV]](<4 x s32>), [[C1]](s64)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
; CHECK-NEXT: RET_ReallyLR implicit $w0
;
; CHECK-CSSC-LABEL: name: different_sizes
@@ -329,8 +349,8 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x s8>) = G_BITCAST [[COPY]](<8 x s16>)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<16 x s8>) = G_CTPOP [[BITCAST]](<16 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<16 x s8>)
- ; CHECK-NEXT: $q0 = COPY [[INT]](<8 x s16>)
+ ; CHECK-NEXT: [[UADDLP:%[0-9]+]]:_(<8 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-NEXT: $q0 = COPY [[UADDLP]](<8 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
;
; CHECK-CSSC-LABEL: name: custom_8x16
@@ -339,8 +359,8 @@ body: |
; CHECK-CSSC-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
; CHECK-CSSC-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x s8>) = G_BITCAST [[COPY]](<8 x s16>)
; CHECK-CSSC-NEXT: [[CTPOP:%[0-9]+]]:_(<16 x s8>) = G_CTPOP [[BITCAST]](<16 x s8>)
- ; CHECK-CSSC-NEXT: [[INT:%[0-9]+]]:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<16 x s8>)
- ; CHECK-CSSC-NEXT: $q0 = COPY [[INT]](<8 x s16>)
+ ; CHECK-CSSC-NEXT: [[UADDLP:%[0-9]+]]:_(<8 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-CSSC-NEXT: $q0 = COPY [[UADDLP]](<8 x s16>)
; CHECK-CSSC-NEXT: RET_ReallyLR implicit $q0
%0:_(<8 x s16>) = COPY $q0
%1:_(<8 x s16>) = G_CTPOP %0(<8 x s16>)
@@ -361,9 +381,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x s8>) = G_BITCAST [[COPY]](<4 x s32>)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<16 x s8>) = G_CTPOP [[BITCAST]](<16 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<16 x s8>)
- ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT]](<8 x s16>)
- ; CHECK-NEXT: $q0 = COPY [[INT1]](<4 x s32>)
+ ; CHECK-NEXT: [[UADDLP:%[0-9]+]]:_(<8 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-NEXT: [[UADDLP1:%[0-9]+]]:_(<4 x s32>) = G_UADDLP [[UADDLP]]
+ ; CHECK-NEXT: $q0 = COPY [[UADDLP1]](<4 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
;
; CHECK-CSSC-LABEL: name: custom_4x32
@@ -372,9 +392,9 @@ body: |
; CHECK-CSSC-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
; CHECK-CSSC-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x s8>) = G_BITCAST [[COPY]](<4 x s32>)
; CHECK-CSSC-NEXT: [[CTPOP:%[0-9]+]]:_(<16 x s8>) = G_CTPOP [[BITCAST]](<16 x s8>)
- ; CHECK-CSSC-NEXT: [[INT:%[0-9]+]]:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<16 x s8>)
- ; CHECK-CSSC-NEXT: [[INT1:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT]](<8 x s16>)
- ; CHECK-CSSC-NEXT: $q0 = COPY [[INT1]](<4 x s32>)
+ ; CHECK-CSSC-NEXT: [[UADDLP:%[0-9]+]]:_(<8 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-CSSC-NEXT: [[UADDLP1:%[0-9]+]]:_(<4 x s32>) = G_UADDLP [[UADDLP]]
+ ; CHECK-CSSC-NEXT: $q0 = COPY [[UADDLP1]](<4 x s32>)
; CHECK-CSSC-NEXT: RET_ReallyLR implicit $q0
%0:_(<4 x s32>) = COPY $q0
%1:_(<4 x s32>) = G_CTPOP %0(<4 x s32>)
@@ -395,10 +415,10 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x s8>) = G_BITCAST [[COPY]](<2 x s64>)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<16 x s8>) = G_CTPOP [[BITCAST]](<16 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<16 x s8>)
- ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT]](<8 x s16>)
- ; CHECK-NEXT: [[INT2:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT1]](<4 x s32>)
- ; CHECK-NEXT: $q0 = COPY [[INT2]](<2 x s64>)
+ ; CHECK-NEXT: [[UADDLP:%[0-9]+]]:_(<8 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-NEXT: [[UADDLP1:%[0-9]+]]:_(<4 x s32>) = G_UADDLP [[UADDLP]]
+ ; CHECK-NEXT: [[UADDLP2:%[0-9]+]]:_(<2 x s64>) = G_UADDLP [[UADDLP1]]
+ ; CHECK-NEXT: $q0 = COPY [[UADDLP2]](<2 x s64>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
;
; CHECK-CSSC-LABEL: name: custom_2x64
@@ -407,10 +427,10 @@ body: |
; CHECK-CSSC-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK-CSSC-NEXT: [[BITCAST:%[0-9]+]]:_(<16 x s8>) = G_BITCAST [[COPY]](<2 x s64>)
; CHECK-CSSC-NEXT: [[CTPOP:%[0-9]+]]:_(<16 x s8>) = G_CTPOP [[BITCAST]](<16 x s8>)
- ; CHECK-CSSC-NEXT: [[INT:%[0-9]+]]:_(<8 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<16 x s8>)
- ; CHECK-CSSC-NEXT: [[INT1:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT]](<8 x s16>)
- ; CHECK-CSSC-NEXT: [[INT2:%[0-9]+]]:_(<2 x s64>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT1]](<4 x s32>)
- ; CHECK-CSSC-NEXT: $q0 = COPY [[INT2]](<2 x s64>)
+ ; CHECK-CSSC-NEXT: [[UADDLP:%[0-9]+]]:_(<8 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-CSSC-NEXT: [[UADDLP1:%[0-9]+]]:_(<4 x s32>) = G_UADDLP [[UADDLP]]
+ ; CHECK-CSSC-NEXT: [[UADDLP2:%[0-9]+]]:_(<2 x s64>) = G_UADDLP [[UADDLP1]]
+ ; CHECK-CSSC-NEXT: $q0 = COPY [[UADDLP2]](<2 x s64>)
; CHECK-CSSC-NEXT: RET_ReallyLR implicit $q0
%0:_(<2 x s64>) = COPY $q0
%1:_(<2 x s64>) = G_CTPOP %0(<2 x s64>)
@@ -431,8 +451,8 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[COPY]](<4 x s16>)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(<4 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<8 x s8>)
- ; CHECK-NEXT: $d0 = COPY [[INT]](<4 x s16>)
+ ; CHECK-NEXT: [[UADDLP:%[0-9]+]]:_(<4 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-NEXT: $d0 = COPY [[UADDLP]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
;
; CHECK-CSSC-LABEL: name: custom_4x16
@@ -441,8 +461,8 @@ body: |
; CHECK-CSSC-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
; CHECK-CSSC-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[COPY]](<4 x s16>)
; CHECK-CSSC-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-CSSC-NEXT: [[INT:%[0-9]+]]:_(<4 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<8 x s8>)
- ; CHECK-CSSC-NEXT: $d0 = COPY [[INT]](<4 x s16>)
+ ; CHECK-CSSC-NEXT: [[UADDLP:%[0-9]+]]:_(<4 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-CSSC-NEXT: $d0 = COPY [[UADDLP]](<4 x s16>)
; CHECK-CSSC-NEXT: RET_ReallyLR implicit $d0
%0:_(<4 x s16>) = COPY $d0
%1:_(<4 x s16>) = G_CTPOP %0(<4 x s16>)
@@ -463,9 +483,9 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[COPY]](<2 x s32>)
; CHECK-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-NEXT: [[INT:%[0-9]+]]:_(<4 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<8 x s8>)
- ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT]](<4 x s16>)
- ; CHECK-NEXT: $d0 = COPY [[INT1]](<2 x s32>)
+ ; CHECK-NEXT: [[UADDLP:%[0-9]+]]:_(<4 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-NEXT: [[UADDLP1:%[0-9]+]]:_(<2 x s32>) = G_UADDLP [[UADDLP]]
+ ; CHECK-NEXT: $d0 = COPY [[UADDLP1]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
;
; CHECK-CSSC-LABEL: name: custom_2x32
@@ -474,9 +494,9 @@ body: |
; CHECK-CSSC-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
; CHECK-CSSC-NEXT: [[BITCAST:%[0-9]+]]:_(<8 x s8>) = G_BITCAST [[COPY]](<2 x s32>)
; CHECK-CSSC-NEXT: [[CTPOP:%[0-9]+]]:_(<8 x s8>) = G_CTPOP [[BITCAST]](<8 x s8>)
- ; CHECK-CSSC-NEXT: [[INT:%[0-9]+]]:_(<4 x s16>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[CTPOP]](<8 x s8>)
- ; CHECK-CSSC-NEXT: [[INT1:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlp), [[INT]](<4 x s16>)
- ; CHECK-CSSC-NEXT: $d0 = COPY [[INT1]](<2 x s32>)
+ ; CHECK-CSSC-NEXT: [[UADDLP:%[0-9]+]]:_(<4 x s16>) = G_UADDLP [[CTPOP]]
+ ; CHECK-CSSC-NEXT: [[UADDLP1:%[0-9]+]]:_(<2 x s32>) = G_UADDLP [[UADDLP]]
+ ; CHECK-CSSC-NEXT: $d0 = COPY [[UADDLP1]](<2 x s32>)
; CHECK-CSSC-NEXT: RET_ReallyLR implicit $d0
%0:_(<2 x s32>) = COPY $d0
%1:_(<2 x s32>) = G_CTPOP %0(<2 x s32>)
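
For context on the pattern these updated CHECK lines exercise: the AArch64 GlobalISel legalizer now lowers G_CTPOP through the generic G_UADDLV (unsigned add-long across vector) and G_UADDLP (unsigned add-long pairwise) nodes instead of emitting the raw @llvm.aarch64.neon.uaddlv/uaddlp intrinsics. A minimal MIR sketch of the two shapes, assembled from the CHECK lines above (virtual register names are illustrative, not taken from the test):

    ; Scalar ctpop: count bits per byte, reduce with G_UADDLV, read lane 0.
    %vec:_(<8 x s8>) = G_BITCAST %val(s64)
    %cnt:_(<8 x s8>) = G_CTPOP %vec(<8 x s8>)
    %sum:_(<4 x s32>) = G_UADDLV %cnt
    %zero:_(s64) = G_CONSTANT i64 0
    %pop:_(s32) = G_EXTRACT_VECTOR_ELT %sum(<4 x s32>), %zero(s64)

    ; Vector ctpop: widen the per-byte counts pairwise, one G_UADDLP per
    ; doubling step, until the element width matches the original type.
    %cnt16:_(<8 x s16>) = G_UADDLP %cnt8
    %cnt32:_(<4 x s32>) = G_UADDLP %cnt16
    %cnt64:_(<2 x s64>) = G_UADDLP %cnt32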