@@ -282,39 +282,17 @@ define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) {
  ret <4 x i32> %2
}
- ; TODO fold Y = sra (X, size(X)-1); mul (or (Y, 1), X) -> (abs X)
+ ; fold Y = sra (X, size(X)-1); mul (or (Y, 1), X) -> (abs X)
define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
; SSE-LABEL: combine_mul_to_abs_v16i8:
; SSE: # %bb.0:
- ; SSE-NEXT: pxor %xmm2, %xmm2
- ; SSE-NEXT: pcmpgtb %xmm0, %xmm2
- ; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
- ; SSE-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
- ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
- ; SSE-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
- ; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
- ; SSE-NEXT: pmullw %xmm0, %xmm2
- ; SSE-NEXT: pmovzxbw {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
- ; SSE-NEXT: pand %xmm0, %xmm2
- ; SSE-NEXT: pmullw %xmm3, %xmm1
- ; SSE-NEXT: pand %xmm0, %xmm1
- ; SSE-NEXT: packuswb %xmm2, %xmm1
- ; SSE-NEXT: movdqa %xmm1, %xmm0
+ ; SSE-NEXT: pabsb %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_mul_to_abs_v16i8:
; AVX: # %bb.0:
- ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
- ; AVX-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm1
- ; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
- ; AVX-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
- ; AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
- ; AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0
- ; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
- ; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
- ; AVX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
- ; AVX-NEXT: vzeroupper
+ ; AVX-NEXT: vpabsb %xmm0, %xmm0
; AVX-NEXT: retq
  %s = ashr <16 x i8> %x, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %o = or <16 x i8> %s, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -325,34 +303,16 @@ define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
; SSE-LABEL: combine_mul_to_abs_v2i64:
; SSE: # %bb.0:
- ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
- ; SSE-NEXT: psrad $31, %xmm1
- ; SSE-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
- ; SSE-NEXT: movdqa %xmm0, %xmm2
- ; SSE-NEXT: psrlq $32, %xmm2
- ; SSE-NEXT: pmuludq %xmm1, %xmm2
- ; SSE-NEXT: movdqa %xmm1, %xmm3
- ; SSE-NEXT: psrlq $32, %xmm3
- ; SSE-NEXT: pmuludq %xmm0, %xmm3
- ; SSE-NEXT: paddq %xmm2, %xmm3
- ; SSE-NEXT: psllq $32, %xmm3
- ; SSE-NEXT: pmuludq %xmm1, %xmm0
- ; SSE-NEXT: paddq %xmm3, %xmm0
+ ; SSE-NEXT: pxor %xmm1, %xmm1
+ ; SSE-NEXT: psubq %xmm0, %xmm1
+ ; SSE-NEXT: blendvpd %xmm0, %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_mul_to_abs_v2i64:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
- ; AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1
- ; AVX-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
- ; AVX-NEXT: vpsrlq $32, %xmm0, %xmm2
- ; AVX-NEXT: vpmuludq %xmm1, %xmm2, %xmm2
- ; AVX-NEXT: vpsrlq $32, %xmm1, %xmm3
- ; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
- ; AVX-NEXT: vpaddq %xmm2, %xmm3, %xmm2
- ; AVX-NEXT: vpsllq $32, %xmm2, %xmm2
- ; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
- ; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+ ; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm1
+ ; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %s = ashr <2 x i64> %x, <i64 63, i64 63>
  %o = or <2 x i64> %s, <i64 1, i64 1>
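Side note on why this fold is correct: ashr X, size(X)-1 is 0 for a non-negative lane and -1 for a negative one, or-ing that with 1 yields the lane's sign (+1 or -1), and multiplying a value by its own sign gives its absolute value, with the most negative value wrapping back to itself exactly as the wrapping mul does. Below is a minimal C sketch, separate from the test file and only for illustration, that checks this identity on a single 32-bit lane; it assumes an arithmetic right shift for `>>` on negative values (guaranteed for ashr in IR, implementation-defined but universal in practice in C).

#include <assert.h>
#include <stdint.h>

/* (or (sra x, 31), 1) * x -- the scalar form of the pattern above.
   The multiply is done in uint32_t so it wraps like the IR's mul
   instead of hitting signed-overflow UB for INT32_MIN. */
static int32_t mul_by_sign(int32_t x) {
    int32_t sign = (x >> 31) | 1;               /* +1 for x >= 0, -1 for x < 0 */
    return (int32_t)((uint32_t)sign * (uint32_t)x);
}

/* Wrapping absolute value: |INT32_MIN| wraps back to INT32_MIN. */
static int32_t wrapping_abs(int32_t x) {
    return x < 0 ? (int32_t)(0u - (uint32_t)x) : x;
}

int main(void) {
    const int32_t samples[] = {0, 1, -1, 7, -7, INT32_MAX, INT32_MIN};
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
        assert(mul_by_sign(samples[i]) == wrapping_abs(samples[i]));
    return 0;
}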