 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX

 declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
 declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
@@ -10,19 +10,12 @@ declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32

 ; fold (smulo x, 2) -> (saddo x, x)
 define i32 @combine_smul_two(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_smul_two:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movl %edi, %eax
-; SSE-NEXT:    addl %edi, %eax
-; SSE-NEXT:    cmovol %esi, %eax
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: combine_smul_two:
-; AVX:       # %bb.0:
-; AVX-NEXT:    movl %edi, %eax
-; AVX-NEXT:    addl %edi, %eax
-; AVX-NEXT:    cmovol %esi, %eax
-; AVX-NEXT:    retq
+; CHECK-LABEL: combine_smul_two:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    addl %edi, %eax
+; CHECK-NEXT:    cmovol %esi, %eax
+; CHECK-NEXT:    retq
   %1 = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %a0, i32 2)
   %2 = extractvalue {i32, i1} %1, 0
   %3 = extractvalue {i32, i1} %1, 1
@@ -58,19 +51,12 @@ define <4 x i32> @combine_vec_smul_two(<4 x i32> %a0, <4 x i32> %a1) {

 ; fold (umulo x, 2) -> (uaddo x, x)
 define i32 @combine_umul_two(i32 %a0, i32 %a1) {
-; SSE-LABEL: combine_umul_two:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movl %edi, %eax
-; SSE-NEXT:    addl %edi, %eax
-; SSE-NEXT:    cmovbl %esi, %eax
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: combine_umul_two:
-; AVX:       # %bb.0:
-; AVX-NEXT:    movl %edi, %eax
-; AVX-NEXT:    addl %edi, %eax
-; AVX-NEXT:    cmovbl %esi, %eax
-; AVX-NEXT:    retq
+; CHECK-LABEL: combine_umul_two:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    addl %edi, %eax
+; CHECK-NEXT:    cmovbl %esi, %eax
+; CHECK-NEXT:    retq
   %1 = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %a0, i32 2)
   %2 = extractvalue {i32, i1} %1, 0
   %3 = extractvalue {i32, i1} %1, 1