Skip to content

Commit 4153f17

Browse files
committed
[InstSimplify][NFC] Tests for skipping 'div-by-0' checks before inverted @llvm.umul.with.overflow
It would already be handled by the non-inverted case if we were hoisting the `not` in InstCombine, but we don't (granted, we don't sink it in this case either), so this is a separate case. llvm-svn: 366801
1 parent 87fdcb8 commit 4153f17

File tree

2 files changed

+212
-0
lines changed

2 files changed

+212
-0
lines changed
Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2+
; RUN: opt %s -instsimplify -S | FileCheck %s
3+
4+
declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1
5+
6+
; Baseline pattern: (%size == 0) | !overflow, where the overflow bit of
; @llvm.smul.with.overflow is inverted via 'xor i1 ..., true'. The
; autogenerated CHECK lines match the input unchanged, i.e. InstSimplify
; does not fold this yet (this commit is NFC test coverage).
define i1 @t0_umul(i4 %size, i4 %nmemb) {
7+
; CHECK-LABEL: @t0_umul(
8+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
9+
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
10+
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
11+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
12+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
13+
; CHECK-NEXT: ret i1 [[OR]]
14+
;
15+
%cmp = icmp eq i4 %size, 0
16+
%smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
17+
%smul.ov = extractvalue { i4, i1 } %smul, 1
18+
%phitmp = xor i1 %smul.ov, true
19+
%or = or i1 %cmp, %phitmp
20+
ret i1 %or
21+
}
22+
23+
; Same pattern as @t0_umul, but with the 'or' operands commuted
; (%phitmp first, %cmp second) to cover the swapped-operand form.
define i1 @t1_commutative(i4 %size, i4 %nmemb) {
24+
; CHECK-LABEL: @t1_commutative(
25+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
26+
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
27+
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
28+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
29+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[PHITMP]], [[CMP]]
30+
; CHECK-NEXT: ret i1 [[OR]]
31+
;
32+
%cmp = icmp eq i4 %size, 0
33+
%smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
34+
%smul.ov = extractvalue { i4, i1 } %smul, 1
35+
%phitmp = xor i1 %smul.ov, true
36+
%or = or i1 %phitmp, %cmp ; swapped
37+
ret i1 %or
38+
}
39+
40+
; Negative test: the zero-compare is on %size1 while the multiply uses
; %size0, so the compare does not guard the multiplied operand and the
; pattern must not be simplified.
define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
41+
; CHECK-LABEL: @n2_wrong_size(
42+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
43+
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
44+
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
45+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
46+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
47+
; CHECK-NEXT: ret i1 [[OR]]
48+
;
49+
%cmp = icmp eq i4 %size1, 0 ; not %size0
50+
%smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size0, i4 %nmemb)
51+
%smul.ov = extractvalue { i4, i1 } %smul, 1
52+
%phitmp = xor i1 %smul.ov, true
53+
%or = or i1 %cmp, %phitmp
54+
ret i1 %or
55+
}
56+
57+
; Negative test: uses 'icmp ne' instead of the required 'icmp eq', so
; this is not the div-by-0 guard pattern and must not be simplified.
define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
58+
; CHECK-LABEL: @n3_wrong_pred(
59+
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
60+
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
61+
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
62+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
63+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
64+
; CHECK-NEXT: ret i1 [[OR]]
65+
;
66+
%cmp = icmp ne i4 %size, 0 ; not 'eq'
67+
%smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
68+
%smul.ov = extractvalue { i4, i1 } %smul, 1
69+
%phitmp = xor i1 %smul.ov, true
70+
%or = or i1 %cmp, %phitmp
71+
ret i1 %or
72+
}
73+
74+
; Negative test: combines with 'and' instead of 'or', which is a
; different predicate altogether and must not be simplified.
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
75+
; CHECK-LABEL: @n4_not_and(
76+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
77+
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
78+
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
79+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
80+
; CHECK-NEXT: [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
81+
; CHECK-NEXT: ret i1 [[OR]]
82+
;
83+
%cmp = icmp eq i4 %size, 0
84+
%smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
85+
%smul.ov = extractvalue { i4, i1 } %smul, 1
86+
%phitmp = xor i1 %smul.ov, true
87+
%or = and i1 %cmp, %phitmp ; not 'or'
88+
ret i1 %or
89+
}
90+
91+
; Negative test: compares %size against 1 rather than 0, so this is not
; a div-by-0 guard and must not be simplified.
define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
92+
; CHECK-LABEL: @n5_not_zero(
93+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
94+
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
95+
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
96+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
97+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
98+
; CHECK-NEXT: ret i1 [[OR]]
99+
;
100+
%cmp = icmp eq i4 %size, 1 ; should be '0'
101+
%smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
102+
%smul.ov = extractvalue { i4, i1 } %smul, 1
103+
%phitmp = xor i1 %smul.ov, true
104+
%or = or i1 %cmp, %phitmp
105+
ret i1 %or
106+
}
Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2+
; RUN: opt %s -instsimplify -S | FileCheck %s
3+
4+
declare { i4, i1 } @llvm.umul.with.overflow.i4(i4, i4) #1
5+
6+
; Baseline pattern: (%size == 0) | !overflow, where the overflow bit of
; @llvm.umul.with.overflow is inverted via 'xor i1 ..., true'. The
; autogenerated CHECK lines match the input unchanged, i.e. InstSimplify
; does not fold this yet (this commit is NFC test coverage).
define i1 @t0_umul(i4 %size, i4 %nmemb) {
7+
; CHECK-LABEL: @t0_umul(
8+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
9+
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
10+
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
11+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
12+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
13+
; CHECK-NEXT: ret i1 [[OR]]
14+
;
15+
%cmp = icmp eq i4 %size, 0
16+
%umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
17+
%umul.ov = extractvalue { i4, i1 } %umul, 1
18+
%phitmp = xor i1 %umul.ov, true
19+
%or = or i1 %cmp, %phitmp
20+
ret i1 %or
21+
}
22+
23+
; Same pattern as @t0_umul, but with the 'or' operands commuted
; (%phitmp first, %cmp second) to cover the swapped-operand form.
define i1 @t1_commutative(i4 %size, i4 %nmemb) {
24+
; CHECK-LABEL: @t1_commutative(
25+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
26+
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
27+
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
28+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
29+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[PHITMP]], [[CMP]]
30+
; CHECK-NEXT: ret i1 [[OR]]
31+
;
32+
%cmp = icmp eq i4 %size, 0
33+
%umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
34+
%umul.ov = extractvalue { i4, i1 } %umul, 1
35+
%phitmp = xor i1 %umul.ov, true
36+
%or = or i1 %phitmp, %cmp ; swapped
37+
ret i1 %or
38+
}
39+
40+
; Negative test: the zero-compare is on %size1 while the multiply uses
; %size0, so the compare does not guard the multiplied operand and the
; pattern must not be simplified.
define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
41+
; CHECK-LABEL: @n2_wrong_size(
42+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
43+
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
44+
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
45+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
46+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
47+
; CHECK-NEXT: ret i1 [[OR]]
48+
;
49+
%cmp = icmp eq i4 %size1, 0 ; not %size0
50+
%umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size0, i4 %nmemb)
51+
%umul.ov = extractvalue { i4, i1 } %umul, 1
52+
%phitmp = xor i1 %umul.ov, true
53+
%or = or i1 %cmp, %phitmp
54+
ret i1 %or
55+
}
56+
57+
; Negative test: uses 'icmp ne' instead of the required 'icmp eq', so
; this is not the div-by-0 guard pattern and must not be simplified.
define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
58+
; CHECK-LABEL: @n3_wrong_pred(
59+
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
60+
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
61+
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
62+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
63+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
64+
; CHECK-NEXT: ret i1 [[OR]]
65+
;
66+
%cmp = icmp ne i4 %size, 0 ; not 'eq'
67+
%umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
68+
%umul.ov = extractvalue { i4, i1 } %umul, 1
69+
%phitmp = xor i1 %umul.ov, true
70+
%or = or i1 %cmp, %phitmp
71+
ret i1 %or
72+
}
73+
74+
; Negative test: combines with 'and' instead of 'or', which is a
; different predicate altogether and must not be simplified.
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
75+
; CHECK-LABEL: @n4_not_and(
76+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
77+
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
78+
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
79+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
80+
; CHECK-NEXT: [[OR:%.*]] = and i1 [[CMP]], [[PHITMP]]
81+
; CHECK-NEXT: ret i1 [[OR]]
82+
;
83+
%cmp = icmp eq i4 %size, 0
84+
%umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
85+
%umul.ov = extractvalue { i4, i1 } %umul, 1
86+
%phitmp = xor i1 %umul.ov, true
87+
%or = and i1 %cmp, %phitmp ; not 'or'
88+
ret i1 %or
89+
}
90+
91+
; Negative test: compares %size against 1 rather than 0, so this is not
; a div-by-0 guard and must not be simplified.
define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
92+
; CHECK-LABEL: @n5_not_zero(
93+
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
94+
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
95+
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
96+
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
97+
; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP]], [[PHITMP]]
98+
; CHECK-NEXT: ret i1 [[OR]]
99+
;
100+
%cmp = icmp eq i4 %size, 1 ; should be '0'
101+
%umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
102+
%umul.ov = extractvalue { i4, i1 } %umul, 1
103+
%phitmp = xor i1 %umul.ov, true
104+
%or = or i1 %cmp, %phitmp
105+
ret i1 %or
106+
}

0 commit comments

Comments
 (0)