Commit f6ae0d3

[CodeGen] Pre-commit test case related to ComputeNumSignBits for SHL (#97695)
Add test cases aiming to show that it is possible to look through ZERO_EXTEND/ANY_EXTEND when computing the number of sign bits for an SHL node. If all extended bits are shifted out, we can analyze the operand that is being extended.
1 parent e70f376 commit f6ae0d3
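
To make the intent concrete, here is a small standalone C++ sketch of the arithmetic the tests rely on. It is not LLVM's ComputeNumSignBits implementation; the numSignBits16 helper and the exhaustive loop exist only for illustration, and it assumes the usual two's-complement arithmetic right shift for signed types. For the pattern used in computeNumSignBits_shl_zext_1, (shl (zext (ashr i8 %x, 5) to i16), 10), the shift amount covers all 8 zero-extended bits, so looking through the zext gives at least 6 - (8 - (16 - 10)) = 4 sign bits in the result:

// Standalone sketch (not LLVM's ComputeNumSignBits): exhaustively check that
// (shl (zext (ashr i8 %x, 5) to i16), 10) keeps at least 4 sign bits.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Number of leading bits equal to the sign bit, including the sign bit itself.
static unsigned numSignBits16(uint16_t V) {
  unsigned N = 1;
  unsigned Sign = (V >> 15) & 1;
  for (int Bit = 14; Bit >= 0 && ((V >> Bit) & 1) == Sign; --Bit)
    ++N;
  return N;
}

int main() {
  for (int X = 0; X < 256; ++X) {
    int8_t Ashr = static_cast<int8_t>(X) >> 5;          // ashr i8 %x, 5: at least 6 sign bits
    uint16_t Zext = static_cast<uint8_t>(Ashr);         // zext i8 ... to i16
    uint16_t Shl = static_cast<uint16_t>(Zext << 10);   // shl i16 ..., 10

    // The shift amount (10) covers all 8 extended bits, so the result keeps
    // at least NumSignBits(ashr) - (8 - (16 - 10)) = 6 - 2 = 4 sign bits.
    assert(numSignBits16(Shl) >= 4);
  }
  puts("every input keeps at least 4 sign bits after the shift");
  return 0;
}

That count matches the %nsb4 name used in the tests, which expect the (sshlsat x, c) -> (shl x, c) fold for c = 1, 2, 3 but not for c = 4.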

1 file changed, 191 additions and 0 deletions
@@ -0,0 +1,191 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64

; Verify that we can look through a ZERO_EXTEND/ANY_EXTEND when doing
; ComputeNumSignBits for SHL.
; We use the (sshlsat x, c) -> (shl x, c) fold as verification.
; That fold should happen if c is less than the number of sign bits in x.

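; In all functions below the input is (shl (zext (ashr i8 %x, 5)), C): the ashr
; guarantees at least 6 sign bits in the i8 value, and the %nsb* names state how
; many sign bits the i16 result keeps once the extension is looked through.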
define void @computeNumSignBits_shl_zext_1(i8 %x, ptr %p) nounwind {
; X64-LABEL: computeNumSignBits_shl_zext_1:
; X64: # %bb.0:
; X64-NEXT: sarb $5, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shll $10, %ecx
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: testw %cx, %cx
; X64-NEXT: sets %dl
; X64-NEXT: addl $32767, %edx # imm = 0x7FFF
; X64-NEXT: movl %eax, %edi
; X64-NEXT: shll $11, %edi
; X64-NEXT: movswl %di, %r8d
; X64-NEXT: shrl %r8d
; X64-NEXT: cmpw %r8w, %cx
; X64-NEXT: cmovnel %edx, %edi
; X64-NEXT: movw %di, (%rsi)
; X64-NEXT: movl %eax, %edi
; X64-NEXT: shll $12, %edi
; X64-NEXT: movswl %di, %r8d
; X64-NEXT: shrl $2, %r8d
; X64-NEXT: cmpw %r8w, %cx
; X64-NEXT: cmovnel %edx, %edi
; X64-NEXT: movw %di, (%rsi)
; X64-NEXT: shll $13, %eax
; X64-NEXT: movswl %ax, %edi
; X64-NEXT: shrl $3, %edi
; X64-NEXT: cmpw %di, %cx
; X64-NEXT: cmovnel %edx, %eax
; X64-NEXT: movw %ax, (%rsi)
; X64-NEXT: retq
%ashr = ashr i8 %x, 5
%zext = zext i8 %ashr to i16
%nsb4 = shl i16 %zext, 10
; Expecting (sshlsat x, c) -> (shl x, c) fold.
%tmp1 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 1)
store volatile i16 %tmp1, ptr %p
; Expecting (sshlsat x, c) -> (shl x, c) fold.
%tmp2 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 2)
store volatile i16 %tmp2, ptr %p
; Expecting (sshlsat x, c) -> (shl x, c) fold.
%tmp3 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 3)
store volatile i16 %tmp3, ptr %p
ret void
}

define void @computeNumSignBits_shl_zext_2(i8 %x, ptr %p) nounwind {
; X64-LABEL: computeNumSignBits_shl_zext_2:
; X64: # %bb.0:
; X64-NEXT: sarb $5, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shll $10, %ecx
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: testw %cx, %cx
; X64-NEXT: sets %dl
; X64-NEXT: addl $32767, %edx # imm = 0x7FFF
; X64-NEXT: shll $14, %eax
; X64-NEXT: movswl %ax, %edi
; X64-NEXT: shrl $4, %edi
; X64-NEXT: cmpw %di, %cx
; X64-NEXT: cmovnel %edx, %eax
; X64-NEXT: movw %ax, (%rsi)
; X64-NEXT: retq
%ashr = ashr i8 %x, 5
%zext = zext i8 %ashr to i16
%nsb4 = shl i16 %zext, 10
; 4 sign bits. Not expecting (sshlsat x, c) -> (shl x, c) fold.
%tmp4 = call i16 @llvm.sshl.sat.i16(i16 %nsb4, i16 4)
store volatile i16 %tmp4, ptr %p
ret void
}

define void @computeNumSignBits_shl_zext_vec_1(<2 x i8> %x, ptr %p) nounwind {
; X64-LABEL: computeNumSignBits_shl_zext_vec_1:
; X64: # %bb.0:
; X64-NEXT: psrlw $5, %xmm0
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: psubb %xmm1, %xmm0
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1024,4096,u,u,u,u,u,u]
; X64-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: pcmpgtw %xmm0, %xmm1
; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: por %xmm2, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: paddw %xmm0, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: psraw $1, %xmm3
; X64-NEXT: pcmpeqw %xmm0, %xmm3
; X64-NEXT: movdqa %xmm3, %xmm0
; X64-NEXT: pandn %xmm1, %xmm0
; X64-NEXT: pand %xmm2, %xmm3
; X64-NEXT: por %xmm0, %xmm3
; X64-NEXT: movd %xmm3, (%rdi)
; X64-NEXT: retq
%ashr = ashr <2 x i8> %x, <i8 5, i8 5>
%zext = zext <2 x i8> %ashr to <2 x i16>
%nsb4_2 = shl <2 x i16> %zext, <i16 10, i16 12>
; Expecting (sshlsat x, c) -> (shl x, c) fold.
%tmp1 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> %nsb4_2, <2 x i16> <i16 1, i16 1>)
store volatile <2 x i16> %tmp1, ptr %p
ret void
}

define void @computeNumSignBits_shl_zext_vec_2(<2 x i8> %x, ptr %p) nounwind {
; X64-LABEL: computeNumSignBits_shl_zext_vec_2:
; X64: # %bb.0:
; X64-NEXT: psrlw $5, %xmm0
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: psubb %xmm1, %xmm0
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [1024,4096,u,u,u,u,u,u]
; X64-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: pcmpgtw %xmm0, %xmm1
; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: por %xmm2, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: psllw $2, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: psraw $2, %xmm3
; X64-NEXT: pcmpeqw %xmm0, %xmm3
; X64-NEXT: movdqa %xmm3, %xmm0
; X64-NEXT: pandn %xmm1, %xmm0
; X64-NEXT: pand %xmm2, %xmm3
; X64-NEXT: por %xmm0, %xmm3
; X64-NEXT: movd %xmm3, (%rdi)
; X64-NEXT: retq
%ashr = ashr <2 x i8> %x, <i8 5, i8 5>
%zext = zext <2 x i8> %ashr to <2 x i16>
%nsb4_2 = shl <2 x i16> %zext, <i16 10, i16 12>
; Not expecting (sshlsat x, c) -> (shl x, c) fold.
; Because only 2 sign bits in element 1.
%tmp1 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> %nsb4_2, <2 x i16> <i16 2, i16 2>)
store volatile <2 x i16> %tmp1, ptr %p
ret void
}

define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind {
; X64-LABEL: computeNumSignBits_shl_zext_vec_3:
; X64: # %bb.0:
; X64-NEXT: psrlw $5, %xmm0
; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X64-NEXT: pxor %xmm1, %xmm0
; X64-NEXT: psubb %xmm1, %xmm0
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 # [16384,4096,u,u,u,u,u,u]
; X64-NEXT: movdqa {{.*#+}} xmm2 = [32768,32768,32768,32768,32768,32768,32768,32768]
; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: pcmpgtw %xmm0, %xmm1
; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: por %xmm2, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: paddw %xmm0, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: psraw $1, %xmm3
; X64-NEXT: pcmpeqw %xmm0, %xmm3
; X64-NEXT: movdqa %xmm3, %xmm0
; X64-NEXT: pandn %xmm1, %xmm0
; X64-NEXT: pand %xmm2, %xmm3
; X64-NEXT: por %xmm0, %xmm3
; X64-NEXT: movd %xmm3, (%rdi)
; X64-NEXT: retq
%ashr = ashr <2 x i8> %x, <i8 5, i8 5>
%zext = zext <2 x i8> %ashr to <2 x i16>
%nsb1_2 = shl <2 x i16> %zext, <i16 14, i16 12>
; Not expecting (sshlsat x, c) -> (shl x, c) fold.
; Because all sign bits are shifted out for element 0.
%tmp1 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> %nsb1_2, <2 x i16> <i16 1, i16 1>)
store volatile <2 x i16> %tmp1, ptr %p
ret void
}
