
Commit a8913f8

[X86] Regenerate pr38539.ll
Even though we're only interested in the X64 codegen for the first test, it's much easier to maintain if we just let the update script generate the codegen checks for X86 as well.
1 parent d1e3d32 commit a8913f8
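Checks like the X86 block added below are produced by LLVM's update_llc_test_checks.py utility rather than written by hand. A minimal sketch of the regeneration workflow, assuming a standard monorepo checkout and an already-built llc (the paths and the --llc-binary value are illustrative):

$ cd llvm-project
$ python3 llvm/utils/update_llc_test_checks.py \
    --llc-binary build/bin/llc \
    llvm/test/CodeGen/X86/pr38539.ll   # rewrites the CHECK lines in place from the test's RUN lines

Because the file already carries an X86 RUN line, the script emits the i686 checks alongside the X64 ones, which is the maintenance win described in the commit message.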

1 file changed (+238, −6 lines)
llvm/test/CodeGen/X86/pr38539.ll

Lines changed: 238 additions & 6 deletions
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=i686-unknown -verify-machineinstrs | FileCheck %s --check-prefix=X86
 
 ; This test is targeted at 64-bit mode. It used to crash due to the creation of an EXTRACT_SUBREG after the peephole pass had ran.
-define void @f() {
+define void @f() nounwind {
 ; X64-LABEL: f:
 ; X64: # %bb.0: # %BB
 ; X64-NEXT: movzbl (%rax), %eax
@@ -13,6 +13,242 @@ define void @f() {
 ; X64-NEXT: movq %rax, (%rax)
 ; X64-NEXT: movb $0, (%rax)
 ; X64-NEXT: retq
+;
+; X86-LABEL: f:
+; X86: # %bb.0: # %BB_udiv-special-cases
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $176, %esp
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movzbl (%eax), %eax
+; X86-NEXT: movzbl (%eax), %ecx
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; X86-NEXT: divb %cl
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: shll $30, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: sarl $30, %ecx
+; X86-NEXT: sarl $31, %eax
+; X86-NEXT: shrdl $1, %eax, %ecx
+; X86-NEXT: xorl %eax, %edx
+; X86-NEXT: xorl %eax, %edi
+; X86-NEXT: xorl %ecx, %esi
+; X86-NEXT: subl %ecx, %esi
+; X86-NEXT: sbbl %eax, %edi
+; X86-NEXT: sbbl %eax, %edx
+; X86-NEXT: andl $3, %edx
+; X86-NEXT: testl %edi, %edi
+; X86-NEXT: jne .LBB0_1
+; X86-NEXT: # %bb.2: # %BB_udiv-special-cases
+; X86-NEXT: bsrl %esi, %ecx
+; X86-NEXT: xorl $31, %ecx
+; X86-NEXT: addl $32, %ecx
+; X86-NEXT: jmp .LBB0_3
+; X86-NEXT: .LBB0_1:
+; X86-NEXT: bsrl %edi, %ecx
+; X86-NEXT: xorl $31, %ecx
+; X86-NEXT: .LBB0_3: # %BB_udiv-special-cases
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testl %edx, %edx
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: jne .LBB0_4
+; X86-NEXT: # %bb.5: # %BB_udiv-special-cases
+; X86-NEXT: addl $64, %ecx
+; X86-NEXT: jmp .LBB0_6
+; X86-NEXT: .LBB0_4:
+; X86-NEXT: bsrl %edx, %ecx
+; X86-NEXT: xorl $31, %ecx
+; X86-NEXT: addl $32, %ecx
+; X86-NEXT: .LBB0_6: # %BB_udiv-special-cases
+; X86-NEXT: subl $62, %ecx
+; X86-NEXT: movl $0, %ebx
+; X86-NEXT: sbbl %ebx, %ebx
+; X86-NEXT: sbbl %eax, %eax
+; X86-NEXT: addl $-66, %ecx
+; X86-NEXT: adcl $-1, %ebx
+; X86-NEXT: adcl $3, %eax
+; X86-NEXT: movl %eax, %edi
+; X86-NEXT: movb $1, %al
+; X86-NEXT: testb %al, %al
+; X86-NEXT: jne .LBB0_11
+; X86-NEXT: # %bb.7: # %BB_udiv-special-cases
+; X86-NEXT: andl $3, %edi
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: xorl $65, %eax
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %edi, %eax
+; X86-NEXT: orl %ebx, %eax
+; X86-NEXT: je .LBB0_11
+; X86-NEXT: # %bb.8: # %udiv-bb1
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: addl $1, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: adcl $0, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: adcl $0, %esi
+; X86-NEXT: andl $3, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movb $65, %cl
+; X86-NEXT: subb %al, %cl
+; X86-NEXT: movb %cl, %ch
+; X86-NEXT: andb $7, %ch
+; X86-NEXT: shrb $3, %cl
+; X86-NEXT: andb $15, %cl
+; X86-NEXT: negb %cl
+; X86-NEXT: movsbl %cl, %eax
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl 136(%esp,%eax), %edx
+; X86-NEXT: movb %ch, %cl
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: notb %cl
+; X86-NEXT: movl 128(%esp,%eax), %edi
+; X86-NEXT: movl 132(%esp,%eax), %esi
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: shrl %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: movb %ch, %cl
+; X86-NEXT: shldl %cl, %edi, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shll %cl, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-NEXT: orl %ebx, %ecx
+; X86-NEXT: je .LBB0_11
+; X86-NEXT: # %bb.9: # %udiv-preheader
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: andl $3, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: movb %al, %ch
+; X86-NEXT: andb $7, %ch
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: shrb $3, %al
+; X86-NEXT: andb $15, %al
+; X86-NEXT: movzbl %al, %esi
+; X86-NEXT: movl 80(%esp,%esi), %edx
+; X86-NEXT: movl 84(%esp,%esi), %eax
+; X86-NEXT: movl %eax, %edi
+; X86-NEXT: movb %ch, %cl
+; X86-NEXT: shrl %cl, %edi
+; X86-NEXT: notb %cl
+; X86-NEXT: movl 88(%esp,%esi), %esi
+; X86-NEXT: addl %esi, %esi
+; X86-NEXT: shll %cl, %esi
+; X86-NEXT: orl %edi, %esi
+; X86-NEXT: movb %ch, %cl
+; X86-NEXT: shrdl %cl, %eax, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: addl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $-1, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: adcl $3, %eax
+; X86-NEXT: andl $3, %eax
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: .p2align 4, 0x90
+; X86-NEXT: .LBB0_10: # %udiv-do-while
+; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl $1, %esi, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NEXT: shldl $1, %edx, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: movl %ebx, %eax
+; X86-NEXT: andl $2, %eax
+; X86-NEXT: shrl %eax
+; X86-NEXT: leal (%eax,%edx,2), %edx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: shldl $1, %edi, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: orl %esi, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: shldl $1, %eax, %edi
+; X86-NEXT: orl %esi, %edi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: andl $3, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: cmpl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: sbbl %esi, %eax
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: sbbl %ecx, %ebx
+; X86-NEXT: shll $30, %ebx
+; X86-NEXT: movl %ebx, %eax
+; X86-NEXT: sarl $30, %eax
+; X86-NEXT: sarl $31, %ebx
+; X86-NEXT: shrdl $1, %ebx, %eax
+; X86-NEXT: movl %eax, %edi
+; X86-NEXT: andl $1, %edi
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT: movl %ebx, %edi
+; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT: subl %eax, %edx
+; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: sbbl %ebx, %esi
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT: sbbl %edi, %ecx
+; X86-NEXT: andl $3, %ecx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT: addl $-1, %eax
+; X86-NEXT: adcl $-1, %ebx
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X86-NEXT: adcl $3, %edi
+; X86-NEXT: andl $3, %edi
+; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: orl %edi, %eax
+; X86-NEXT: orl %ebx, %eax
+; X86-NEXT: jne .LBB0_10
+; X86-NEXT: .LBB0_11: # %udiv-end
+; X86-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
+; X86-NEXT: setne (%eax)
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, (%eax)
+; X86-NEXT: movb $0, (%eax)
+; X86-NEXT: leal -12(%ebp), %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
 BB:
 %A30 = alloca i66
 %L17 = load i66, ptr %A30
@@ -38,7 +274,7 @@ BB:
 }
 
 ; Similar to above, but bitwidth adjusted to target 32-bit mode. This also shows that we didn't constrain the register class when extracting a subreg.
-define void @g() {
+define void @g() nounwind {
 ; X64-LABEL: g:
 ; X64: # %bb.0: # %BB
 ; X64-NEXT: movzbl (%rax), %eax
@@ -52,10 +288,7 @@ define void @g() {
 ; X86-LABEL: g:
 ; X86: # %bb.0: # %BB
 ; X86-NEXT: pushl %ebp
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: .cfi_offset %ebp, -8
 ; X86-NEXT: movl %esp, %ebp
-; X86-NEXT: .cfi_def_cfa_register %ebp
 ; X86-NEXT: andl $-8, %esp
 ; X86-NEXT: subl $8, %esp
 ; X86-NEXT: movzbl (%eax), %eax
@@ -66,7 +299,6 @@ define void @g() {
 ; X86-NEXT: movb $0, (%eax)
 ; X86-NEXT: movl %ebp, %esp
 ; X86-NEXT: popl %ebp
-; X86-NEXT: .cfi_def_cfa %esp, 4
 ; X86-NEXT: retl
 BB:
 %A30 = alloca i34

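The regenerated checks above are driven by the RUN lines in the test itself; to reproduce the X86 codegen by hand, a minimal sketch based on the RUN line visible in the first hunk, assuming llc and FileCheck are on PATH and substituting the test path for %s:

$ llc < llvm/test/CodeGen/X86/pr38539.ll -mtriple=i686-unknown -verify-machineinstrs \
    | FileCheck llvm/test/CodeGen/X86/pr38539.ll --check-prefix=X86   # checks the X86 block above

Running the file through llvm-lit instead would exercise the same RUN lines (including the X64 one) automatically.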