 ; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s --check-prefix=X64
 
-define i16 @test(i8 %_in) {
-; X86-LABEL: test:
-; X86:       # %bb.0:
-; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    notb %al
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    orb $-128, %cl
-; X86-NEXT:    movzbl %cl, %ecx
-; X86-NEXT:    shll $8, %ecx
-; X86-NEXT:    addb %al, %al
-; X86-NEXT:    movzbl %al, %eax
-; X86-NEXT:    orl %ecx, %eax
-; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+define i64 @PR69965(ptr %input_ptrs, ptr %output_ptrs) {
+; X86-LABEL: PR69965:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    movzbl (%eax), %eax
+; X86-NEXT:    notl %eax
+; X86-NEXT:    movzbl %al, %edx
+; X86-NEXT:    shll $8, %eax
+; X86-NEXT:    movl (%ecx), %ecx
+; X86-NEXT:    leal (%eax,%edx,2), %eax
+; X86-NEXT:    orl $32768, %eax # imm = 0x8000
+; X86-NEXT:    movw %ax, (%ecx)
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    xorl %edx, %edx
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: test:
-; X64:       # %bb.0:
-; X64-NEXT:    notb %dil
-; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    orb $-128, %al
+; X64-LABEL: PR69965:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    movq (%rdi), %rax
+; X64-NEXT:    movzbl (%rax), %eax
+; X64-NEXT:    notl %eax
 ; X64-NEXT:    movzbl %al, %ecx
-; X64-NEXT:    shll $8, %ecx
-; X64-NEXT:    addb %dil, %dil
-; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    orl %ecx, %eax
-; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    # kill: def $eax killed $eax def $rax
+; X64-NEXT:    shll $8, %eax
+; X64-NEXT:    movq (%rsi), %rdx
+; X64-NEXT:    leal (%rax,%rcx,2), %eax
+; X64-NEXT:    orl $32768, %eax # imm = 0x8000
+; X64-NEXT:    movw %ax, (%rdx)
+; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    retq
-  %_1 = and i8 %_in, 127
-  %_2 = xor i8 %_1, 127
-  %_3 = or i8 %_2, -128
-  %_4 = zext i8 %_3 to i16
-  %_6 = shl nuw i16 %_4, 8
-  %_7 = shl nuw i8 %_2, 1
-  %_8 = zext i8 %_7 to i16
-  %_9 = or i16 %_6, %_8
-  ret i16 %_9
+entry:
+  %0 = load ptr, ptr %input_ptrs, align 8
+  %.val.i = load i8, ptr %0, align 1
+  %1 = and i8 %.val.i, 127
+  %2 = xor i8 %1, 127
+  %3 = or i8 %2, -128
+  %4 = zext i8 %3 to i16
+  %5 = load ptr, ptr %output_ptrs, align 8
+  %6 = shl nuw i16 %4, 8
+  %7 = shl nuw i8 %2, 1
+  %8 = zext i8 %7 to i16
+  %9 = or i16 %6, %8
+  store i16 %9, ptr %5, align 2
+  ret i64 0
 }