; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s

; i32 signed fixed-point multiply with scale 2: lowered to a widening 32x32->64
; multiply (smull), then the full product shifted right by the scale is
; extracted via lsr+extr.
define i32 @func(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func:
; CHECK: // %bb.0:
; CHECK-NEXT: smull x8, w0, w1
; CHECK-NEXT: lsr x9, x8, #32
; CHECK-NEXT: extr w0, w9, w8, #2
; CHECK-NEXT: ret
  %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 2)
  ret i32 %tmp
}

; i64 signed fixed-point multiply with scale 2: the 128-bit product is formed
; with mul (low half) + smulh (high half), and the scaled result is extracted
; across the pair with extr.
define i64 @func2(i64 %x, i64 %y) {
; CHECK-LABEL: func2:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x8, x0, x1
; CHECK-NEXT: smulh x9, x0, x1
; CHECK-NEXT: extr x0, x9, x8, #2
; CHECK-NEXT: ret
  %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2)
  ret i64 %tmp
}

; Odd-sized i4 case with scale 2: both 4-bit operands are sign-extended in
; registers with sbfx before the same smull/lsr/extr sequence as the i32 case.
define i4 @func3(i4 %x, i4 %y) nounwind {
; CHECK-LABEL: func3:
; CHECK: // %bb.0:
; CHECK-NEXT: sbfx w8, w1, #0, #4
; CHECK-NEXT: sbfx w9, w0, #0, #4
; CHECK-NEXT: smull x8, w9, w8
; CHECK-NEXT: lsr x9, x8, #32
; CHECK-NEXT: extr w0, w9, w8, #2
; CHECK-NEXT: ret
  %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2)
  ret i4 %tmp
}

;; These result in regular integer multiplication
; Scale 0 degenerates to a plain i32 multiply.
define i32 @func4(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: func4:
; CHECK: // %bb.0:
; CHECK-NEXT: mul w0, w0, w1
; CHECK-NEXT: ret
  %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 0)
  ret i32 %tmp
}

; Scale 0 degenerates to a plain i64 multiply.
define i64 @func5(i64 %x, i64 %y) {
; CHECK-LABEL: func5:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x0, x0, x1
; CHECK-NEXT: ret
  %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 0)
  ret i64 %tmp
}

; Scale 0 on i4: sign-extend the 4-bit operands (sbfx), then a plain multiply.
define i4 @func6(i4 %x, i4 %y) nounwind {
; CHECK-LABEL: func6:
; CHECK: // %bb.0:
; CHECK-NEXT: sbfx w8, w1, #0, #4
; CHECK-NEXT: sbfx w9, w0, #0, #4
; CHECK-NEXT: mul w0, w9, w8
; CHECK-NEXT: ret
  %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 0)
  ret i4 %tmp
}

; Scale 32 on i64: mul+smulh form the 128-bit product and extr #32 picks the
; middle 64 bits.
define i64 @func7(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: func7:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x8, x0, x1
; CHECK-NEXT: smulh x9, x0, x1
; CHECK-NEXT: extr x0, x9, x8, #32
; CHECK-NEXT: ret
  %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 32)
  ret i64 %tmp
}

; Scale 63 — the maximum legal scale for i64 (scale must be < bit width):
; same mul+smulh pair, extracting with extr #63.
define i64 @func8(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: func8:
; CHECK: // %bb.0:
; CHECK-NEXT: mul x8, x0, x1
; CHECK-NEXT: smulh x9, x0, x1
; CHECK-NEXT: extr x0, x9, x8, #63
; CHECK-NEXT: ret
  %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 63)
  ret i64 %tmp
}

; Vector scale 0: lowers to a single NEON vector multiply.
define <2 x i32> @vec(<2 x i32> %x, <2 x i32> %y) nounwind {
; CHECK-LABEL: vec:
; CHECK: // %bb.0:
; CHECK-NEXT: mul v0.2s, v0.2s, v1.2s
; CHECK-NEXT: ret
  %tmp = call <2 x i32> @llvm.smul.fix.v2i32(<2 x i32> %x, <2 x i32> %y, i32 0)
  ret <2 x i32> %tmp
}

; 128-bit vector, scale 0: likewise a single vector multiply.
define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: vec2:
; CHECK: // %bb.0:
; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
  %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0)
  ret <4 x i32> %tmp
}

; v4i64 with nonzero scale: no vector smulh, so the operation is scalarized —
; each lane is moved to GPRs (mov/fmov), multiplied with mul+smulh, scaled with
; extr #32, and the results are reassembled into the two result vectors.
define <4 x i64> @vec3(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: vec3:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, v2.d[1]
; CHECK-NEXT: mov x9, v0.d[1]
; CHECK-NEXT: fmov x10, d2
; CHECK-NEXT: fmov x11, d0
; CHECK-NEXT: mov x14, v3.d[1]
; CHECK-NEXT: mov x15, v1.d[1]
; CHECK-NEXT: mul x12, x11, x10
; CHECK-NEXT: mul x13, x9, x8
; CHECK-NEXT: smulh x8, x9, x8
; CHECK-NEXT: smulh x9, x11, x10
; CHECK-NEXT: fmov x10, d3
; CHECK-NEXT: fmov x11, d1
; CHECK-NEXT: mul x16, x11, x10
; CHECK-NEXT: extr x8, x8, x13, #32
; CHECK-NEXT: smulh x10, x11, x10
; CHECK-NEXT: extr x9, x9, x12, #32
; CHECK-NEXT: mul x11, x15, x14
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: smulh x14, x15, x14
; CHECK-NEXT: extr x10, x10, x16, #32
; CHECK-NEXT: mov v0.d[1], x8
; CHECK-NEXT: fmov d1, x10
; CHECK-NEXT: extr x11, x14, x11, #32
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: ret
  %tmp = call <4 x i64> @llvm.smul.fix.v4i64(<4 x i64> %x, <4 x i64> %y, i32 32)
  ret <4 x i64> %tmp
}