; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 < %s | FileCheck %s

; Check that the pattern min(max(v1, v2), v3) is folded into a single SVE2p1
; clamp instruction (uclamp, sclamp, fclamp or bfclamp).
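;
; The clamp instructions are destructive: the value being clamped is taken
; from the destination register and the bounds from the two source registers,
; i.e. zd = min(max(zd, zn), zm). With the value %a arriving in z0 and the
; bounds %b and %c in z1 and z2, the fold needs no extra register moves.
;
; A scalar worked example for the unsigned byte case: clamping 200 to the
; range [50, 100] computes umin(umax(200, 50), 100) = umin(200, 100) = 100.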
define <vscale x 16 x i8> @uclampi8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: uclampi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uclamp z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %max = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  %res = tail call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> %max, <vscale x 16 x i8> %c)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @uclampi16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: uclampi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uclamp z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %max = tail call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  %res = tail call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> %max, <vscale x 8 x i16> %c)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @uclampi32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: uclampi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uclamp z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %max = tail call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  %res = tail call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> %max, <vscale x 4 x i32> %c)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @uclampi64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: uclampi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uclamp z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %max = tail call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %res = tail call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> %max, <vscale x 2 x i64> %c)
  ret <vscale x 2 x i64> %res
}

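; The signed variants follow the same pattern with llvm.smax/llvm.smin and
; select sclamp instead of uclamp.
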
define <vscale x 16 x i8> @sclampi8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: sclampi8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sclamp z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %max = tail call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  %res = tail call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> %max, <vscale x 16 x i8> %c)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @sclampi16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: sclampi16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sclamp z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %max = tail call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  %res = tail call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> %max, <vscale x 8 x i16> %c)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @sclampi32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: sclampi32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sclamp z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %max = tail call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  %res = tail call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> %max, <vscale x 4 x i32> %c)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @sclampi64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: sclampi64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sclamp z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %max = tail call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %res = tail call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> %max, <vscale x 2 x i64> %c)
  ret <vscale x 2 x i64> %res
}

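; The floating-point variants fold fmaxnm/fminnm pairs into fclamp (bfclamp
; for bfloat16, which additionally requires +b16b16). The bfloat16 test is
; written with the predicated AArch64 fmaxnm/fminnm intrinsics and an all-true
; predicate rather than the generic llvm.maxnum/llvm.minnum calls used by the
; other tests, presumably because the generic intrinsics are not legal for
; scalable bf16 vectors.
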
define <vscale x 8 x bfloat> @fclampbf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c) {
; CHECK-LABEL: fclampbf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bfclamp z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %max = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  %res = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> %max, <vscale x 8 x bfloat> %c)
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 8 x half> @fclampf16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fclampf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fclamp z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %max = tail call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  %res = tail call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %max, <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x float> @fclampf32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fclampf32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fclamp z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %max = tail call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  %res = tail call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %max, <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x double> @fclampf64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fclampf64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fclamp z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %max = tail call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  %res = tail call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %max, <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %res
}

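; Declarations for the generic min/max intrinsics and the AArch64-specific
; predicated bfloat16 variants used above.
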
declare <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)