Commit f3dcc0c

[LLVM][AArch64][tblgen]: Match clamp pattern (#75529)
Add an isel pattern to replace min(max(v1,v2),v3) with a clamp instruction. Add tests for uclamp, sclamp, bfclamp, and fclamp.
1 parent 8abf8c9 commit f3dcc0c
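For orientation, a minimal sketch of the IR shape the new pattern covers, mirroring the added tests (the function name @clamp_i32_example is illustrative, not part of the commit): an unpredicated max feeding a min of the same scalable vector type can now select to a single clamp instruction instead of a separate max/min pair.

; Illustrative sketch: with +sve2p1, umin(umax(%a, %b), %c) is expected to select to uclamp.
define <vscale x 4 x i32> @clamp_i32_example(<vscale x 4 x i32> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  %max = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  %res = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> %max, <vscale x 4 x i32> %c)
  ret <vscale x 4 x i32> %res  ; expected codegen per the new tests: uclamp z0.s, z1.s, z2.s
}
declare <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)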

2 files changed: +175 -4 lines changed

llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Lines changed: 24 additions & 4 deletions
@@ -316,6 +316,26 @@ def AArch64ssra : PatFrags<(ops node:$op1, node:$op2, node:$op3),
                              [(int_aarch64_sve_ssra node:$op1, node:$op2, node:$op3),
                               (add node:$op1, (AArch64asr_p (SVEAnyPredicate), node:$op2, (SVEShiftSplatImmR (i32 node:$op3))))]>;
 
+// Replace pattern min(max(v1,v2),v3) by clamp
+def AArch64sclamp : PatFrags<(ops node:$Zd, node:$Zn, node:$Zm),
+                              [(int_aarch64_sve_sclamp node:$Zd, node:$Zn, node:$Zm),
+                               (AArch64smin_p (SVEAllActive),
+                                   (AArch64smax_p (SVEAllActive), node:$Zd, node:$Zn),
+                                   node:$Zm)
+                                ]>;
+def AArch64uclamp : PatFrags<(ops node:$Zd, node:$Zn, node:$Zm),
+                              [(int_aarch64_sve_uclamp node:$Zd, node:$Zn, node:$Zm),
+                               (AArch64umin_p (SVEAllActive),
+                                   (AArch64umax_p (SVEAllActive), node:$Zd, node:$Zn),
+                                   node:$Zm)
+                                ]>;
+def AArch64fclamp : PatFrags<(ops node:$Zd, node:$Zn, node:$Zm),
+                              [(int_aarch64_sve_fclamp node:$Zd, node:$Zn, node:$Zm),
+                               (AArch64fminnm_p (SVEAllActive),
+                                   (AArch64fmaxnm_p (SVEAllActive), node:$Zd, node:$Zn),
+                                   node:$Zm)
+                                ]>;
+
 def SDT_AArch64FCVT : SDTypeProfile<1, 3, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
   SDTCVecEltisVT<1,i1>
@@ -3802,8 +3822,8 @@ let Predicates = [HasSVE2BitPerm] in {
 let Predicates = [HasSVE2p1_or_HasSME] in {
 defm REVD_ZPmZ : sve2_int_perm_revd<"revd", AArch64revd_mt>;
 
-defm SCLAMP_ZZZ : sve2_clamp<"sclamp", 0b0, int_aarch64_sve_sclamp>;
-defm UCLAMP_ZZZ : sve2_clamp<"uclamp", 0b1, int_aarch64_sve_uclamp>;
+defm SCLAMP_ZZZ : sve2_clamp<"sclamp", 0b0, AArch64sclamp>;
+defm UCLAMP_ZZZ : sve2_clamp<"uclamp", 0b1, AArch64uclamp>;
 
 defm PSEL_PPPRI : sve2_int_perm_sel_p<"psel", int_aarch64_sve_psel>;
 } // End HasSVE2p1_or_HasSME
@@ -3813,7 +3833,7 @@ defm PSEL_PPPRI : sve2_int_perm_sel_p<"psel", int_aarch64_sve_psel>;
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasSVE2p1_or_HasSME2] in {
-defm FCLAMP_ZZZ : sve2p1_fclamp<"fclamp", int_aarch64_sve_fclamp>;
+defm FCLAMP_ZZZ : sve2p1_fclamp<"fclamp", AArch64fclamp>;
 
 defm FDOT_ZZZ_S : sve_float_dot<0b0, 0b0, ZPR32, ZPR16, "fdot", nxv8f16, int_aarch64_sve_fdot_x2>;
 defm FDOT_ZZZI_S : sve_float_dot_indexed<0b0, 0b00, ZPR16, ZPR3b16, "fdot", nxv8f16, int_aarch64_sve_fdot_lane_x2>;
@@ -4055,7 +4075,7 @@ defm BFMINNM_ZPZZ : sve2p1_bf_bin_pred_zds<AArch64fminnm_p>;
 
 defm BFMUL_ZZZI : sve2p1_fp_bfmul_by_indexed_elem<"bfmul", int_aarch64_sve_fmul_lane>;
 
-defm BFCLAMP_ZZZ : sve2p1_bfclamp<"bfclamp", int_aarch64_sve_fclamp>;
+defm BFCLAMP_ZZZ : sve2p1_bfclamp<"bfclamp", AArch64fclamp>;
 } // End HasSVE2orSME2, HasB16B16
 
 

Lines changed: 151 additions & 0 deletions
@@ -0,0 +1,151 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 < %s | FileCheck %s
+
+; Replace pattern min(max(v1,v2),v3) by clamp
+
+define <vscale x 16 x i8> @uclampi8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uclampi8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uclamp z0.b, z1.b, z2.b
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  %res = tail call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> %min, <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @uclampi16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uclampi16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uclamp z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  %res = tail call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> %min, <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @uclampi32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uclampi32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uclamp z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  %res = tail call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> %min, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @uclampi64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uclampi64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: uclamp z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  %res = tail call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> %min, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @sclampi8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sclampi8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sclamp z0.b, z1.b, z2.b
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  %res = tail call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> %min, <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @sclampi16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sclampi16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sclamp z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  %res = tail call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> %min, <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sclampi32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sclampi32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sclamp z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  %res = tail call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> %min, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sclampi64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sclampi64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sclamp z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  %res = tail call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> %min, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 8 x bfloat> @fclampbf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c) {
+; CHECK-LABEL: fclampbf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bfclamp z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+  %res = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> %min, <vscale x 8 x bfloat> %c)
+  ret <vscale x 8 x bfloat> %res
+}
+
+define <vscale x 8 x half> @fclampf16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
+; CHECK-LABEL: fclampf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fclamp z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %min = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %res = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %min, <vscale x 8 x half> %c)
+  ret <vscale x 8 x half> %res
+}
+
+define <vscale x 4 x float> @fclampf32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
+; CHECK-LABEL: fclampf32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fclamp z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %res = tail call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %min, <vscale x 4 x float> %c)
+  ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @fclampf64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
+; CHECK-LABEL: fclampf64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: fclamp z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %min = tail call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %res = tail call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %min, <vscale x 2 x double> %c)
+  ret <vscale x 2 x double> %res
+}
+
+declare <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half> @llvm.maxnum.nxv8f16 (<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 8 x half> @llvm.minnum.nxv8f16 (<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 4 x float> @llvm.maxnum.nxv4f32 (<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 4 x float> @llvm.minnum.nxv4f32 (<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.maxnum.nxv2f64 (<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x double> @llvm.minnum.nxv2f64 (<vscale x 2 x double>, <vscale x 2 x double>)
