Skip to content

Commit 19008d3

Browse files
authored
[llvm] Support fixed point multiplication on AArch64 (#84237)
Prior to this, fixed point multiplication would lead to this assertion error on AArch64, armv8, and armv7. ``` _Accum f(_Accum x, _Accum y) { return x * y; } // ./bin/clang++ -ffixed-point /tmp/test2.cc -c -S -o - -target aarch64 -O3 clang++: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp:10245: void llvm::TargetLowering::forceExpandWideMUL(SelectionDAG &, const SDLoc &, bool, EVT, const SDValue, const SDValue, const SDValue, const SDValue, SDValue &, SDValue &) const: Assertion `Ret.getOpcode() == ISD::MERGE_VALUES && "Ret value is a collection of constituent nodes holding result."' failed. ``` This path into forceExpandWideMUL should only be taken if we don't support [US]MUL_LOHI or MULH[US] for the operand size (32 in this case). But we should also check if we can just leverage regular wide multiplication. That is, extend the operands from 32 to 64, do a regular 64-bit mul, then trunc and shift. These ops are certainly available on aarch64 but for wider types.
1 parent f89b1b8 commit 19008d3

File tree

5 files changed

+775
-0
lines changed

5 files changed

+775
-0
lines changed

llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10500,13 +10500,25 @@ TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
1050010500
SDValue Lo, Hi;
1050110501
unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
1050210502
unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
10503+
EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VTSize * 2);
1050310504
if (isOperationLegalOrCustom(LoHiOp, VT)) {
1050410505
SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
1050510506
Lo = Result.getValue(0);
1050610507
Hi = Result.getValue(1);
1050710508
} else if (isOperationLegalOrCustom(HiOp, VT)) {
1050810509
Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
1050910510
Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
10511+
} else if (isOperationLegalOrCustom(ISD::MUL, WideVT)) {
10512+
// Try for a multiplication using a wider type.
10513+
unsigned Ext = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
10514+
SDValue LHSExt = DAG.getNode(Ext, dl, WideVT, LHS);
10515+
SDValue RHSExt = DAG.getNode(Ext, dl, WideVT, RHS);
10516+
SDValue Res = DAG.getNode(ISD::MUL, dl, WideVT, LHSExt, RHSExt);
10517+
Lo = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
10518+
SDValue Shifted =
10519+
DAG.getNode(ISD::SRA, dl, WideVT, Res,
10520+
DAG.getShiftAmountConstant(VTSize, WideVT, dl));
10521+
Hi = DAG.getNode(ISD::TRUNCATE, dl, VT, Shifted);
1051010522
} else if (VT.isVector()) {
1051110523
return SDValue();
1051210524
} else {

llvm/test/CodeGen/AArch64/smul_fix.ll

Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2+
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
3+
4+
define i32 @func(i32 %x, i32 %y) nounwind {
5+
; CHECK-LABEL: func:
6+
; CHECK: // %bb.0:
7+
; CHECK-NEXT: smull x8, w0, w1
8+
; CHECK-NEXT: lsr x9, x8, #32
9+
; CHECK-NEXT: extr w0, w9, w8, #2
10+
; CHECK-NEXT: ret
11+
%tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 2)
12+
ret i32 %tmp
13+
}
14+
15+
define i64 @func2(i64 %x, i64 %y) {
16+
; CHECK-LABEL: func2:
17+
; CHECK: // %bb.0:
18+
; CHECK-NEXT: mul x8, x0, x1
19+
; CHECK-NEXT: smulh x9, x0, x1
20+
; CHECK-NEXT: extr x0, x9, x8, #2
21+
; CHECK-NEXT: ret
22+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2)
23+
ret i64 %tmp
24+
}
25+
26+
define i4 @func3(i4 %x, i4 %y) nounwind {
27+
; CHECK-LABEL: func3:
28+
; CHECK: // %bb.0:
29+
; CHECK-NEXT: sbfx w8, w1, #0, #4
30+
; CHECK-NEXT: sbfx w9, w0, #0, #4
31+
; CHECK-NEXT: smull x8, w9, w8
32+
; CHECK-NEXT: lsr x9, x8, #32
33+
; CHECK-NEXT: extr w0, w9, w8, #2
34+
; CHECK-NEXT: ret
35+
%tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2)
36+
ret i4 %tmp
37+
}
38+
39+
;; These result in regular integer multiplication
40+
define i32 @func4(i32 %x, i32 %y) nounwind {
41+
; CHECK-LABEL: func4:
42+
; CHECK: // %bb.0:
43+
; CHECK-NEXT: mul w0, w0, w1
44+
; CHECK-NEXT: ret
45+
%tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 0)
46+
ret i32 %tmp
47+
}
48+
49+
define i64 @func5(i64 %x, i64 %y) {
50+
; CHECK-LABEL: func5:
51+
; CHECK: // %bb.0:
52+
; CHECK-NEXT: mul x0, x0, x1
53+
; CHECK-NEXT: ret
54+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 0)
55+
ret i64 %tmp
56+
}
57+
58+
define i4 @func6(i4 %x, i4 %y) nounwind {
59+
; CHECK-LABEL: func6:
60+
; CHECK: // %bb.0:
61+
; CHECK-NEXT: sbfx w8, w1, #0, #4
62+
; CHECK-NEXT: sbfx w9, w0, #0, #4
63+
; CHECK-NEXT: mul w0, w9, w8
64+
; CHECK-NEXT: ret
65+
%tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 0)
66+
ret i4 %tmp
67+
}
68+
69+
define i64 @func7(i64 %x, i64 %y) nounwind {
70+
; CHECK-LABEL: func7:
71+
; CHECK: // %bb.0:
72+
; CHECK-NEXT: mul x8, x0, x1
73+
; CHECK-NEXT: smulh x9, x0, x1
74+
; CHECK-NEXT: extr x0, x9, x8, #32
75+
; CHECK-NEXT: ret
76+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 32)
77+
ret i64 %tmp
78+
}
79+
80+
define i64 @func8(i64 %x, i64 %y) nounwind {
81+
; CHECK-LABEL: func8:
82+
; CHECK: // %bb.0:
83+
; CHECK-NEXT: mul x8, x0, x1
84+
; CHECK-NEXT: smulh x9, x0, x1
85+
; CHECK-NEXT: extr x0, x9, x8, #63
86+
; CHECK-NEXT: ret
87+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 63)
88+
ret i64 %tmp
89+
}
90+
91+
define <2 x i32> @vec(<2 x i32> %x, <2 x i32> %y) nounwind {
92+
; CHECK-LABEL: vec:
93+
; CHECK: // %bb.0:
94+
; CHECK-NEXT: mul v0.2s, v0.2s, v1.2s
95+
; CHECK-NEXT: ret
96+
%tmp = call <2 x i32> @llvm.smul.fix.v2i32(<2 x i32> %x, <2 x i32> %y, i32 0)
97+
ret <2 x i32> %tmp
98+
}
99+
100+
define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
101+
; CHECK-LABEL: vec2:
102+
; CHECK: // %bb.0:
103+
; CHECK-NEXT: mul v0.4s, v0.4s, v1.4s
104+
; CHECK-NEXT: ret
105+
%tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0)
106+
ret <4 x i32> %tmp
107+
}
108+
109+
define <4 x i64> @vec3(<4 x i64> %x, <4 x i64> %y) nounwind {
110+
; CHECK-LABEL: vec3:
111+
; CHECK: // %bb.0:
112+
; CHECK-NEXT: mov x8, v2.d[1]
113+
; CHECK-NEXT: mov x9, v0.d[1]
114+
; CHECK-NEXT: fmov x10, d2
115+
; CHECK-NEXT: fmov x11, d0
116+
; CHECK-NEXT: mov x14, v3.d[1]
117+
; CHECK-NEXT: mov x15, v1.d[1]
118+
; CHECK-NEXT: mul x12, x11, x10
119+
; CHECK-NEXT: mul x13, x9, x8
120+
; CHECK-NEXT: smulh x8, x9, x8
121+
; CHECK-NEXT: smulh x9, x11, x10
122+
; CHECK-NEXT: fmov x10, d3
123+
; CHECK-NEXT: fmov x11, d1
124+
; CHECK-NEXT: mul x16, x11, x10
125+
; CHECK-NEXT: extr x8, x8, x13, #32
126+
; CHECK-NEXT: smulh x10, x11, x10
127+
; CHECK-NEXT: extr x9, x9, x12, #32
128+
; CHECK-NEXT: mul x11, x15, x14
129+
; CHECK-NEXT: fmov d0, x9
130+
; CHECK-NEXT: smulh x14, x15, x14
131+
; CHECK-NEXT: extr x10, x10, x16, #32
132+
; CHECK-NEXT: mov v0.d[1], x8
133+
; CHECK-NEXT: fmov d1, x10
134+
; CHECK-NEXT: extr x11, x14, x11, #32
135+
; CHECK-NEXT: mov v1.d[1], x11
136+
; CHECK-NEXT: ret
137+
%tmp = call <4 x i64> @llvm.smul.fix.v4i64(<4 x i64> %x, <4 x i64> %y, i32 32)
138+
ret <4 x i64> %tmp
139+
}

0 commit comments

Comments
 (0)