Skip to content

Commit 28a6608

Browse files
committed
[llvm] Support fixed point multiplication on AArch64
Prior to this, fixed point multiplication would lead to this assertion error on AArch64, ARMv8, and ARMv7. ``` _Accum f(_Accum x, _Accum y) { return x * y; } // ./bin/clang++ -ffixed-point /tmp/test2.cc -c -S -o - -target aarch64 -O3 clang++: llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp:10245: void llvm::TargetLowering::forceExpandWideMUL(SelectionDAG &, const SDLoc &, bool, EVT, const SDValue, const SDValue, const SDValue, const SDValue, SDValue &, SDValue &) const: Assertion `Ret.getOpcode() == ISD::MERGE_VALUES && "Ret value is a collection of constituent nodes holding result."' failed. ``` This path into forceExpandWideMUL should only be taken if we don't support [US]MUL_LOHI or MULH[US] for the operand size (32 in this case). But we should also check whether we can simply leverage a regular wide multiplication: extend the operands from 32 to 64 bits, perform a regular 64-bit multiplication, then truncate and shift to recover the low and high halves. These operations are certainly available on AArch64, just at the wider type.
1 parent 96c45a7 commit 28a6608

File tree

5 files changed

+496
-0
lines changed

5 files changed

+496
-0
lines changed

llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10402,13 +10402,25 @@ TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
1040210402
SDValue Lo, Hi;
1040310403
unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
1040410404
unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
10405+
EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VTSize * 2);
1040510406
if (isOperationLegalOrCustom(LoHiOp, VT)) {
1040610407
SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
1040710408
Lo = Result.getValue(0);
1040810409
Hi = Result.getValue(1);
1040910410
} else if (isOperationLegalOrCustom(HiOp, VT)) {
1041010411
Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
1041110412
Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
10413+
} else if (isOperationLegalOrCustom(ISD::MUL, WideVT)) {
10414+
// Try for a multiplication using a wider type.
10415+
unsigned Ext = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
10416+
SDValue LHSExt = DAG.getNode(Ext, dl, WideVT, LHS);
10417+
SDValue RHSExt = DAG.getNode(Ext, dl, WideVT, RHS);
10418+
SDValue Res = DAG.getNode(ISD::MUL, dl, WideVT, LHSExt, RHSExt);
10419+
Lo = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
10420+
SDValue Shifted =
10421+
DAG.getNode(ISD::SRA, dl, WideVT, Res,
10422+
DAG.getShiftAmountConstant(VTSize, WideVT, dl));
10423+
Hi = DAG.getNode(ISD::TRUNCATE, dl, VT, Shifted);
1041210424
} else if (VT.isVector()) {
1041310425
return SDValue();
1041410426
} else {

llvm/test/CodeGen/AArch64/smul_fix.ll

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2+
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
3+
4+
define i32 @func(i32 %x, i32 %y) nounwind {
5+
; CHECK-LABEL: func:
6+
; CHECK: // %bb.0:
7+
; CHECK-NEXT: smull x8, w0, w1
8+
; CHECK-NEXT: lsr x9, x8, #32
9+
; CHECK-NEXT: extr w0, w9, w8, #2
10+
; CHECK-NEXT: ret
11+
%tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 2)
12+
ret i32 %tmp
13+
}
14+
15+
define i64 @func2(i64 %x, i64 %y) {
16+
; CHECK-LABEL: func2:
17+
; CHECK: // %bb.0:
18+
; CHECK-NEXT: mul x8, x0, x1
19+
; CHECK-NEXT: smulh x9, x0, x1
20+
; CHECK-NEXT: extr x0, x9, x8, #2
21+
; CHECK-NEXT: ret
22+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2)
23+
ret i64 %tmp
24+
}
25+
26+
define i4 @func3(i4 %x, i4 %y) nounwind {
27+
; CHECK-LABEL: func3:
28+
; CHECK: // %bb.0:
29+
; CHECK-NEXT: sbfx w8, w1, #0, #4
30+
; CHECK-NEXT: sbfx w9, w0, #0, #4
31+
; CHECK-NEXT: smull x8, w9, w8
32+
; CHECK-NEXT: lsr x9, x8, #32
33+
; CHECK-NEXT: extr w0, w9, w8, #2
34+
; CHECK-NEXT: ret
35+
%tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2)
36+
ret i4 %tmp
37+
}
38+
39+
;; These result in regular integer multiplication
40+
define i32 @func4(i32 %x, i32 %y) nounwind {
41+
; CHECK-LABEL: func4:
42+
; CHECK: // %bb.0:
43+
; CHECK-NEXT: mul w0, w0, w1
44+
; CHECK-NEXT: ret
45+
%tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 0)
46+
ret i32 %tmp
47+
}
48+
49+
define i64 @func5(i64 %x, i64 %y) {
50+
; CHECK-LABEL: func5:
51+
; CHECK: // %bb.0:
52+
; CHECK-NEXT: mul x0, x0, x1
53+
; CHECK-NEXT: ret
54+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 0)
55+
ret i64 %tmp
56+
}
57+
58+
define i4 @func6(i4 %x, i4 %y) nounwind {
59+
; CHECK-LABEL: func6:
60+
; CHECK: // %bb.0:
61+
; CHECK-NEXT: sbfx w8, w1, #0, #4
62+
; CHECK-NEXT: sbfx w9, w0, #0, #4
63+
; CHECK-NEXT: mul w0, w9, w8
64+
; CHECK-NEXT: ret
65+
%tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 0)
66+
ret i4 %tmp
67+
}
68+
69+
define i64 @func7(i64 %x, i64 %y) nounwind {
70+
; CHECK-LABEL: func7:
71+
; CHECK: // %bb.0:
72+
; CHECK-NEXT: mul x8, x0, x1
73+
; CHECK-NEXT: smulh x9, x0, x1
74+
; CHECK-NEXT: extr x0, x9, x8, #32
75+
; CHECK-NEXT: ret
76+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 32)
77+
ret i64 %tmp
78+
}
79+
80+
define i64 @func8(i64 %x, i64 %y) nounwind {
81+
; CHECK-LABEL: func8:
82+
; CHECK: // %bb.0:
83+
; CHECK-NEXT: mul x8, x0, x1
84+
; CHECK-NEXT: smulh x9, x0, x1
85+
; CHECK-NEXT: extr x0, x9, x8, #63
86+
; CHECK-NEXT: ret
87+
%tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 63)
88+
ret i64 %tmp
89+
}
Lines changed: 148 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,148 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2+
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
3+
4+
declare i4 @llvm.smul.fix.sat.i4 (i4, i4, i32)
5+
declare i32 @llvm.smul.fix.sat.i32 (i32, i32, i32)
6+
declare i64 @llvm.smul.fix.sat.i64 (i64, i64, i32)
7+
8+
define i32 @func(i32 %x, i32 %y) nounwind {
9+
; CHECK-LABEL: func:
10+
; CHECK: // %bb.0:
11+
; CHECK-NEXT: smull x9, w0, w1
12+
; CHECK-NEXT: mov w8, #2147483647 // =0x7fffffff
13+
; CHECK-NEXT: lsr x10, x9, #32
14+
; CHECK-NEXT: extr w9, w10, w9, #2
15+
; CHECK-NEXT: cmp w10, #1
16+
; CHECK-NEXT: csel w8, w8, w9, gt
17+
; CHECK-NEXT: cmn w10, #2
18+
; CHECK-NEXT: mov w9, #-2147483648 // =0x80000000
19+
; CHECK-NEXT: csel w0, w9, w8, lt
20+
; CHECK-NEXT: ret
21+
%tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 2)
22+
ret i32 %tmp
23+
}
24+
25+
define i64 @func2(i64 %x, i64 %y) nounwind {
26+
; CHECK-LABEL: func2:
27+
; CHECK: // %bb.0:
28+
; CHECK-NEXT: mul x9, x0, x1
29+
; CHECK-NEXT: mov x8, #9223372036854775807 // =0x7fffffffffffffff
30+
; CHECK-NEXT: smulh x10, x0, x1
31+
; CHECK-NEXT: extr x9, x10, x9, #2
32+
; CHECK-NEXT: cmp x10, #1
33+
; CHECK-NEXT: csel x8, x8, x9, gt
34+
; CHECK-NEXT: cmn x10, #2
35+
; CHECK-NEXT: mov x9, #-9223372036854775808 // =0x8000000000000000
36+
; CHECK-NEXT: csel x0, x9, x8, lt
37+
; CHECK-NEXT: ret
38+
%tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 2)
39+
ret i64 %tmp
40+
}
41+
42+
define i4 @func3(i4 %x, i4 %y) nounwind {
43+
; CHECK-LABEL: func3:
44+
; CHECK: // %bb.0:
45+
; CHECK-NEXT: sbfx w9, w1, #0, #4
46+
; CHECK-NEXT: lsl w10, w0, #28
47+
; CHECK-NEXT: mov w8, #2147483647 // =0x7fffffff
48+
; CHECK-NEXT: smull x9, w10, w9
49+
; CHECK-NEXT: lsr x10, x9, #32
50+
; CHECK-NEXT: extr w9, w10, w9, #2
51+
; CHECK-NEXT: cmp w10, #1
52+
; CHECK-NEXT: csel w8, w8, w9, gt
53+
; CHECK-NEXT: cmn w10, #2
54+
; CHECK-NEXT: mov w9, #-2147483648 // =0x80000000
55+
; CHECK-NEXT: csel w8, w9, w8, lt
56+
; CHECK-NEXT: asr w0, w8, #28
57+
; CHECK-NEXT: ret
58+
%tmp = call i4 @llvm.smul.fix.sat.i4(i4 %x, i4 %y, i32 2)
59+
ret i4 %tmp
60+
}
61+
62+
;; These result in regular integer multiplication with a saturation check.
63+
define i32 @func4(i32 %x, i32 %y) nounwind {
64+
; CHECK-LABEL: func4:
65+
; CHECK: // %bb.0:
66+
; CHECK-NEXT: smull x9, w0, w1
67+
; CHECK-NEXT: eor w10, w0, w1
68+
; CHECK-NEXT: mov w8, #-2147483648 // =0x80000000
69+
; CHECK-NEXT: cmp w10, #0
70+
; CHECK-NEXT: cinv w8, w8, ge
71+
; CHECK-NEXT: cmp x9, w9, sxtw
72+
; CHECK-NEXT: csel w0, w8, w9, ne
73+
; CHECK-NEXT: ret
74+
%tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 0)
75+
ret i32 %tmp
76+
}
77+
78+
define i64 @func5(i64 %x, i64 %y) {
79+
; CHECK-LABEL: func5:
80+
; CHECK: // %bb.0:
81+
; CHECK-NEXT: mul x9, x0, x1
82+
; CHECK-NEXT: eor x11, x0, x1
83+
; CHECK-NEXT: mov x8, #-9223372036854775808 // =0x8000000000000000
84+
; CHECK-NEXT: cmp x11, #0
85+
; CHECK-NEXT: smulh x10, x0, x1
86+
; CHECK-NEXT: cinv x8, x8, ge
87+
; CHECK-NEXT: cmp x10, x9, asr #63
88+
; CHECK-NEXT: csel x0, x8, x9, ne
89+
; CHECK-NEXT: ret
90+
%tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 0)
91+
ret i64 %tmp
92+
}
93+
94+
define i4 @func6(i4 %x, i4 %y) nounwind {
95+
; CHECK-LABEL: func6:
96+
; CHECK: // %bb.0:
97+
; CHECK-NEXT: sbfx w9, w1, #0, #4
98+
; CHECK-NEXT: lsl w10, w0, #28
99+
; CHECK-NEXT: mov w8, #-2147483648 // =0x80000000
100+
; CHECK-NEXT: smull x11, w10, w9
101+
; CHECK-NEXT: eor w9, w10, w9
102+
; CHECK-NEXT: cmp w9, #0
103+
; CHECK-NEXT: cinv w8, w8, ge
104+
; CHECK-NEXT: cmp x11, w11, sxtw
105+
; CHECK-NEXT: csel w8, w8, w11, ne
106+
; CHECK-NEXT: asr w0, w8, #28
107+
; CHECK-NEXT: ret
108+
%tmp = call i4 @llvm.smul.fix.sat.i4(i4 %x, i4 %y, i32 0)
109+
ret i4 %tmp
110+
}
111+
112+
define i64 @func7(i64 %x, i64 %y) nounwind {
113+
; CHECK-LABEL: func7:
114+
; CHECK: // %bb.0:
115+
; CHECK-NEXT: mul x9, x0, x1
116+
; CHECK-NEXT: mov w8, #2147483647 // =0x7fffffff
117+
; CHECK-NEXT: mov x11, #-2147483648 // =0xffffffff80000000
118+
; CHECK-NEXT: smulh x10, x0, x1
119+
; CHECK-NEXT: extr x9, x10, x9, #32
120+
; CHECK-NEXT: cmp x10, x8
121+
; CHECK-NEXT: mov x8, #9223372036854775807 // =0x7fffffffffffffff
122+
; CHECK-NEXT: csel x8, x8, x9, gt
123+
; CHECK-NEXT: cmp x10, x11
124+
; CHECK-NEXT: mov x9, #-9223372036854775808 // =0x8000000000000000
125+
; CHECK-NEXT: csel x0, x9, x8, lt
126+
; CHECK-NEXT: ret
127+
%tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 32)
128+
ret i64 %tmp
129+
}
130+
131+
define i64 @func8(i64 %x, i64 %y) nounwind {
132+
; CHECK-LABEL: func8:
133+
; CHECK: // %bb.0:
134+
; CHECK-NEXT: mul x9, x0, x1
135+
; CHECK-NEXT: mov x8, #4611686018427387903 // =0x3fffffffffffffff
136+
; CHECK-NEXT: mov x11, #-4611686018427387904 // =0xc000000000000000
137+
; CHECK-NEXT: smulh x10, x0, x1
138+
; CHECK-NEXT: extr x9, x10, x9, #63
139+
; CHECK-NEXT: cmp x10, x8
140+
; CHECK-NEXT: mov x8, #9223372036854775807 // =0x7fffffffffffffff
141+
; CHECK-NEXT: csel x8, x8, x9, gt
142+
; CHECK-NEXT: cmp x10, x11
143+
; CHECK-NEXT: mov x9, #-9223372036854775808 // =0x8000000000000000
144+
; CHECK-NEXT: csel x0, x9, x8, lt
145+
; CHECK-NEXT: ret
146+
%tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 63)
147+
ret i64 %tmp
148+
}

llvm/test/CodeGen/AArch64/umul_fix.ll

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,101 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2+
; RUN: llc < %s -mtriple=aarch64-linux-gnu | FileCheck %s
3+
4+
declare i4 @llvm.umul.fix.i4 (i4, i4, i32)
5+
declare i32 @llvm.umul.fix.i32 (i32, i32, i32)
6+
declare i64 @llvm.umul.fix.i64 (i64, i64, i32)
7+
8+
define i32 @func(i32 %x, i32 %y) nounwind {
9+
; CHECK-LABEL: func:
10+
; CHECK: // %bb.0:
11+
; CHECK-NEXT: umull x8, w0, w1
12+
; CHECK-NEXT: lsr x9, x8, #32
13+
; CHECK-NEXT: extr w0, w9, w8, #2
14+
; CHECK-NEXT: ret
15+
%tmp = call i32 @llvm.umul.fix.i32(i32 %x, i32 %y, i32 2)
16+
ret i32 %tmp
17+
}
18+
19+
define i64 @func2(i64 %x, i64 %y) nounwind {
20+
; CHECK-LABEL: func2:
21+
; CHECK: // %bb.0:
22+
; CHECK-NEXT: mul x8, x0, x1
23+
; CHECK-NEXT: umulh x9, x0, x1
24+
; CHECK-NEXT: extr x0, x9, x8, #2
25+
; CHECK-NEXT: ret
26+
%tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 2)
27+
ret i64 %tmp
28+
}
29+
30+
define i4 @func3(i4 %x, i4 %y) nounwind {
31+
; CHECK-LABEL: func3:
32+
; CHECK: // %bb.0:
33+
; CHECK-NEXT: and w8, w1, #0xf
34+
; CHECK-NEXT: and w9, w0, #0xf
35+
; CHECK-NEXT: mul w8, w9, w8
36+
; CHECK-NEXT: lsr w0, w8, #2
37+
; CHECK-NEXT: ret
38+
%tmp = call i4 @llvm.umul.fix.i4(i4 %x, i4 %y, i32 2)
39+
ret i4 %tmp
40+
}
41+
42+
;; These result in regular integer multiplication
43+
define i32 @func4(i32 %x, i32 %y) nounwind {
44+
; CHECK-LABEL: func4:
45+
; CHECK: // %bb.0:
46+
; CHECK-NEXT: mul w0, w0, w1
47+
; CHECK-NEXT: ret
48+
%tmp = call i32 @llvm.umul.fix.i32(i32 %x, i32 %y, i32 0)
49+
ret i32 %tmp
50+
}
51+
52+
define i64 @func5(i64 %x, i64 %y) nounwind {
53+
; CHECK-LABEL: func5:
54+
; CHECK: // %bb.0:
55+
; CHECK-NEXT: mul x0, x0, x1
56+
; CHECK-NEXT: ret
57+
%tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 0)
58+
ret i64 %tmp
59+
}
60+
61+
define i4 @func6(i4 %x, i4 %y) nounwind {
62+
; CHECK-LABEL: func6:
63+
; CHECK: // %bb.0:
64+
; CHECK-NEXT: and w8, w1, #0xf
65+
; CHECK-NEXT: and w9, w0, #0xf
66+
; CHECK-NEXT: mul w0, w9, w8
67+
; CHECK-NEXT: ret
68+
%tmp = call i4 @llvm.umul.fix.i4(i4 %x, i4 %y, i32 0)
69+
ret i4 %tmp
70+
}
71+
72+
define i64 @func7(i64 %x, i64 %y) nounwind {
73+
; CHECK-LABEL: func7:
74+
; CHECK: // %bb.0:
75+
; CHECK-NEXT: mul x8, x0, x1
76+
; CHECK-NEXT: umulh x9, x0, x1
77+
; CHECK-NEXT: extr x0, x9, x8, #32
78+
; CHECK-NEXT: ret
79+
%tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 32)
80+
ret i64 %tmp
81+
}
82+
83+
define i64 @func8(i64 %x, i64 %y) nounwind {
84+
; CHECK-LABEL: func8:
85+
; CHECK: // %bb.0:
86+
; CHECK-NEXT: mul x8, x0, x1
87+
; CHECK-NEXT: umulh x9, x0, x1
88+
; CHECK-NEXT: extr x0, x9, x8, #63
89+
; CHECK-NEXT: ret
90+
%tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 63)
91+
ret i64 %tmp
92+
}
93+
94+
define i64 @func9(i64 %x, i64 %y) nounwind {
95+
; CHECK-LABEL: func9:
96+
; CHECK: // %bb.0:
97+
; CHECK-NEXT: umulh x0, x0, x1
98+
; CHECK-NEXT: ret
99+
%tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 64)
100+
ret i64 %tmp
101+
}

0 commit comments

Comments
 (0)