Commit 0560632
[AArch64][SVE] Add SVE intrinsics for masked loads & stores

Summary:
Implements the following intrinsics for contiguous loads & stores:
  - @llvm.aarch64.sve.ld1
  - @llvm.aarch64.sve.st1

Reviewers: sdesmalen, andwar, efriedma, cameron.mcinally, dancgr, rengolin

Reviewed By: cameron.mcinally

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, danielkiss, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D76688
1 parent d82c1e8 commit 0560632
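For a sense of how the two new intrinsics fit together, here is a minimal usage sketch based only on the signatures added by this patch; it is not code from the commit, and the function name @copy_active_i32 is invented for illustration. It performs a predicate-masked copy of one register's worth of i32 elements.

; Minimal sketch (not part of this commit): copy the active i32 lanes from
; %src to %dst under the predicate %pg.
define void @copy_active_i32(<vscale x 4 x i1> %pg, i32* %src, i32* %dst) {
  ; Contiguous masked load; the generated ld1w is zeroing-predicated (/z),
  ; so inactive lanes of %vals come back as zero.
  %vals = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, i32* %src)
  ; Contiguous masked store; only the active lanes are written to memory.
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %vals, <vscale x 4 x i1> %pg, i32* %dst)
  ret void
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)

Compiled with llc -mtriple=aarch64-linux-gnu -mattr=+sve, this should lower to a single ld1w/st1w pair, which is what the new test file below checks for the individual intrinsics.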

3 files changed, +200 -6 lines

llvm/include/llvm/IR/IntrinsicsAArch64.td

Lines changed: 4 additions & 0 deletions
@@ -1282,6 +1282,8 @@ class SVE_gather_prf_vector_base_scalar_offset
 // Loads
 //
 
+def int_aarch64_sve_ld1   : AdvSIMD_1Vec_PredLoad_Intrinsic;
+
 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
@@ -1290,6 +1292,8 @@ def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 // Stores
 //
 
+def int_aarch64_sve_st1   : AdvSIMD_1Vec_PredStore_Intrinsic;
+
 def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;
 
 //
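Both defs reuse the existing single-vector predicated load/store intrinsic classes, so they are overloaded on the element type. The per-type mangled names used later in the test file follow the usual nxv<lanes><element> suffix, for example (declarations copied from the test file below):

declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)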

llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Lines changed: 14 additions & 6 deletions
@@ -8978,24 +8978,30 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.align = Align(16);
     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
     return true;
+  case Intrinsic::aarch64_sve_ld1:
   case Intrinsic::aarch64_sve_ldnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::getVT(I.getType());
     Info.ptrVal = I.getArgOperand(1);
     Info.offset = 0;
     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
-    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
+    Info.flags = MachineMemOperand::MOLoad;
+    if (Intrinsic == Intrinsic::aarch64_sve_ldnt1)
+      Info.flags |= MachineMemOperand::MONonTemporal;
     return true;
   }
+  case Intrinsic::aarch64_sve_st1:
   case Intrinsic::aarch64_sve_stnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(2)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::getVT(I.getOperand(0)->getType());
     Info.ptrVal = I.getArgOperand(2);
     Info.offset = 0;
     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
-    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
+    Info.flags = MachineMemOperand::MOStore;
+    if (Intrinsic == Intrinsic::aarch64_sve_stnt1)
+      Info.flags |= MachineMemOperand::MONonTemporal;
     return true;
   }
   default:
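The point of this hunk is that the new ld1/st1 intrinsics share the existing ldnt1/stnt1 handling, with MONonTemporal now applied only to the non-temporal variants. At the IR level the two load forms take the same operands, since both TableGen defs above use the same class; the ldnt1 declaration in the sketch below is inferred from the pre-existing non-temporal support rather than taken from this diff.

; Same operands either way: a predicate and a pointer. Only the MachineMemOperand
; flags attached during lowering differ (ldnt1 additionally gets MONonTemporal).
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, i32*)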
@@ -11514,7 +11520,7 @@ static MVT getSVEContainerType(EVT ContentTy) {
   }
 }
 
-static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
   EVT PtrTy = N->getOperand(3).getValueType();
@@ -11539,7 +11545,7 @@ static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
   return L;
 }
 
-static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
 
   SDValue Data = N->getOperand(2);
@@ -13130,8 +13136,9 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
     case Intrinsic::aarch64_neon_st3lane:
     case Intrinsic::aarch64_neon_st4lane:
       return performNEONPostLDSTCombine(N, DCI, DAG);
+    case Intrinsic::aarch64_sve_ld1:
     case Intrinsic::aarch64_sve_ldnt1:
-      return performLDNT1Combine(N, DAG);
+      return performLD1Combine(N, DAG);
     case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
       return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1);
     case Intrinsic::aarch64_sve_ldnt1_gather:
@@ -13144,8 +13151,9 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
       return performLDNF1Combine(N, DAG, AArch64ISD::LDNF1);
     case Intrinsic::aarch64_sve_ldff1:
       return performLDNF1Combine(N, DAG, AArch64ISD::LDFF1);
+    case Intrinsic::aarch64_sve_st1:
     case Intrinsic::aarch64_sve_stnt1:
-      return performSTNT1Combine(N, DAG);
+      return performST1Combine(N, DAG);
     case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
       return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1);
     case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
Lines changed: 182 additions & 0 deletions (new test file)
@@ -0,0 +1,182 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B
;

define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
; CHECK-LABEL: ld1b_i8:
; CHECK: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred,
                                                                i8* %addr)
  ret <vscale x 16 x i8> %res
}

;
; LD1H
;

define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
; CHECK-LABEL: ld1h_i16:
; CHECK: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pred,
                                                                i16* %addr)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pred, half* %addr) {
; CHECK-LABEL: ld1h_f16:
; CHECK: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pred,
                                                                 half* %addr)
  ret <vscale x 8 x half> %res
}

;
; LD1W
;

define <vscale x 4 x i32> @ld1w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
; CHECK-LABEL: ld1w_i32:
; CHECK: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pred,
                                                                i32* %addr)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pred, float* %addr) {
; CHECK-LABEL: ld1w_f32:
; CHECK: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pred,
                                                                  float* %addr)
  ret <vscale x 4 x float> %res
}

;
; LD1D
;

define <vscale x 2 x i64> @ld1d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
; CHECK-LABEL: ld1d_i64:
; CHECK: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pred,
                                                                i64* %addr)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pred, double* %addr) {
; CHECK-LABEL: ld1d_f64:
; CHECK: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pred,
                                                                   double* %addr)
  ret <vscale x 2 x double> %res
}

;
; ST1B
;

define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %addr) {
; CHECK-LABEL: st1b_i8:
; CHECK: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data,
                                          <vscale x 16 x i1> %pred,
                                          i8* %addr)
  ret void
}

;
; ST1H
;

define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %addr) {
; CHECK-LABEL: st1h_i16:
; CHECK: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data,
                                          <vscale x 8 x i1> %pred,
                                          i16* %addr)
  ret void
}

define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %addr) {
; CHECK-LABEL: st1h_f16:
; CHECK: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data,
                                          <vscale x 8 x i1> %pred,
                                          half* %addr)
  ret void
}

;
; ST1W
;

define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %addr) {
; CHECK-LABEL: st1w_i32:
; CHECK: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data,
                                          <vscale x 4 x i1> %pred,
                                          i32* %addr)
  ret void
}

define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %addr) {
; CHECK-LABEL: st1w_f32:
; CHECK: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data,
                                          <vscale x 4 x i1> %pred,
                                          float* %addr)
  ret void
}

;
; ST1D
;

define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %addr) {
; CHECK-LABEL: st1d_i64:
; CHECK: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data,
                                          <vscale x 2 x i1> %pred,
                                          i64* %addr)
  ret void
}

define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %addr) {
; CHECK-LABEL: st1d_f64:
; CHECK: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data,
                                          <vscale x 2 x i1> %pred,
                                          double* %addr)
  ret void
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)

declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
