
Commit dad07ba

Authored and committed by Joe Ellis
[clang][AArch64][SVE] Avoid going through memory for VLAT <-> VLST casts
This change makes use of the llvm.experimental.vector.insert and
llvm.experimental.vector.extract intrinsics to avoid going through memory
when performing bitcasts between vector-length agnostic types and
vector-length specific types.

Depends on D91362

Reviewed By: c-rhodes

Differential Revision: https://reviews.llvm.org/D92761
1 parent 9322e57 commit dad07ba

8 files changed (+236, -304 lines)
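The effect is easiest to see from the C side. Below is a minimal illustrative sketch, not taken from this commit: the function name, the element type, and the N=512 vector width are assumptions chosen only to show the VLST <-> VLAT conversions whose lowering this patch changes.

// Illustrative only; build with something like -march=armv8-a+sve -msve-vector-bits=512 -O1.
#include <arm_sve.h>

#if __ARM_FEATURE_SVE_BITS == 512
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));

fixed_int32_t add_fixed(fixed_int32_t a, fixed_int32_t b) {
  // The fixed-length (VLST) arguments convert to the scalable (VLAT) type here;
  // with this patch that conversion is emitted as a call to
  // llvm.experimental.vector.insert instead of a store/load round trip.
  svint32_t sum = svadd_x(svptrue_b32(), a, b);
  // The scalable result converts back to the fixed-length return type, now
  // emitted as llvm.experimental.vector.extract.
  return sum;
}
#endif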

clang/lib/CodeGen/CGExprScalar.cpp

Lines changed: 32 additions & 0 deletions
@@ -1996,7 +1996,39 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
       }
     }
 
+    // If Src is a fixed vector and Dst is a scalable vector, and both have the
+    // same element type, use the llvm.experimental.vector.insert intrinsic to
+    // perform the bitcast.
+    if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+      if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
+        if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
+          llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
+          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
+          return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
+                                            "castScalableSve");
+        }
+      }
+    }
+
+    // If Src is a scalable vector and Dst is a fixed vector, and both have the
+    // same element type, use the llvm.experimental.vector.extract intrinsic to
+    // perform the bitcast.
+    if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
+      if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
+        if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
+          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
+          return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
+        }
+      }
+    }
+
     // Perform VLAT <-> VLST bitcast through memory.
+    // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
+    //       require the element types of the vectors to be the same, we
+    //       need to keep this around for casting between predicates, or more
+    //       generally for bitcasts between VLAT <-> VLST where the element
+    //       types of the vectors are not the same, until we figure out a better
+    //       way of doing these casts.
     if ((isa<llvm::FixedVectorType>(SrcTy) &&
          isa<llvm::ScalableVectorType>(DstTy)) ||
         (isa<llvm::ScalableVectorType>(SrcTy) &&
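The TODO above is worth illustrating: because the insert/extract intrinsics require matching element types, predicate casts still take the memory path. A hedged sketch (the function name and the 512-bit width are assumptions, not part of this commit) of such a cast:

// Illustrative only: predicate VLAT <-> VLST conversions are not covered by
// the new fast path and continue to be lowered through a stack slot.
#include <arm_sve.h>

#if __ARM_FEATURE_SVE_BITS == 512
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(512)));

fixed_bool_t to_fixed_pred(svbool_t pg) {
  // The scalable <vscale x 16 x i1> value and the fixed-length predicate do
  // not share an element type, so this conversion still goes through memory.
  return pg;
}
#endif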

clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c

Lines changed: 29 additions & 41 deletions
@@ -51,34 +51,22 @@ vec2048 x2048 = {0, 1, 2, 3, 3 , 2 , 1, 0, 0, 1, 2, 3, 3 , 2 , 1, 0,
 typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
 // CHECK128-LABEL: define <16 x i8> @f2(<16 x i8> %x)
 // CHECK128-NEXT: entry:
-// CHECK128-NEXT: %x.addr = alloca <16 x i8>, align 16
-// CHECK128-NEXT: %saved-call-rvalue = alloca <vscale x 16 x i8>, align 16
-// CHECK128-NEXT: store <16 x i8> %x, <16 x i8>* %x.addr, align 16
-// CHECK128-NEXT: %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK128-NEXT: %1 = bitcast <16 x i8>* %x.addr to <vscale x 16 x i8>*
-// CHECK128-NEXT: %2 = load <vscale x 16 x i8>, <vscale x 16 x i8>* %1, align 16
-// CHECK128-NEXT: %3 = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %2, i32 1)
-// CHECK128-NEXT: store <vscale x 16 x i8> %3, <vscale x 16 x i8>* %saved-call-rvalue, align 16
-// CHECK128-NEXT: %castFixedSve = bitcast <vscale x 16 x i8>* %saved-call-rvalue to <16 x i8>*
-// CHECK128-NEXT: %4 = load <16 x i8>, <16 x i8>* %castFixedSve, align 16
-// CHECK128-NEXT: ret <16 x i8> %4
+// CHECK128-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
+// CHECK128-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
+// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[TMP1]], i64 0)
+// CHECK128-NEXT: ret <16 x i8> [[CASTFIXEDSVE]]
 
 // CHECK-LABEL: define void @f2(
 // CHECK-SAME: <[[#div(VBITS,8)]] x i8>* noalias nocapture sret(<[[#div(VBITS,8)]] x i8>) align 16 %agg.result, <[[#div(VBITS,8)]] x i8>* nocapture readonly %0)
-// CHECK-NEXT: entry:
-// CHECK-NEXT: %x.addr = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: %saved-call-rvalue = alloca <vscale x 16 x i8>, align 16
-// CHECK-NEXT: %x = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* %0, align 16
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> %x, <[[#div(VBITS,8)]] x i8>* %x.addr, align 16
-// CHECK-NEXT: %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK-NEXT: %2 = bitcast <[[#div(VBITS,8)]] x i8>* %x.addr to <vscale x 16 x i8>*
-// CHECK-NEXT: %3 = load <vscale x 16 x i8>, <vscale x 16 x i8>* %2, align 16
-// CHECK-NEXT: %4 = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, i32 1)
-// CHECK-NEXT: store <vscale x 16 x i8> %4, <vscale x 16 x i8>* %saved-call-rvalue, align 16
-// CHECK-NEXT: %castFixedSve = bitcast <vscale x 16 x i8>* %saved-call-rvalue to <[[#div(VBITS,8)]] x i8>*
-// CHECK-NEXT: %5 = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* %castFixedSve, align 16
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> %5, <[[#div(VBITS,8)]] x i8>* %agg.result, align 16
-// CHECK-NEXT: ret void
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
+// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP1]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 0)
+// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT: ret void
 vec_int8 f2(vec_int8 x) { return svasrd_x(svptrue_b8(), x, 1); }
 #endif
 
@@ -90,24 +78,24 @@ void f3(vec1);
 typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
 
 // CHECK128-LABEL: define void @g(<vscale x 16 x i8> %x.coerce)
-// CHECK128-NEXT: entry:
-// CHECK128-NEXT: %x = alloca <16 x i8>, align 16
-// CHECK128-NEXT: %0 = bitcast <16 x i8>* %x to <vscale x 16 x i8>*
-// CHECK128-NEXT: store <vscale x 16 x i8> %x.coerce, <vscale x 16 x i8>* %0, align 16
-// CHECK128-NEXT: %x1 = load <16 x i8>, <16 x i8>* %x, align 16,
-// CHECK128-NEXT: call void @f3(<16 x i8> %x1) #4
-// CHECK128-NEXT: ret void
+// CHECK128-NEXT: entry:
+// CHECK128-NEXT: [[X:%.*]] = alloca <16 x i8>, align 16
+// CHECK128-NEXT: [[TMP0:%.*]] = bitcast <16 x i8>* [[X]] to <vscale x 16 x i8>*
+// CHECK128-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
+// CHECK128-NEXT: [[X1:%.*]] = load <16 x i8>, <16 x i8>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK128-NEXT: call void @f3(<16 x i8> [[X1]]) [[ATTR5:#.*]]
+// CHECK128-NEXT: ret void
 
 // CHECK-LABEL: define void @g(<vscale x 16 x i8> %x.coerce)
-// CHECK-NEXT: entry:
-// CHECK-NEXT: %x = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: %indirect-arg-temp = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: %0 = bitcast <[[#div(VBITS,8)]] x i8>* %x to <vscale x 16 x i8>*
-// CHECK-NEXT: store <vscale x 16 x i8> %x.coerce, <vscale x 16 x i8>* %0
-// CHECK-NEXT: %x1 = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* %x, align 16
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> %x1, <[[#div(VBITS,8)]] x i8>* %indirect-arg-temp
-// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* nonnull %indirect-arg-temp)
-// CHECK-NEXT: ret void
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <[[#div(VBITS,8)]] x i8>* [[X]] to <vscale x 16 x i8>*
+// CHECK-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
+// CHECK-NEXT: [[X1:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[X]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X1]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
+// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
+// CHECK-NEXT: ret void
 
 // CHECK128-LABEL: declare void @f3(<16 x i8>)
