
Commit 352f87d

Merge from 'main' to 'sycl-web' (#8)
CONFLICT (content): Merge conflict in clang/lib/CodeGen/CGExprScalar.cpp
2 parents: a631a0f + dad07ba

33 files changed, +775 -735 lines

clang/lib/CodeGen/CGExprScalar.cpp

Lines changed: 33 additions & 0 deletions
@@ -2019,7 +2019,40 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
         Src = Builder.CreateAddrSpaceCast(Src, SrcNewAS);
     }
+
+    // If Src is a fixed vector and Dst is a scalable vector, and both have the
+    // same element type, use the llvm.experimental.vector.insert intrinsic to
+    // perform the bitcast.
+    if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+      if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
+        if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
+          llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
+          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
+          return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
+                                            "castScalableSve");
+        }
+      }
+    }
+
+    // If Src is a scalable vector and Dst is a fixed vector, and both have the
+    // same element type, use the llvm.experimental.vector.extract intrinsic to
+    // perform the bitcast.
+    if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
+      if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
+        if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
+          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
+          return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
+        }
+      }
+    }
+
     // Perform VLAT <-> VLST bitcast through memory.
+    // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
+    //       require the element types of the vectors to be the same, we
+    //       need to keep this around for casting between predicates, or more
+    //       generally for bitcasts between VLAT <-> VLST where the element
+    //       types of the vectors are not the same, until we figure out a better
+    //       way of doing these casts.
     if ((isa<llvm::FixedVectorType>(SrcTy) &&
          isa<llvm::ScalableVectorType>(DstTy)) ||
         (isa<llvm::ScalableVectorType>(SrcTy) &&
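
For context (not part of the diff), the C-level construct this path handles is the implicit conversion between a sizeless SVE type and a fixed-length type whose element type matches. A minimal sketch, assuming an SVE-enabled Clang invoked with -msve-vector-bits=256; the typedef and function names are illustrative, not taken from the commit:

#include <arm_sve.h>

#if defined(__ARM_FEATURE_SVE_BITS) && __ARM_FEATURE_SVE_BITS == 256
/* Fixed-length ACLE type with the same i8 element type as svint8_t. */
typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(256)));

/* Fixed -> scalable: with this change the cast should be emitted as a call to
 * llvm.experimental.vector.insert rather than a store/load through a temporary. */
svint8_t to_scalable(fixed_int8_t v) { return v; }

/* Scalable -> fixed: emitted via llvm.experimental.vector.extract. */
fixed_int8_t to_fixed(svint8_t v) { return v; }
#endif

The test changes below check this lowering through the svasrd_x call in f2 and the call to f3 in g.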

clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c

Lines changed: 29 additions & 41 deletions
@@ -51,34 +51,22 @@ vec2048 x2048 = {0, 1, 2, 3, 3 , 2 , 1, 0, 0, 1, 2, 3, 3 , 2 , 1, 0,
 typedef int8_t vec_int8 __attribute__((vector_size(N / 8)));
 // CHECK128-LABEL: define <16 x i8> @f2(<16 x i8> %x)
 // CHECK128-NEXT: entry:
-// CHECK128-NEXT: %x.addr = alloca <16 x i8>, align 16
-// CHECK128-NEXT: %saved-call-rvalue = alloca <vscale x 16 x i8>, align 16
-// CHECK128-NEXT: store <16 x i8> %x, <16 x i8>* %x.addr, align 16
-// CHECK128-NEXT: %0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK128-NEXT: %1 = bitcast <16 x i8>* %x.addr to <vscale x 16 x i8>*
-// CHECK128-NEXT: %2 = load <vscale x 16 x i8>, <vscale x 16 x i8>* %1, align 16
-// CHECK128-NEXT: %3 = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %2, i32 1)
-// CHECK128-NEXT: store <vscale x 16 x i8> %3, <vscale x 16 x i8>* %saved-call-rvalue, align 16
-// CHECK128-NEXT: %castFixedSve = bitcast <vscale x 16 x i8>* %saved-call-rvalue to <16 x i8>*
-// CHECK128-NEXT: %4 = load <16 x i8>, <16 x i8>* %castFixedSve, align 16
-// CHECK128-NEXT: ret <16 x i8> %4
+// CHECK128-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
+// CHECK128-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
+// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[TMP1]], i64 0)
+// CHECK128-NEXT: ret <16 x i8> [[CASTFIXEDSVE]]
 
 // CHECK-LABEL: define void @f2(
 // CHECK-SAME: <[[#div(VBITS,8)]] x i8>* noalias nocapture sret(<[[#div(VBITS,8)]] x i8>) align 16 %agg.result, <[[#div(VBITS,8)]] x i8>* nocapture readonly %0)
-// CHECK-NEXT: entry:
-// CHECK-NEXT: %x.addr = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: %saved-call-rvalue = alloca <vscale x 16 x i8>, align 16
-// CHECK-NEXT: %x = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* %0, align 16
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> %x, <[[#div(VBITS,8)]] x i8>* %x.addr, align 16
-// CHECK-NEXT: %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK-NEXT: %2 = bitcast <[[#div(VBITS,8)]] x i8>* %x.addr to <vscale x 16 x i8>*
-// CHECK-NEXT: %3 = load <vscale x 16 x i8>, <vscale x 16 x i8>* %2, align 16
-// CHECK-NEXT: %4 = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %3, i32 1)
-// CHECK-NEXT: store <vscale x 16 x i8> %4, <vscale x 16 x i8>* %saved-call-rvalue, align 16
-// CHECK-NEXT: %castFixedSve = bitcast <vscale x 16 x i8>* %saved-call-rvalue to <[[#div(VBITS,8)]] x i8>*
-// CHECK-NEXT: %5 = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* %castFixedSve, align 16
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> %5, <[[#div(VBITS,8)]] x i8>* %agg.result, align 16
-// CHECK-NEXT: ret void
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
+// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP1]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 0)
+// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT: ret void
 vec_int8 f2(vec_int8 x) { return svasrd_x(svptrue_b8(), x, 1); }
 #endif
 
@@ -90,24 +78,24 @@ void f3(vec1);
 typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
 
 // CHECK128-LABEL: define void @g(<vscale x 16 x i8> %x.coerce)
-// CHECK128-NEXT: entry:
-// CHECK128-NEXT: %x = alloca <16 x i8>, align 16
-// CHECK128-NEXT: %0 = bitcast <16 x i8>* %x to <vscale x 16 x i8>*
-// CHECK128-NEXT: store <vscale x 16 x i8> %x.coerce, <vscale x 16 x i8>* %0, align 16
-// CHECK128-NEXT: %x1 = load <16 x i8>, <16 x i8>* %x, align 16,
-// CHECK128-NEXT: call void @f3(<16 x i8> %x1) #4
-// CHECK128-NEXT: ret void
+// CHECK128-NEXT: entry:
+// CHECK128-NEXT: [[X:%.*]] = alloca <16 x i8>, align 16
+// CHECK128-NEXT: [[TMP0:%.*]] = bitcast <16 x i8>* [[X]] to <vscale x 16 x i8>*
+// CHECK128-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
+// CHECK128-NEXT: [[X1:%.*]] = load <16 x i8>, <16 x i8>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK128-NEXT: call void @f3(<16 x i8> [[X1]]) [[ATTR5:#.*]]
+// CHECK128-NEXT: ret void
 
 // CHECK-LABEL: define void @g(<vscale x 16 x i8> %x.coerce)
-// CHECK-NEXT: entry:
-// CHECK-NEXT: %x = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: %indirect-arg-temp = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: %0 = bitcast <[[#div(VBITS,8)]] x i8>* %x to <vscale x 16 x i8>*
-// CHECK-NEXT: store <vscale x 16 x i8> %x.coerce, <vscale x 16 x i8>* %0
-// CHECK-NEXT: %x1 = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* %x, align 16
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> %x1, <[[#div(VBITS,8)]] x i8>* %indirect-arg-temp
-// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* nonnull %indirect-arg-temp)
-// CHECK-NEXT: ret void
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <[[#div(VBITS,8)]] x i8>* [[X]] to <vscale x 16 x i8>*
+// CHECK-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
+// CHECK-NEXT: [[X1:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[X]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X1]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
+// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
+// CHECK-NEXT: ret void
 
 // CHECK128-LABEL: declare void @f3(<16 x i8>)
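
The TODO kept in the CGExprScalar.cpp hunk notes that casts whose element types differ, such as fixed-length predicates, still take the memory path. A minimal sketch of that case, again with illustrative names and assuming -msve-vector-bits=256 (it is not part of this commit's tests):

#include <arm_sve.h>

#if defined(__ARM_FEATURE_SVE_BITS) && __ARM_FEATURE_SVE_BITS == 256
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(256)));

/* The fixed-length predicate is lowered to an i8 container while the sizeless
 * predicate is <vscale x 16 x i1>, so the element types differ and these
 * casts continue to go through memory, as the TODO above describes. */
fixed_bool_t pred_to_fixed(svbool_t p) { return p; }
svbool_t pred_to_scalable(fixed_bool_t p) { return p; }
#endif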
