
Commit 2e7041f

Revert "[CodeGen][AArch64] Support arm_sve_vector_bits attribute"
Test CodeGen/attr-arm-sve-vector-bits-call.c is failing on some builders [1][2].
Reverting whilst I investigate.

[1] http://lab.llvm.org:8011/builders/fuchsia-x86_64-linux/builds/10375
[2] https://luci-milo.appspot.com/p/fuchsia/builders/ci/clang-linux-x64/b8870800848452818112

This reverts commit 4258734.
1 parent d870e36 commit 2e7041f

12 files changed: +40, -2066 lines
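
For context, the reverted patch let SVE types be given a fixed length with the arm_sve_vector_bits attribute. A minimal sketch of the user-facing feature, assuming a compiler with the patch applied and -msve-vector-bits=512 (the typedef mirrors the example in the deleted mangling comment below; add_one is an illustrative name, not from the patch):

#include <arm_sve.h>

// Fixed-length (VLS) alias of the sizeless SVE type; the attribute argument
// must match __ARM_FEATURE_SVE_BITS (512 under -msve-vector-bits=512).
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));

// With the patch, VLS values convert to and from the sizeless type, so the
// ACLE intrinsics can be applied to them directly.
fixed_int32_t add_one(fixed_int32_t v) {
  return svadd_x(svptrue_b32(), v, 1);
}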

clang/lib/AST/ItaniumMangle.cpp

Lines changed: 0 additions & 107 deletions
@@ -531,8 +531,6 @@ class CXXNameMangler {
   void mangleNeonVectorType(const DependentVectorType *T);
   void mangleAArch64NeonVectorType(const VectorType *T);
   void mangleAArch64NeonVectorType(const DependentVectorType *T);
-  void mangleAArch64FixedSveVectorType(const VectorType *T);
-  void mangleAArch64FixedSveVectorType(const DependentVectorType *T);
 
   void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
   void mangleMemberExprBase(const Expr *base, bool isArrow);
@@ -3325,103 +3323,6 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const DependentVectorType *T) {
   Diags.Report(T->getAttributeLoc(), DiagID);
 }
 
-// The AArch64 ACLE specifies that fixed-length SVE vector and predicate types
-// defined with the 'arm_sve_vector_bits' attribute map to the same AAPCS64
-// type as the sizeless variants.
-//
-// The mangling scheme for VLS types is implemented as a "pseudo" template:
-//
-//   '__SVE_VLS<<type>, <vector length>>'
-//
-// Combining the existing SVE type and a specific vector length (in bits).
-// For example:
-//
-//   typedef __SVInt32_t foo __attribute__((arm_sve_vector_bits(512)));
-//
-// is described as '__SVE_VLS<__SVInt32_t, 512u>' and mangled as:
-//
-//   "9__SVE_VLSI" + base type mangling + "Lj" + __ARM_FEATURE_SVE_BITS + "EE"
-//
-//   i.e. 9__SVE_VLSIu11__SVInt32_tLj512EE
-//
-// The latest ACLE specification (00bet5) does not contain details of this
-// mangling scheme, it will be specified in the next revision. The mangling
-// scheme is otherwise defined in the appendices to the Procedure Call Standard
-// for the Arm Architecture, see
-// https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst#appendix-c-mangling
-void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) {
-  assert((T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
-          T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) &&
-         "expected fixed-length SVE vector!");
-
-  QualType EltType = T->getElementType();
-  assert(EltType->isBuiltinType() &&
-         "expected builtin type for fixed-length SVE vector!");
-
-  StringRef TypeName;
-  switch (cast<BuiltinType>(EltType)->getKind()) {
-  case BuiltinType::SChar:
-    TypeName = "__SVInt8_t";
-    break;
-  case BuiltinType::UChar: {
-    if (T->getVectorKind() == VectorType::SveFixedLengthDataVector)
-      TypeName = "__SVUint8_t";
-    else
-      TypeName = "__SVBool_t";
-    break;
-  }
-  case BuiltinType::Short:
-    TypeName = "__SVInt16_t";
-    break;
-  case BuiltinType::UShort:
-    TypeName = "__SVUint16_t";
-    break;
-  case BuiltinType::Int:
-    TypeName = "__SVInt32_t";
-    break;
-  case BuiltinType::UInt:
-    TypeName = "__SVUint32_t";
-    break;
-  case BuiltinType::Long:
-    TypeName = "__SVInt64_t";
-    break;
-  case BuiltinType::ULong:
-    TypeName = "__SVUint64_t";
-    break;
-  case BuiltinType::Float16:
-    TypeName = "__SVFloat16_t";
-    break;
-  case BuiltinType::Float:
-    TypeName = "__SVFloat32_t";
-    break;
-  case BuiltinType::Double:
-    TypeName = "__SVFloat64_t";
-    break;
-  case BuiltinType::BFloat16:
-    TypeName = "__SVBfloat16_t";
-    break;
-  default:
-    llvm_unreachable("unexpected element type for fixed-length SVE vector!");
-  }
-
-  unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width;
-
-  if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
-    VecSizeInBits *= 8;
-
-  Out << "9__SVE_VLSI" << 'u' << TypeName.size() << TypeName << "Lj"
-      << VecSizeInBits << "EE";
-}
-
-void CXXNameMangler::mangleAArch64FixedSveVectorType(
-    const DependentVectorType *T) {
-  DiagnosticsEngine &Diags = Context.getDiags();
-  unsigned DiagID = Diags.getCustomDiagID(
-      DiagnosticsEngine::Error,
-      "cannot mangle this dependent fixed-length SVE vector type yet");
-  Diags.Report(T->getAttributeLoc(), DiagID);
-}
-
 // GNU extension: vector types
 // <type>        ::= <vector-type>
 // <vector-type> ::= Dv <positive dimension number> _
@@ -3442,10 +3343,6 @@ void CXXNameMangler::mangleType(const VectorType *T) {
     else
       mangleNeonVectorType(T);
     return;
-  } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
-             T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
-    mangleAArch64FixedSveVectorType(T);
-    return;
   }
   Out << "Dv" << T->getNumElements() << '_';
   if (T->getVectorKind() == VectorType::AltiVecPixel)
@@ -3468,10 +3365,6 @@ void CXXNameMangler::mangleType(const DependentVectorType *T) {
     else
       mangleNeonVectorType(T);
     return;
-  } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
-             T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
-    mangleAArch64FixedSveVectorType(T);
-    return;
   }
 
   Out << "Dv";

clang/lib/CodeGen/CGCall.cpp

Lines changed: 17 additions & 26 deletions
@@ -1119,13 +1119,12 @@ void CodeGenFunction::ExpandTypeToArgs(
 
 /// Create a temporary allocation for the purposes of coercion.
 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
-                                           CharUnits MinAlign,
-                                           const Twine &Name = "tmp") {
+                                           CharUnits MinAlign) {
   // Don't use an alignment that's worse than what LLVM would prefer.
   auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
   CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
 
-  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
+  return CGF.CreateTempAlloca(Ty, Align);
 }
 
 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
@@ -1231,15 +1230,14 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
   if (SrcTy == Ty)
     return CGF.Builder.CreateLoad(Src);
 
-  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
+  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
 
   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
-    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
-                                             DstSize.getFixedSize(), CGF);
+    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
     SrcTy = Src.getElementType();
   }
 
-  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
+  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
 
   // If the source and destination are integer or pointer types, just do an
   // extension or truncation to the desired type.
@@ -1250,8 +1248,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
   }
 
   // If load is legal, just bitcast the src pointer.
-  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
-      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
+  if (SrcSize >= DstSize) {
     // Generally SrcSize is never greater than DstSize, since this means we are
     // losing bits. However, this can happen in cases where the structure has
     // additional padding, for example due to a user specified alignment.
@@ -1264,12 +1261,10 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
   }
 
   // Otherwise do coercion through memory. This is stupid, but simple.
-  Address Tmp =
-      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
-  CGF.Builder.CreateMemCpy(
-      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
-      Src.getAlignment().getAsAlign(),
-      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
+  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
+  CGF.Builder.CreateMemCpy(Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+                           Src.getPointer(), Src.getAlignment().getAsAlign(),
+                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize));
   return CGF.Builder.CreateLoad(Tmp);
 }
 
@@ -1308,11 +1303,10 @@ static void CreateCoercedStore(llvm::Value *Src,
     return;
   }
 
-  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
+  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
 
   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
-    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
-                                             SrcSize.getFixedSize(), CGF);
+    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
     DstTy = Dst.getElementType();
   }
 
@@ -1334,12 +1328,10 @@ static void CreateCoercedStore(llvm::Value *Src,
     return;
   }
 
-  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
+  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
 
   // If store is legal, just bitcast the src pointer.
-  if (isa<llvm::ScalableVectorType>(SrcTy) ||
-      isa<llvm::ScalableVectorType>(DstTy) ||
-      SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
+  if (SrcSize <= DstSize) {
     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
     CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
   } else {
@@ -1354,10 +1346,9 @@ static void CreateCoercedStore(llvm::Value *Src,
     // to that information.
     Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
     CGF.Builder.CreateStore(Src, Tmp);
-    CGF.Builder.CreateMemCpy(
-        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
-        Tmp.getAlignment().getAsAlign(),
-        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
+    CGF.Builder.CreateMemCpy(Dst.getPointer(), Dst.getAlignment().getAsAlign(),
+                             Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
+                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize));
   }
 }

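The CGCall.cpp changes being reverted widened these sizes from uint64_t to llvm::TypeSize because getTypeAllocSize on a scalable vector type does not produce a single fixed byte count. A minimal sketch of why the extra guards were needed (canDirectlyCoerce is a hypothetical helper, not CGCall code; the API calls are the ones visible in the diff):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// For a scalable type such as <vscale x 4 x i32> the alloc size is only a
// known minimum scaled by the runtime vscale, so ordering comparisons are
// meaningful only when both operands are fixed.
static bool canDirectlyCoerce(const llvm::DataLayout &DL, llvm::Type *SrcTy,
                              llvm::Type *DstTy) {
  llvm::TypeSize SrcSize = DL.getTypeAllocSize(SrcTy);
  llvm::TypeSize DstSize = DL.getTypeAllocSize(DstTy);
  return !SrcSize.isScalable() && !DstSize.isScalable() &&
         SrcSize.getFixedSize() >= DstSize.getFixedSize();
}

Consistent with that, the deleted memcpy fallback sized the copy with getKnownMinSize(), the smallest size a scalable value can have, rather than a fixed size.
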
clang/lib/CodeGen/CGExprScalar.cpp

Lines changed: 0 additions & 28 deletions
@@ -2003,34 +2003,6 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
       }
     }
 
-    // Perform VLAT <-> VLST bitcast through memory.
-    if ((isa<llvm::FixedVectorType>(SrcTy) &&
-         isa<llvm::ScalableVectorType>(DstTy)) ||
-        (isa<llvm::ScalableVectorType>(SrcTy) &&
-         isa<llvm::FixedVectorType>(DstTy))) {
-      if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
-        // Call expressions can't have a scalar return unless the return type
-        // is a reference type so an lvalue can't be emitted. Create a temp
-        // alloca to store the call, bitcast the address then load.
-        QualType RetTy = CE->getCallReturnType(CGF.getContext());
-        Address Addr =
-            CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
-        LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
-        CGF.EmitStoreOfScalar(Src, LV);
-        Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
-                                            "castFixedSve");
-        LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
-        DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
-        return EmitLoadOfLValue(DestLV, CE->getExprLoc());
-      }
-
-      Address Addr = EmitLValue(E).getAddress(CGF);
-      Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
-      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
-      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
-      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
-    }
-
     return Builder.CreateBitCast(Src, DstTy);
   }
   case CK_AddressSpaceConversion: {
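
The block deleted here lowered casts between fixed-length (VLST) and sizeless (VLAT) SVE types by round-tripping through a stack slot, since LLVM IR has no direct bitcast between fixed and scalable vector types. A sketch of user code that exercised both arms of the deleted logic, again assuming -msve-vector-bits=512 and the reverted patch (function names are illustrative):

#include <arm_sve.h>

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));

// VLST -> VLAT: CodeGen sees <16 x i32> -> <vscale x 4 x i32>. The deleted
// code emitted the operand's lvalue, bitcast the pointer's element type, and
// reloaded the value with may-alias TBAA.
svint32_t to_sizeless(fixed_int32_t v) { return v; }

// The CallExpr arm: a call returning a scalar has no lvalue to reuse, so the
// deleted code first spilled the result into a "saved-call-rvalue" alloca.
fixed_int32_t from_call(void) { return svdup_s32(1); }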
