[LV][AArch64] Don't query registers for illegal scalable vector elts (#109411)
When trying to maximize vector bandwidth, we ask TTI for the number of registers required for a given operation. If the type of that operation is illegal for scalable vectors (e.g. <vscale x 4 x fp128>), we would crash. Instead, just return a default value and let the cost model reject the invalid operation later.
1 parent 7773dcd, commit 785337e
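The guard added below relies on the target hook TargetTransformInfo::isElementTypeLegalForScalableVector. As a rough sketch (an approximation for illustration, not the verbatim LLVM source), the AArch64 implementation accepts only the element types SVE data registers can actually hold, and fp128 is not among them:

// Rough sketch of AArch64's isElementTypeLegalForScalableVector (an
// approximation for illustration, not the verbatim LLVM source).
bool isElementTypeLegalForScalableVector(Type *Ty) const {
  // Pointers are fine: they are i64-sized on AArch64.
  if (Ty->isPointerTy())
    return true;
  // FP element types SVE can hold; bfloat is also legal on subtargets
  // with BF16 support (omitted here for brevity).
  if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
    return true;
  // Legal integer element widths. fp128 matches none of these cases,
  // so the new guard makes GetRegUsage return 0 for it at scalable VFs.
  return Ty->isIntegerTy(8) || Ty->isIntegerTy(16) ||
         Ty->isIntegerTy(32) || Ty->isIntegerTy(64);
}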

2 files changed: +79, -1 lines


llvm/lib/Transforms/Vectorize/LoopVectorize.cpp (3 additions, 1 deletion)
@@ -5207,7 +5207,9 @@ LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
 
   const auto &TTICapture = TTI;
   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
-    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
+    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty) ||
+        (VF.isScalable() &&
+         !TTICapture.isElementTypeLegalForScalableVector(Ty)))
       return 0;
     return TTICapture.getRegUsageForType(VectorType::get(Ty, VF));
   };
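Why the existing isValidElementType check was not enough: fp128 is a perfectly valid vector element type at the IR level, and only becomes a problem once a scalable VF is applied and AArch64 has to legalize the resulting type. A minimal standalone sketch against the LLVM C++ API illustrating the distinction (this snippet assumes an LLVM development setup and is not part of the patch):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *FP128 = Type::getFP128Ty(Ctx);

  // The IR-level check the old guard relied on: fp128 *is* a valid
  // vector element type, so the old code fell through to the TTI query.
  outs() << "isValidElementType(fp128): "
         << (VectorType::isValidElementType(FP128) ? "true" : "false")
         << "\n";

  // The type the register-usage query was then asked about. AArch64
  // cannot legalize this for SVE, which is what used to crash.
  VectorType *VTy = VectorType::get(FP128, ElementCount::getScalable(4));
  outs() << "queried type: " << *VTy << "\n"; // <vscale x 4 x fp128>
  return 0;
}

With the patch, GetRegUsage short-circuits to 0 for exactly this combination, and the scalable VF is discarded later by the cost model.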
New test file (76 additions, 0 deletions)

@@ -0,0 +1,76 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt < %s -mattr=+sve -passes=loop-vectorize -debug-only=loop-vectorize -vectorizer-maximize-bandwidth -force-vector-interleave=1 -S 2>&1 | FileCheck %s
; REQUIRES: asserts

target triple = "aarch64-unknown-linux-gnu"

;; Make sure we reject scalable vectors for fp128 types. We were previously
;; crashing before reaching the cost model when checking for the number of
;; registers required for a <vscale x 4 x fp128> when trying to maximize
;; vector bandwidth with SVE.

; CHECK: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %load.ext = fpext double %load.in to fp128

define void @load_ext_trunc_store(ptr readonly %in, ptr noalias %out, i64 %N) {
; CHECK-LABEL: define void @load_ext_trunc_store(
; CHECK-SAME: ptr readonly [[IN:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*]]:
; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1)
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK:       [[VECTOR_PH]]:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]]
; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
; CHECK:       [[VECTOR_BODY]]:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds double, ptr [[IN]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds double, ptr [[TMP2]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP4]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = fpext <4 x double> [[WIDE_LOAD]] to <4 x fp128>
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP5:%.*]] = fptrunc <4 x fp128> [[TMP3]] to <4 x float>
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 0
; CHECK-NEXT:    store <4 x float> [[TMP5]], ptr [[TMP12]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       [[MIDDLE_BLOCK]]:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[UMAX]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label %[[FOR_EXIT:.*]], label %[[SCALAR_PH]]
; CHECK:       [[SCALAR_PH]]:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
; CHECK:       [[FOR_BODY]]:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT:    [[GEP_IN:%.*]] = getelementptr inbounds nuw double, ptr [[IN]], i64 [[IV]]
; CHECK-NEXT:    [[LOAD_IN:%.*]] = load double, ptr [[GEP_IN]], align 8
; CHECK-NEXT:    [[LOAD_EXT:%.*]] = fpext double [[LOAD_IN]] to fp128
; CHECK-NEXT:    [[GEP_OUT:%.*]] = getelementptr inbounds nuw float, ptr [[OUT]], i64 [[IV]]
; CHECK-NEXT:    [[TRUNC_OUT:%.*]] = fptrunc fp128 [[LOAD_EXT]] to float
; CHECK-NEXT:    store float [[TRUNC_OUT]], ptr [[GEP_OUT]], align 4
; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ult i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label %[[FOR_BODY]], label %[[FOR_EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       [[FOR_EXIT]]:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.in = getelementptr inbounds nuw double, ptr %in, i64 %iv
  %load.in = load double, ptr %gep.in, align 8
  %load.ext = fpext double %load.in to fp128
  %gep.out = getelementptr inbounds nuw float, ptr %out, i64 %iv
  %trunc.out = fptrunc fp128 %load.ext to float
  store float %trunc.out, ptr %gep.out, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp ult i64 %iv.next, %N
  br i1 %exitcond, label %for.body, label %for.exit

for.exit:
  ret void
}
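The autogenerated checks above document the fallback behaviour: with the fix in place, the scalable VFs are costed as Invalid (the plain CHECK line near the top of the file), and the vectorizer instead emits a fixed-width vector body operating on <4 x double>, <4 x fp128>, and <4 x float> values, rather than crashing while computing register usage for <vscale x 4 x fp128>.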
