Commit 59d6f03

[VPlan] Support narrowing widened loads in truncateToMinimalBitwidths.

MinBWs may also contain widened load instructions. Handle them by only narrowing their result. Fixes #77468.

Parent: 39b2104
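The gist, as a scalar analogy: a load's width is fixed by the in-memory type, so unlike arithmetic recipes its operands cannot be truncated; only the value it produces can be narrowed for downstream users. Below is a minimal sketch in plain C++, not LLVM code — the function names wide and narrow are illustrative, assuming a consumer that only demands the low 16 bits of a 32-bit load:

    #include <cstdint>
    #include <cstdio>

    // Before narrowing: all arithmetic runs at the load's width (32 bits).
    uint16_t wide(const uint32_t *src, uint32_t x) {
      uint32_t l = *src;  // 32-bit load
      uint32_t a = x & l; // 32-bit 'and'
      return (uint16_t)a; // only the low 16 bits are demanded
    }

    // After narrowing: the load itself stays 32-bit; a single trunc of its
    // result lets the 'and' run at the minimal bitwidth (16 bits).
    uint16_t narrow(const uint32_t *src, uint32_t x) {
      uint32_t l = *src;                 // load width is fixed by memory layout
      uint16_t t = (uint16_t)l;          // trunc of the load's result
      return (uint16_t)(x & 0xFFFFu) & t; // 16-bit 'and'
    }

    int main() {
      uint32_t v = 0xDEADBEEFu;
      // Both paths agree on the demanded bits; prints: 15 15
      printf("%u %u\n", (unsigned)wide(&v, 0xFFFF000Fu),
             (unsigned)narrow(&v, 0xFFFF000Fu));
    }

This is the shape of the vector code the patch produces in the test below: the wide load is kept, a trunc is applied to its result, and the dependent arithmetic proceeds at the minimal width.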

2 files changed: +96 −12 lines

llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp (21 additions, 12 deletions)

@@ -895,7 +895,10 @@ void VPlanTransforms::truncateToMinimalBitwidths(
            vp_depth_first_deep(Plan.getVectorLoopRegion()))) {
     for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
       if (!isa<VPWidenRecipe, VPWidenCastRecipe, VPReplicateRecipe,
-               VPWidenSelectRecipe>(&R))
+               VPWidenSelectRecipe, VPWidenMemoryInstructionRecipe>(&R))
+        continue;
+      if (isa<VPWidenMemoryInstructionRecipe>(&R) &&
+          cast<VPWidenMemoryInstructionRecipe>(&R)->isStore())
         continue;
 
       VPValue *ResultVPV = R.getVPSingleValue();
@@ -948,6 +951,23 @@ void VPlanTransforms::truncateToMinimalBitwidths(
 
       auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);
 
+      // Any wrapping introduced by shrinking this operation shouldn't be
+      // considered undefined behavior. So, we can't unconditionally copy
+      // arithmetic wrapping flags to VPW.
+      if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
+        VPW->dropPoisonGeneratingFlags();
+
+      // Extend result to original width.
+      auto *Ext = new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
+      Ext->insertAfter(&R);
+      ResultVPV->replaceAllUsesWith(Ext);
+      Ext->setOperand(0, ResultVPV);
+
+      if (isa<VPWidenMemoryInstructionRecipe>(&R)) {
+        assert(!cast<VPWidenMemoryInstructionRecipe>(&R)->isStore() && "stores cannot be narrowed");
+        continue;
+      }
+
       // Shrink operands by introducing truncates as needed.
       unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
       for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
@@ -979,17 +999,6 @@
         }
       }
 
-      // Any wrapping introduced by shrinking this operation shouldn't be
-      // considered undefined behavior. So, we can't unconditionally copy
-      // arithmetic wrapping flags to VPW.
-      if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
-        VPW->dropPoisonGeneratingFlags();
-
-      // Extend result to original width.
-      auto *Ext = new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
-      Ext->insertAfter(&R);
-      ResultVPV->replaceAllUsesWith(Ext);
-      Ext->setOperand(0, ResultVPV);
     }
   }
 
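One detail worth calling out: the dropPoisonGeneratingFlags() call was hoisted ahead of the operand-shrinking loop so it also runs before the new early continue for loads. The flags must be dropped at all because an operation that provably never wraps at the original width may wrap at the narrowed width. A sketch of that hazard in plain C++ — the concrete values are chosen here for illustration, not taken from the patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t a = 0x0001FFFFu, b = 1u;

      // At 32 bits the add does not wrap, so an "add nuw" flag would be valid.
      uint32_t wide = a + b; // 0x00020000

      // At the narrowed 16-bit width the same add wraps: 0xFFFF + 1 -> 0x0000.
      uint16_t narrow = (uint16_t)((uint16_t)a + (uint16_t)b);

      // The demanded low 16 bits still agree, so narrowing is sound...
      printf("%04x %04x\n", (unsigned)(wide & 0xFFFFu), (unsigned)narrow); // 0000 0000

      // ...but a 16-bit "add nuw" would now yield poison, which is why the
      // transform drops nuw/nsw rather than copying them to the narrowed recipe.
      return 0;
    }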
New test file (75 additions, 0 deletions)

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -p loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s

target datalayout = "p:16:16"

define void @pr77468(ptr noalias %src, ptr noalias %dst, i1 %x) {
; CHECK-LABEL: define void @pr77468(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i1 [[X:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[X]], i64 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = trunc i32 [[INDEX]] to i16
; CHECK-NEXT:    [[TMP0:%.*]] = add i16 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i32, ptr [[SRC]], i16 [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i32, ptr [[TMP1]], i32 0
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i1> [[BROADCAST_SPLAT]] to <4 x i16>
; CHECK-NEXT:    [[TMP4:%.*]] = trunc <4 x i32> [[WIDE_LOAD]] to <4 x i16>
; CHECK-NEXT:    [[TMP5:%.*]] = and <4 x i16> [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr i16, ptr [[DST]], i16 [[TMP0]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i16, ptr [[TMP6]], i32 0
; CHECK-NEXT:    store <4 x i16> [[TMP5]], ptr [[TMP7]], align 2
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 100
; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i16 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i16 [[IV]]
; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 1
; CHECK-NEXT:    [[X_EXT:%.*]] = zext i1 [[X]] to i32
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X_EXT]], [[L]]
; CHECK-NEXT:    [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i16 [[IV]]
; CHECK-NEXT:    [[T:%.*]] = trunc i32 [[AND]] to i16
; CHECK-NEXT:    store i16 [[T]], ptr [[GEP_DST]], align 2
; CHECK-NEXT:    [[IV_NEXT]] = add i16 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i16 [[IV_NEXT]], 100
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br label %loop

loop:
  %iv = phi i16 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.src = getelementptr i32, ptr %src, i16 %iv
  %l = load i32, ptr %gep.src, align 1
  %x.ext = zext i1 %x to i32
  %and = and i32 %x.ext, %l
  %gep.dst = getelementptr i16, ptr %dst, i16 %iv
  %t = trunc i32 %and to i16
  store i16 %t, ptr %gep.dst
  %iv.next = add i16 %iv, 1
  %exitcond.not = icmp eq i16 %iv.next, 100
  br i1 %exitcond.not, label %exit, label %loop

exit:
  ret void
}
;.
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
;.
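The CHECK lines capture exactly the behavior the patch enables: the widened load keeps its original type (load <4 x i32>), only its result is narrowed via trunc <4 x i32> ... to <4 x i16>, and the and then runs at the minimal 16-bit width. The store recipe is left untouched, matching the new isStore() bail-out in VPlanTransforms.cpp.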
