Commit 659f92e

Revert "[LV] Extend trunc optimization to all IVs with constant integer steps"
This reverts commit r294967. This patch caused execution time slowdowns in a few LLVM test-suite tests, as reported by the clang-cmake-aarch64-quick bot. I'm reverting to investigate.

llvm-svn: 294973
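For context, the reverted patch let the vectorizer widen a trunc whose operand is any induction variable with a constant integer step, rather than only the primary induction variable. A minimal C++ sketch of a loop in that shape, mirroring the @non_primary_iv_trunc test removed below (the function and variable names are illustrative only):

    // Secondary induction variable `j` advances by a constant step of 2 and is
    // truncated from 64 to 32 bits before being stored. The reverted patch let
    // the vectorizer widen this trunc the same way it widens the primary IV.
    void non_primary_iv_trunc(int *a, long long n) {
      long long j = 0;
      for (long long i = 0; i < n; ++i) {
        a[i] = static_cast<int>(j); // trunc i64 -> i32 of a non-primary IV
        j += 2;
      }
    }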
1 parent 58a221a commit 659f92e

3 files changed: +14 -54 lines changed

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 12 additions & 20 deletions
@@ -4879,15 +4879,12 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
       // induction variable. Notice that we can only optimize the 'trunc' case
       // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
       // (c) other casts depend on pointer size.
-      if (auto *Trunc = dyn_cast<TruncInst>(CI))
-        if (auto *Phi = dyn_cast<PHINode>(Trunc->getOperand(0))) {
-          auto II = Legal->getInductionVars()->find(Phi);
-          if (II != Legal->getInductionVars()->end())
-            if (II->second.getConstIntStepValue()) {
-              widenIntInduction(Phi, Trunc);
-              break;
-            }
-        }
+      auto ID = Legal->getInductionVars()->lookup(OldInduction);
+      if (isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
+          ID.getConstIntStepValue()) {
+        widenIntInduction(OldInduction, cast<TruncInst>(CI));
+        break;
+      }
 
       /// Vectorize casts.
       Type *DestTy =
@@ -7227,17 +7224,12 @@ unsigned LoopVectorizationCostModel::getInstructionCost(Instruction *I,
   case Instruction::Trunc:
   case Instruction::FPTrunc:
   case Instruction::BitCast: {
-    // We optimize the truncation of induction variables having constant
-    // integer steps. The cost of these truncations is the same as the scalar
-    // operation.
-    if (auto *Trunc = dyn_cast<TruncInst>(I))
-      if (auto *Phi = dyn_cast<PHINode>(Trunc->getOperand(0))) {
-        auto II = Legal->getInductionVars()->find(Phi);
-        if (II != Legal->getInductionVars()->end())
-          if (II->second.getConstIntStepValue())
-            return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
-                                        Trunc->getSrcTy());
-      }
+    // We optimize the truncation of induction variable.
+    // The cost of these is the same as the scalar operation.
+    if (I->getOpcode() == Instruction::Trunc &&
+        Legal->isInductionVariable(I->getOperand(0)))
+      return TTI.getCastInstrCost(I->getOpcode(), I->getType(),
+                                  I->getOperand(0)->getType());
 
     Type *SrcScalarTy = I->getOperand(0)->getType();
     Type *SrcVecTy = ToVectorTy(SrcScalarTy, VF);
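To summarize the change above in one place (a sketch only, not compilable on its own; it assumes CI, Legal, and OldInduction are in scope as they are in the hunks): the restored code widens the trunc only when its operand is the loop's primary induction variable, while the reverted code accepted any induction PHI with a constant integer step.

    // Restored (post-revert) condition: the trunc operand must be the primary
    // induction variable, and that variable must have a constant integer step.
    bool WidenPrimaryOnly =
        isa<TruncInst>(CI) && CI->getOperand(0) == OldInduction &&
        Legal->getInductionVars()->lookup(OldInduction).getConstIntStepValue();

    // Reverted (r294967) condition: any induction PHI with a constant integer
    // step qualified, not just the primary one.
    bool WidenAnyConstStepIV = false;
    if (auto *Trunc = dyn_cast<TruncInst>(CI))
      if (auto *Phi = dyn_cast<PHINode>(Trunc->getOperand(0))) {
        auto II = Legal->getInductionVars()->find(Phi);
        WidenAnyConstStepIV = II != Legal->getInductionVars()->end() &&
                              II->second.getConstIntStepValue();
      }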

llvm/test/Transforms/LoopVectorize/induction.ll

Lines changed: 0 additions & 31 deletions
@@ -773,34 +773,3 @@ for.body:
 exit:
   ret void
 }
-
-; CHECK-LABEL: @non_primary_iv_trunc(
-; CHECK: vector.body:
-; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
-; CHECK: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 2>, %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ]
-; CHECK: [[TMP3:%.*]] = add i64 %index, 0
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* %a, i64 [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, i32* [[TMP4]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <2 x i32>*
-; CHECK-NEXT: store <2 x i32> [[VEC_IND]], <2 x i32>* [[TMP6]], align 4
-; CHECK-NEXT: %index.next = add i64 %index, 2
-; CHECK: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], <i32 4, i32 4>
-; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
-define void @non_primary_iv_trunc(i32* %a, i64 %n) {
-entry:
-  br label %for.body
-
-for.body:
-  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
-  %j = phi i64 [ %j.next, %for.body ], [ 0, %entry ]
-  %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i
-  %tmp1 = trunc i64 %j to i32
-  store i32 %tmp1, i32* %tmp0, align 4
-  %i.next = add nuw nsw i64 %i, 1
-  %j.next = add nuw nsw i64 %j, 2
-  %cond = icmp slt i64 %i.next, %n
-  br i1 %cond, label %for.body, label %for.end
-
-for.end:
-  ret void
-}

llvm/test/Transforms/LoopVectorize/reverse_iter.ll

Lines changed: 2 additions & 3 deletions
@@ -2,8 +2,7 @@
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 
-; PR15882: This test ensures that we do not produce wrapping arithmetic when
-; creating constant reverse step vectors.
+; Make sure that the reverse iterators are calculated using 64bit arithmetic, not 32.
 ;
 ; int foo(int n, int *A) {
 ; int sum;
@@ -14,7 +13,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 ;
 
 ;CHECK-LABEL: @foo(
-;CHECK: <i32 0, i32 -1, i32 -2, i32 -3>
+;CHECK: <i64 0, i64 -1, i64 -2, i64 -3>
 ;CHECK: ret
 define i32 @foo(i32 %n, i32* nocapture %A) {
   %1 = icmp sgt i32 %n, 0
