[InstCombine] Implement vp.reverse reordering/elimination through binop/unop #143963

Merged (6 commits) on Jun 18, 2025
19 changes: 19 additions & 0 deletions llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3590,6 +3590,25 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
break;
}
case Intrinsic::experimental_vp_reverse: {
Value *X;
Value *Vec = II->getArgOperand(0);
Value *Mask = II->getArgOperand(1);
if (!match(Mask, m_AllOnes()))
break;
Value *EVL = II->getArgOperand(2);
// rev(unop rev(X)) --> unop X
if (match(Vec,
m_OneUse(m_UnOp(m_Intrinsic<Intrinsic::experimental_vp_reverse>(
m_Value(X), m_AllOnes(), m_Specific(EVL)))))) {
auto *OldUnOp = cast<UnaryOperator>(Vec);
auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),
II->getIterator());
return replaceInstUsesWith(CI, NewUnOp);
}
break;
}
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_and: {
// Canonicalize logical or/and reductions:
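For reference, the new case folds rev(unop rev(X)) --> unop X when both vp.reverse calls use an all-true mask and the same EVL. The standalone C++ sketch below is my illustration, not part of the patch: it models vp.reverse on a plain array, treating lanes past EVL as don't-care (poison in the IR), and checks that the forms before and after the combine agree on the defined lanes.

#include <cassert>
#include <utility>
#include <vector>

// Hypothetical model of vp.reverse with an all-true mask: reverse the first
// EVL lanes; lanes past EVL are "don't care" (poison in the IR).
static std::vector<float> vpReverse(std::vector<float> V, unsigned EVL) {
  for (unsigned I = 0; I < EVL / 2; ++I)
    std::swap(V[I], V[EVL - 1 - I]);
  return V;
}

int main() {
  std::vector<float> X = {1.0f, -2.0f, 3.0f, -4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
  unsigned EVL = 5;

  // Before the combine: rev(fneg(rev(X))).
  std::vector<float> Rev = vpReverse(X, EVL);
  for (float &E : Rev)
    E = -E; // the element-wise unary op
  std::vector<float> Before = vpReverse(Rev, EVL);

  // After the combine: fneg(X).
  std::vector<float> After = X;
  for (float &E : After)
    E = -E;

  // Only the first EVL lanes of the outer reverse are defined; they match.
  for (unsigned I = 0; I < EVL; ++I)
    assert(Before[I] == After[I]);
  return 0;
}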
33 changes: 33 additions & 0 deletions llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2231,6 +2231,39 @@ Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
return createBinOpReverse(LHS, V2);

auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
if (auto *BO = dyn_cast<BinaryOperator>(V))
BO->copyIRFlags(&Inst);

ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
Value *AllTrueMask = Builder.CreateVectorSplat(EC, Builder.getTrue());
Module *M = Inst.getModule();
Function *F = Intrinsic::getOrInsertDeclaration(
M, Intrinsic::experimental_vp_reverse, V->getType());
return CallInst::Create(F, {V, AllTrueMask, EVL});
};

Value *EVL;
if (match(LHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
m_Value(V1), m_AllOnes(), m_Value(EVL)))) {
// Op(rev(V1), rev(V2)) -> rev(Op(V1, V2))
if (match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
m_Value(V2), m_AllOnes(), m_Specific(EVL))) &&
(LHS->hasOneUse() || RHS->hasOneUse() ||
(LHS == RHS && LHS->hasNUses(2))))
return createBinOpVPReverse(V1, V2, EVL);

    // Op(rev(V1), RHSSplat) -> rev(Op(V1, RHSSplat))
if (LHS->hasOneUse() && isSplatValue(RHS))
return createBinOpVPReverse(V1, RHS, EVL);
}
// Op(LHSSplat, rev(V2)) -> rev(Op(LHSSplat, V2))
else if (isSplatValue(LHS) &&
match(RHS, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
m_Value(V2), m_AllOnes(), m_Value(EVL))))
return createBinOpVPReverse(LHS, V2, EVL);

// It may not be safe to reorder shuffles and things like div, urem, etc.
// because we may trap when executing those ops on unknown vector elements.
// See PR20059.
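The foldVectorBinop change applies the same reasoning to binary operators: Op(vp.reverse(V1), vp.reverse(V2)) with matching all-true masks and EVLs becomes vp.reverse(Op(V1, V2)), and a reverse paired with a splat operand is handled the same way because a splat is unchanged by reversal. The one-use checks keep the rewrite from increasing the number of vp.reverse calls. Below is a small standalone C++ model of the two-reverse case, my illustration under the same assumptions rather than LLVM code:

#include <cassert>
#include <utility>
#include <vector>

// Hypothetical model of vp.reverse with an all-true mask: reverse the first
// EVL lanes; lanes past EVL are "don't care" (poison in the IR).
static std::vector<int> vpReverse(std::vector<int> V, unsigned EVL) {
  for (unsigned I = 0; I < EVL / 2; ++I)
    std::swap(V[I], V[EVL - 1 - I]);
  return V;
}

int main() {
  std::vector<int> A = {10, 20, 30, 40, 50, 60};
  std::vector<int> B = {1, 2, 3, 4, 5, 6};
  unsigned EVL = 4;

  // Before the rewrite: add(rev(A), rev(B)) -- two reverses.
  std::vector<int> RevA = vpReverse(A, EVL), RevB = vpReverse(B, EVL);
  std::vector<int> Before(A.size());
  for (unsigned I = 0; I < A.size(); ++I)
    Before[I] = RevA[I] + RevB[I];

  // After the rewrite: rev(add(A, B)) -- one reverse.
  std::vector<int> Sum(A.size());
  for (unsigned I = 0; I < A.size(); ++I)
    Sum[I] = A[I] + B[I];
  std::vector<int> After = vpReverse(Sum, EVL);

  // The two forms agree on the first EVL lanes; the tail is poison either way.
  for (unsigned I = 0; I < EVL; ++I)
    assert(Before[I] == After[I]);
  return 0;
}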
68 changes: 45 additions & 23 deletions llvm/test/Transforms/InstCombine/vp-reverse.ll
@@ -3,11 +3,8 @@

define <vscale x 4 x i32> @binop_reverse_elim(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 %evl) {
; CHECK-LABEL: @binop_reverse_elim(
; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL:%.*]])
; CHECK-NEXT: [[B:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[B1:%.*]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A]], [[B]]
; CHECK-NEXT: [[ADD_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD_REV]]
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %a, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%b.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -16,8 +13,10 @@ define <vscale x 4 x i32> @binop_reverse_elim(<vscale x 4 x i32> %a, <vscale x 4
ret <vscale x 4 x i32> %add.rev
}

define <vscale x 4 x i32> @binop_reverse_elim2(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl) {
; CHECK-LABEL: @binop_reverse_elim2(
; Negative test - the mask needs to be reversed between the inner and
; the outer to be correct.
define <vscale x 4 x i32> @binop_reverse_elim_samemask(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl) {
; CHECK-LABEL: @binop_reverse_elim_samemask(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]], <vscale x 4 x i1> [[M:%.*]], i32 [[EVL:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i1> [[M]], i32 [[EVL]])
; CHECK-NEXT: [[ADD:%.*]] = add nsw <vscale x 4 x i32> [[A_REV]], [[B_REV]]
@@ -48,10 +47,9 @@ define <vscale x 4 x i32> @binop_reverse_elim_diffmask(<vscale x 4 x i32> %a, <v

define <vscale x 4 x i32> @binop_reverse_elim_diffevl(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 %evl) {
; CHECK-LABEL: @binop_reverse_elim_diffevl(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL:%.*]])
; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: [[ADD:%.*]] = add nsw <vscale x 4 x i32> [[A_REV]], [[B_REV]]
; CHECK-NEXT: [[ADD_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[ADD]], <vscale x 4 x i1> splat (i1 true), i32 10)
; CHECK-NEXT: [[ADD:%.*]] = add nsw <vscale x 4 x i32> [[A_REV:%.*]], [[B_REV:%.*]]
; CHECK-NEXT: [[ADD1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[ADD]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL:%.*]])
; CHECK-NEXT: [[ADD_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]], <vscale x 4 x i1> splat (i1 true), i32 10)
@preames (Collaborator, Author) commented on Jun 16, 2025:
I believe we could add a separate combine to take the min of the two EVLs here since all the other elements are poison. Possibly a followup.

Edit: nevermind, reversing the bottom 10 elements, and then reversing the bottom 5 of that isn't the same as the original bottom 5.
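
A concrete instance of that observation (my own worked example in standalone C++, not from the thread): reversing the first 10 lanes and then the first 5 lanes of the result does not give back the original first 5 lanes, so folding the two reverses by taking the smaller EVL would be wrong.

#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical model of vp.reverse with an all-true mask: reverse only the
// first EVL lanes.
static std::vector<int> vpReverse(std::vector<int> V, unsigned EVL) {
  for (unsigned I = 0; I < EVL / 2; ++I)
    std::swap(V[I], V[EVL - 1 - I]);
  return V;
}

int main() {
  std::vector<int> X = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  // Reverse the first 10 lanes, then the first 5 lanes of that result.
  std::vector<int> Y = vpReverse(vpReverse(X, 10), 5);
  // Y[0..4] is {5, 6, 7, 8, 9}, not the original {0, 1, 2, 3, 4}.
  for (unsigned I = 0; I < 5; ++I)
    std::printf("Y[%u] = %d, X[%u] = %d\n", I, Y[I], I, X[I]);
  return 0;
}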

; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD_REV]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %a, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -63,10 +61,8 @@ define <vscale x 4 x i32> @binop_reverse_elim_diffevl(<vscale x 4 x i32> %a, <vs

define <vscale x 4 x i32> @binop_reverse_splat_elim(<vscale x 4 x i32> %a, i32 %evl) {
; CHECK-LABEL: @binop_reverse_splat_elim(
; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL:%.*]])
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A]], splat (i32 22)
; CHECK-NEXT: [[ADD_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD_REV]]
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A:%.*]], splat (i32 22)
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %a, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%add = add nsw <vscale x 4 x i32> %a.rev, splat (i32 22)
@@ -76,23 +72,49 @@ define <vscale x 4 x i32> @binop_reverse_splat_elim(<vscale x 4 x i32> %a, i32 %

define <vscale x 4 x i32> @binop_reverse_splat_elim2(<vscale x 4 x i32> %a, i32 %evl) {
; CHECK-LABEL: @binop_reverse_splat_elim2(
; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL:%.*]])
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A]], splat (i32 22)
; CHECK-NEXT: [[ADD_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD_REV]]
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A:%.*]], splat (i32 22)
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %a, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%add = add nsw <vscale x 4 x i32> splat (i32 22), %a.rev
%add.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %add, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i32> %add.rev
}

define <vscale x 4 x i32> @binop_reverse_splat_elim3(<vscale x 4 x i32> %a, i32 %b, i32 %evl) {
; CHECK-LABEL: @binop_reverse_splat_elim3(
; CHECK-NEXT: [[B_INS:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_VEC:%.*]] = shufflevector <vscale x 4 x i32> [[B_INS]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[ADD:%.*]] = add nsw <vscale x 4 x i32> [[B_VEC]], [[A_REV:%.*]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
%b.ins = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
%b.vec = shufflevector <vscale x 4 x i32> %b.ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %a, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%add = add nsw <vscale x 4 x i32> %b.vec, %a.rev
%add.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %add, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i32> %add.rev
}

define <vscale x 4 x i32> @binop_reverse_splat_elim4(<vscale x 4 x i32> %a, i32 %b, i32 %evl) {
; CHECK-LABEL: @binop_reverse_splat_elim4(
; CHECK-NEXT: [[B_INS:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_VEC:%.*]] = shufflevector <vscale x 4 x i32> [[B_INS]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[ADD1:%.*]] = add nsw <vscale x 4 x i32> [[A:%.*]], [[B_VEC]]
; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
;
%b.ins = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
%b.vec = shufflevector <vscale x 4 x i32> %b.ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%a.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %a, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%add = add nsw <vscale x 4 x i32> %a.rev, %b.vec
%add.rev = tail call <vscale x 4 x i32> @llvm.experimental.vp.reverse(<vscale x 4 x i32> %add, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i32> %add.rev
}

define <vscale x 4 x float> @unop_reverse_splat_elim(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 %evl) {
; CHECK-LABEL: @unop_reverse_splat_elim(
; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[A:%.*]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL:%.*]])
; CHECK-NEXT: [[OP:%.*]] = fneg <vscale x 4 x float> [[A_REV]]
; CHECK-NEXT: [[OP_REV:%.*]] = tail call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[OP]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: ret <vscale x 4 x float> [[OP_REV]]
; CHECK-NEXT: [[OP:%.*]] = fneg <vscale x 4 x float> [[A_REV:%.*]]
; CHECK-NEXT: ret <vscale x 4 x float> [[OP]]
;
%a.rev = tail call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 true), i32 %evl)
%op = fneg <vscale x 4 x float> %a.rev