Skip to content

Commit 82c5d35

Browse files
authored
[VPlan] Add commutative binary OR matcher, use in transform. (#92539)
Split off from #89386, this extends the binary matcher to support matching commutative operations. This is used for a new m_c_BinaryOr matcher, which is applied in simplifyRecipe. PR: #92539
1 parent f6ae8e6 commit 82c5d35

File tree

4 files changed

+36
-23
lines changed

4 files changed

+36
-23
lines changed

llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h

Lines changed: 30 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ using AllUnaryRecipe_match =
157157
UnaryRecipe_match<Op0_t, Opcode, VPWidenRecipe, VPReplicateRecipe,
158158
VPWidenCastRecipe, VPInstruction>;
159159

160-
template <typename Op0_t, typename Op1_t, unsigned Opcode,
160+
template <typename Op0_t, typename Op1_t, unsigned Opcode, bool Commutative,
161161
typename... RecipeTys>
162162
struct BinaryRecipe_match {
163163
Op0_t Op0;
@@ -179,18 +179,23 @@ struct BinaryRecipe_match {
179179
return false;
180180
assert(R->getNumOperands() == 2 &&
181181
"recipe with matched opcode does not have 2 operands");
182-
return Op0.match(R->getOperand(0)) && Op1.match(R->getOperand(1));
182+
if (Op0.match(R->getOperand(0)) && Op1.match(R->getOperand(1)))
183+
return true;
184+
return Commutative && Op0.match(R->getOperand(1)) &&
185+
Op1.match(R->getOperand(0));
183186
}
184187
};
185188

186189
template <typename Op0_t, typename Op1_t, unsigned Opcode>
187190
using BinaryVPInstruction_match =
188-
BinaryRecipe_match<Op0_t, Op1_t, Opcode, VPInstruction>;
191+
BinaryRecipe_match<Op0_t, Op1_t, Opcode, /*Commutative*/ false,
192+
VPInstruction>;
189193

190-
template <typename Op0_t, typename Op1_t, unsigned Opcode>
194+
template <typename Op0_t, typename Op1_t, unsigned Opcode,
195+
bool Commutative = false>
191196
using AllBinaryRecipe_match =
192-
BinaryRecipe_match<Op0_t, Op1_t, Opcode, VPWidenRecipe, VPReplicateRecipe,
193-
VPWidenCastRecipe, VPInstruction>;
197+
BinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative, VPWidenRecipe,
198+
VPReplicateRecipe, VPWidenCastRecipe, VPInstruction>;
194199

195200
template <unsigned Opcode, typename Op0_t>
196201
inline UnaryVPInstruction_match<Op0_t, Opcode>
@@ -256,10 +261,11 @@ m_ZExtOrSExt(const Op0_t &Op0) {
256261
return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
257262
}
258263

259-
template <unsigned Opcode, typename Op0_t, typename Op1_t>
260-
inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode> m_Binary(const Op0_t &Op0,
261-
const Op1_t &Op1) {
262-
return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode>(Op0, Op1);
264+
template <unsigned Opcode, typename Op0_t, typename Op1_t,
265+
bool Commutative = false>
266+
inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>
267+
m_Binary(const Op0_t &Op0, const Op1_t &Op1) {
268+
return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>(Op0, Op1);
263269
}
264270

265271
template <typename Op0_t, typename Op1_t>
@@ -268,10 +274,21 @@ m_Mul(const Op0_t &Op0, const Op1_t &Op1) {
268274
return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
269275
}
270276

271-
template <typename Op0_t, typename Op1_t>
272-
inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or>
277+
/// Match a binary OR operation. Note that while conceptually the operands can
278+
/// be matched commutatively, \p Commutative defaults to false in line with the
279+
/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
280+
/// version of the matcher.
281+
template <typename Op0_t, typename Op1_t, bool Commutative = false>
282+
inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or, Commutative>
273283
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
274-
return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
284+
return m_Binary<Instruction::Or, Op0_t, Op1_t, Commutative>(Op0, Op1);
285+
}
286+
287+
template <typename Op0_t, typename Op1_t>
288+
inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or,
289+
/*Commutative*/ true>
290+
m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
291+
return m_BinaryOr<Op0_t, Op1_t, /*Commutative*/ true>(Op0, Op1);
275292
}
276293

277294
template <typename Op0_t, typename Op1_t>

llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -941,8 +941,8 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
941941
// recipes to be visited during simplification.
942942
VPValue *X, *Y, *X1, *Y1;
943943
if (match(&R,
944-
m_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
945-
m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
944+
m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
945+
m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
946946
X == X1 && Y == Y1) {
947947
R.getVPSingleValue()->replaceAllUsesWith(X);
948948
return;

llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -402,10 +402,9 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
402402
; TFCOMMON-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP10]])
403403
; TFCOMMON-NEXT: [[TMP12:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP8]], <vscale x 2 x i1> zeroinitializer
404404
; TFCOMMON-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP12]])
405-
; TFCOMMON-NEXT: [[TMP14:%.*]] = or <vscale x 2 x i1> [[TMP10]], [[TMP12]]
406405
; TFCOMMON-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP13]]
407406
; TFCOMMON-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
408-
; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[TMP14]])
407+
; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
409408
; TFCOMMON-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
410409
; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
411410
; TFCOMMON-NEXT: [[TMP16:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
@@ -453,16 +452,14 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
453452
; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
454453
; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP21]])
455454
; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD3]], <vscale x 2 x i1> [[TMP22]])
456-
; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = or <vscale x 2 x i1> [[TMP17]], [[TMP21]]
457-
; TFA_INTERLEAVE-NEXT: [[TMP26:%.*]] = or <vscale x 2 x i1> [[TMP18]], [[TMP22]]
458455
; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP17]], <vscale x 2 x i64> [[TMP19]], <vscale x 2 x i64> [[TMP23]]
459456
; TFA_INTERLEAVE-NEXT: [[PREDPHI4:%.*]] = select <vscale x 2 x i1> [[TMP18]], <vscale x 2 x i64> [[TMP20]], <vscale x 2 x i64> [[TMP24]]
460457
; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
461458
; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
462459
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 2
463460
; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = getelementptr inbounds i64, ptr [[TMP27]], i64 [[TMP29]]
464-
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[TMP25]])
465-
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[TMP26]])
461+
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
462+
; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
466463
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
467464
; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64()
468465
; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = mul i64 [[TMP31]], 2

llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -480,11 +480,10 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr
480480
; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> zeroinitializer
481481
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> poison)
482482
; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> zeroinitializer
483-
; CHECK-NEXT: [[TMP18:%.*]] = or <vscale x 4 x i1> [[TMP15]], [[TMP16]]
484483
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> [[WIDE_MASKED_GATHER]]
485484
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[TMP10]]
486485
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 0
487-
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP19]], i32 4, <vscale x 4 x i1> [[TMP18]])
486+
; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP19]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
488487
; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP21]]
489488
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
490489
; CHECK-NEXT: [[TMP22:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)

0 commit comments

Comments
 (0)