
Commit 2fe30bc

[AArch64] Add cost model for @experimental.vector.match (#118512)
The base cost approximates the expansion code in SelectionDAGBuilder. For the AArch64 cases that don't need generic expansion, fixed-length search vectors have a higher cost than scalable vectors due to the extra instructions to convert the boolean mask.
1 parent be06c79 commit 2fe30bc
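For reference, @llvm.experimental.vector.match takes a (possibly scalable) search vector, a fixed-length needle vector, and a mask, and returns a predicate marking which search elements occur in the needle. The sketch below shows the intrinsic in IR together with one way the new costs can be inspected; the function name, file name, and opt invocation are illustrative only (the committed coverage lives in the test file further down), and SVE2 is assumed so that the MATCH path is available.

    ; match-cost.ll (illustrative only)
    declare <vscale x 16 x i1> @llvm.experimental.vector.match.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, <vscale x 16 x i1>)

    define <vscale x 16 x i1> @example(<vscale x 16 x i8> %search, <16 x i8> %needle, <vscale x 16 x i1> %mask) {
      %r = call <vscale x 16 x i1> @llvm.experimental.vector.match.nxv16i8.v16i8(<vscale x 16 x i8> %search, <16 x i8> %needle, <vscale x 16 x i1> %mask)
      ret <vscale x 16 x i1> %r
    }

    ; The estimated costs can then be printed with the cost-model analysis, e.g. (assumed invocation):
    ;   opt -mtriple=aarch64 -mattr=+sve2 -passes="print<cost-model>" -disable-output match-cost.ll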

3 files changed: +96 −0 lines changed

llvm/include/llvm/CodeGen/BasicTTIImpl.h

Lines changed: 31 additions & 0 deletions
@@ -1935,6 +1935,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
       return Cost;
     }
+    case Intrinsic::experimental_vector_match:
+      return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);
     }
 
     // Assume that we need to scalarize this intrinsic.)
@@ -2190,6 +2192,35 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
     case Intrinsic::vector_reduce_fminimum:
       return thisT()->getMinMaxReductionCost(getMinMaxReductionIntrinsicOp(IID),
                                              VecOpTy, ICA.getFlags(), CostKind);
+    case Intrinsic::experimental_vector_match: {
+      auto *SearchTy = cast<VectorType>(ICA.getArgTypes()[0]);
+      auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
+      unsigned SearchSize = NeedleTy->getNumElements();
+
+      // If we're not expanding the intrinsic then we assume this is cheap to
+      // implement.
+      EVT SearchVT = getTLI()->getValueType(DL, SearchTy);
+      if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))
+        return getTypeLegalizationCost(RetTy).first;
+
+      // Approximate the cost based on the expansion code in
+      // SelectionDAGBuilder.
+      InstructionCost Cost = 0;
+      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,
+                                          CostKind, 1, nullptr, nullptr);
+      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,
+                                          CostKind, 0, nullptr, nullptr);
+      Cost += thisT()->getShuffleCost(TTI::SK_Broadcast, SearchTy, std::nullopt,
+                                      CostKind, 0, nullptr);
+      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,
+                                          CmpInst::ICMP_EQ, CostKind);
+      Cost +=
+          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
+      Cost *= SearchSize;
+      Cost +=
+          thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);
+      return Cost;
+    }
     case Intrinsic::abs:
       ISD = ISD::ABS;
       break;
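To see what the generic cost above is approximating, here is a hand-unrolled IR sketch of the SelectionDAGBuilder-style expansion for the nxv2i64/v2i64 case: each needle element is extracted, splatted across the search vector, compared for equality, and ORed into an accumulator, and the accumulated result is finally ANDed with the mask. This is only an illustration of the operations being counted (extract + insert + broadcast + compare + or per needle element, plus one final and); the real expansion happens on the SelectionDAG, not in IR, and the function name is invented for the example.

    define <vscale x 2 x i1> @expanded_match(<vscale x 2 x i64> %search, <2 x i64> %needle, <vscale x 2 x i1> %mask) {
      ; Needle element 0: extract, splat (insert + broadcast shuffle), compare.
      %e0 = extractelement <2 x i64> %needle, i64 0
      %i0 = insertelement <vscale x 2 x i64> poison, i64 %e0, i64 0
      %s0 = shufflevector <vscale x 2 x i64> %i0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
      %c0 = icmp eq <vscale x 2 x i64> %search, %s0
      ; Needle element 1: same again, then OR into the accumulator.
      %e1 = extractelement <2 x i64> %needle, i64 1
      %i1 = insertelement <vscale x 2 x i64> poison, i64 %e1, i64 0
      %s1 = shufflevector <vscale x 2 x i64> %i1, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
      %c1 = icmp eq <vscale x 2 x i64> %search, %s1
      %or = or <vscale x 2 x i1> %c0, %c1
      ; Mask off inactive lanes.
      %r = and <vscale x 2 x i1> %or, %mask
      ret <vscale x 2 x i1> %r
    }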

llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp

Lines changed: 17 additions & 0 deletions
@@ -905,6 +905,23 @@ AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
     }
     break;
   }
+  case Intrinsic::experimental_vector_match: {
+    auto *NeedleTy = cast<FixedVectorType>(ICA.getArgTypes()[1]);
+    EVT SearchVT = getTLI()->getValueType(DL, ICA.getArgTypes()[0]);
+    unsigned SearchSize = NeedleTy->getNumElements();
+    if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize)) {
+      // Base cost for MATCH instructions. At least on the Neoverse V2 and
+      // Neoverse V3, these are cheap operations with the same latency as a
+      // vector ADD. In most cases, however, we also need to do an extra DUP.
+      // For fixed-length vectors we currently need an extra five--six
+      // instructions besides the MATCH.
+      InstructionCost Cost = 4;
+      if (isa<FixedVectorType>(RetTy))
+        Cost += 10;
+      return Cost;
+    }
+    break;
+  }
   default:
     break;
   }
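Taken together with the generic code above, this explains the figures in the test below: scalable i8 and i16 searches can use SVE2 MATCH directly and get the base cost of 4; fixed-length i8 and i16 searches additionally pay for converting the predicate result, giving 4 + 10 = 14; and the i32 and i64 element types fall back to the generic expansion cost, where the per-element extract/insert/broadcast/compare/or work plus the final AND comes out to 5 * 4 + 1 = 21 for a four-element needle and 5 * 2 + 1 = 11 for a two-element one.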

llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll

Lines changed: 48 additions & 0 deletions
@@ -1360,6 +1360,54 @@ define void @histogram_nxv4i64(<vscale x 4 x ptr> %buckets, <vscale x 4 x i1> %m
   ret void
 }
 
+define void @match() #3 {
+; CHECK-VSCALE-1-LABEL: 'match'
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %match_nxv16i8_v16i8 = call <vscale x 16 x i1> @llvm.experimental.vector.match.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> undef, <vscale x 16 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %match_nxv8i16_v8i16 = call <vscale x 8 x i1> @llvm.experimental.vector.match.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> undef, <vscale x 8 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %match_nxv4i32_v4i32 = call <vscale x 4 x i1> @llvm.experimental.vector.match.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> undef, <vscale x 4 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %match_nxv2i64_v2i64 = call <vscale x 2 x i1> @llvm.experimental.vector.match.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> undef, <vscale x 2 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %match_v16i8_v16i8 = call <16 x i1> @llvm.experimental.vector.match.v16i8.v16i8(<16 x i8> undef, <16 x i8> undef, <16 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %match_v8i16_v8i16 = call <8 x i1> @llvm.experimental.vector.match.v8i16.v8i16(<8 x i16> undef, <8 x i16> undef, <8 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %match_v4i32_v4i32 = call <4 x i1> @llvm.experimental.vector.match.v4i32.v4i32(<4 x i32> undef, <4 x i32> undef, <4 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %match_v2i64_v2i64 = call <2 x i1> @llvm.experimental.vector.match.v2i64.v2i64(<2 x i64> undef, <2 x i64> undef, <2 x i1> undef)
+; CHECK-VSCALE-1-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; CHECK-VSCALE-2-LABEL: 'match'
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %match_nxv16i8_v16i8 = call <vscale x 16 x i1> @llvm.experimental.vector.match.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> undef, <vscale x 16 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %match_nxv8i16_v8i16 = call <vscale x 8 x i1> @llvm.experimental.vector.match.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> undef, <vscale x 8 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %match_nxv4i32_v4i32 = call <vscale x 4 x i1> @llvm.experimental.vector.match.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> undef, <vscale x 4 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %match_nxv2i64_v2i64 = call <vscale x 2 x i1> @llvm.experimental.vector.match.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> undef, <vscale x 2 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %match_v16i8_v16i8 = call <16 x i1> @llvm.experimental.vector.match.v16i8.v16i8(<16 x i8> undef, <16 x i8> undef, <16 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %match_v8i16_v8i16 = call <8 x i1> @llvm.experimental.vector.match.v8i16.v8i16(<8 x i16> undef, <8 x i16> undef, <8 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %match_v4i32_v4i32 = call <4 x i1> @llvm.experimental.vector.match.v4i32.v4i32(<4 x i32> undef, <4 x i32> undef, <4 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %match_v2i64_v2i64 = call <2 x i1> @llvm.experimental.vector.match.v2i64.v2i64(<2 x i64> undef, <2 x i64> undef, <2 x i1> undef)
+; CHECK-VSCALE-2-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; TYPE_BASED_ONLY-LABEL: 'match'
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %match_nxv16i8_v16i8 = call <vscale x 16 x i1> @llvm.experimental.vector.match.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> undef, <vscale x 16 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %match_nxv8i16_v8i16 = call <vscale x 8 x i1> @llvm.experimental.vector.match.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> undef, <vscale x 8 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %match_nxv4i32_v4i32 = call <vscale x 4 x i1> @llvm.experimental.vector.match.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> undef, <vscale x 4 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %match_nxv2i64_v2i64 = call <vscale x 2 x i1> @llvm.experimental.vector.match.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> undef, <vscale x 2 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %match_v16i8_v16i8 = call <16 x i1> @llvm.experimental.vector.match.v16i8.v16i8(<16 x i8> undef, <16 x i8> undef, <16 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 14 for instruction: %match_v8i16_v8i16 = call <8 x i1> @llvm.experimental.vector.match.v8i16.v8i16(<8 x i16> undef, <8 x i16> undef, <8 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 21 for instruction: %match_v4i32_v4i32 = call <4 x i1> @llvm.experimental.vector.match.v4i32.v4i32(<4 x i32> undef, <4 x i32> undef, <4 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %match_v2i64_v2i64 = call <2 x i1> @llvm.experimental.vector.match.v2i64.v2i64(<2 x i64> undef, <2 x i64> undef, <2 x i1> undef)
+; TYPE_BASED_ONLY-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+
+  %match_nxv16i8_v16i8 = call <vscale x 16 x i1> @llvm.experimental.vector.match.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> undef, <vscale x 16 x i1> undef)
+  %match_nxv8i16_v8i16 = call <vscale x 8 x i1> @llvm.experimental.vector.match.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> undef, <vscale x 8 x i1> undef)
+  %match_nxv4i32_v4i32 = call <vscale x 4 x i1> @llvm.experimental.vector.match.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> undef, <vscale x 4 x i1> undef)
+  %match_nxv2i64_v2i64 = call <vscale x 2 x i1> @llvm.experimental.vector.match.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> undef, <vscale x 2 x i1> undef)
+
+  %match_v16i8_v16i8 = call <16 x i1> @llvm.experimental.vector.match.v16i8.v16i8(<16 x i8> undef, <16 x i8> undef, <16 x i1> undef)
+  %match_v8i16_v8i16 = call <8 x i1> @llvm.experimental.vector.match.v8i16.v8i16(<8 x i16> undef, <8 x i16> undef, <8 x i1> undef)
+  %match_v4i32_v4i32 = call <4 x i1> @llvm.experimental.vector.match.v4i32.v4i32(<4 x i32> undef, <4 x i32> undef, <4 x i1> undef)
+  %match_v2i64_v2i64 = call <2 x i1> @llvm.experimental.vector.match.v2i64.v2i64(<2 x i64> undef, <2 x i64> undef, <2 x i1> undef)
+
+  ret void
+}
+
 declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)
 declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64, i64)
 declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)