
Commit 714ae91

lukel97 authored and smallp-o-p committed
[RISCV] Account for factor in interleave memory op costs (llvm#111511)
Currently we cost an interleaved memory op as if it were a load/store of the widened vector type, but this undercosts it in all cases when compared to the measured performance of today's hardware.

On the x280 at NF=2, and on the spacemit-x60 at NF=2, 3 and 4, a segmented load is carried out as one wide load plus NF LMUL shuffle ops: https://github.com/preames/bp3-microarch#vlseg_lmul_x_sew_throughput

All other NFs go through a slow path. On the spacemit-x60 this is proportional to VLMAX * NF, and on the x280 it is proportional to the number of segments.

This patch increases the cost by modelling the wide load + NF LMUL shuffle ops for the lowest common denominator NF=2, and a slower cost proportional to VL for the other NFs. In a follow-up patch we can add a tuning flag to use the faster cost model for NF=3 and 4 on the spacemit-x60.

Note that the FIXME about illegal vectors appears to have been fixed in llvm#100436.
1 parent b30116d commit 714ae91
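As a rough sketch of the shape of the new cost model, the following standalone C++ mirrors the two paths described above. The unit costs and the helper name are made up for illustration; in the patch itself the values come from getMemoryOpCost, getLMULCost and getEstimatedVLFor, and legalization splitting (LT.first) is ignored here.

// Standalone model of the two cost paths; the unit costs are hypothetical.
#include <cstdio>

unsigned interleavedMemOpCost(unsigned Factor, unsigned VL,
                              unsigned WideMemOp, unsigned LMULShuffle,
                              unsigned ScalarMemOp) {
  // Fast path (NF=2): one wide memory op + Factor LMUL shuffle ops.
  if (Factor == 2)
    return WideMemOp + Factor * LMULShuffle;
  // Slow path (other NFs): proportional to the total number of elements,
  // i.e. VL * Factor element-sized memory ops.
  return VL * Factor * ScalarMemOp;
}

int main() {
  // With a per-segment VL of 16, NF=2 stays cheap while NF=3 scales with VL.
  std::printf("NF=2 cost: %u\n", interleavedMemOpCost(2, 16, 4, 2, 1)); // 8
  std::printf("NF=3 cost: %u\n", interleavedMemOpCost(3, 16, 4, 2, 1)); // 48
}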

File tree

3 files changed: 130 additions & 119 deletions


llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp

Lines changed: 19 additions & 12 deletions
@@ -723,8 +723,7 @@ InstructionCost RISCVTTIImpl::getInterleavedMemoryOpCost(
 
   // The interleaved memory access pass will lower interleaved memory ops (i.e
   // a load and store followed by a specific shuffle) to vlseg/vsseg
-  // intrinsics. In those cases then we can treat it as if it's just one (legal)
-  // memory op
+  // intrinsics.
   if (!UseMaskForCond && !UseMaskForGaps &&
       Factor <= TLI->getMaxSupportedInterleaveFactor()) {
     auto *VTy = cast<VectorType>(VecTy);
@@ -734,19 +733,27 @@ InstructionCost RISCVTTIImpl::getInterleavedMemoryOpCost(
       auto *SubVecTy =
           VectorType::get(VTy->getElementType(),
                           VTy->getElementCount().divideCoefficientBy(Factor));
-
       if (VTy->getElementCount().isKnownMultipleOf(Factor) &&
           TLI->isLegalInterleavedAccessType(SubVecTy, Factor, Alignment,
                                             AddressSpace, DL)) {
-        // FIXME: We use the memory op cost of the *legalized* type here,
-        // because it's getMemoryOpCost returns a really expensive cost for
-        // types like <6 x i8>, which show up when doing interleaves of
-        // Factor=3 etc. Should the memory op cost of these be cheaper?
-        auto *LegalVTy = VectorType::get(VTy->getElementType(),
-                                         LT.second.getVectorElementCount());
-        InstructionCost LegalMemCost = getMemoryOpCost(
-            Opcode, LegalVTy, Alignment, AddressSpace, CostKind);
-        return LT.first + LegalMemCost;
+
+        // Most available hardware today optimizes NF=2 as one wide memory op
+        // + Factor * LMUL shuffle ops.
+        if (Factor == 2) {
+          InstructionCost Cost =
+              getMemoryOpCost(Opcode, VTy, Alignment, AddressSpace, CostKind);
+          MVT SubVecVT = getTLI()->getValueType(DL, SubVecTy).getSimpleVT();
+          Cost += Factor * TLI->getLMULCost(SubVecVT);
+          return LT.first * Cost;
+        }
+
+        // Otherwise, the cost is proportional to the number of elements (VL *
+        // Factor ops).
+        InstructionCost MemOpCost =
+            getMemoryOpCost(Opcode, VTy->getElementType(), Alignment, 0,
+                            CostKind, {TTI::OK_AnyValue, TTI::OP_None});
+        unsigned NumLoads = getEstimatedVLFor(VTy);
+        return NumLoads * MemOpCost;
       }
     }
   }
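For context, the access pattern being priced here is what the interleaved memory access pass forms for strided loops such as the one below (an illustrative C++ loop, not taken from the patch): the two i8 streams at stride 2 become a single wide load followed by a deinterleave, i.e. a Factor=2 segmented access.

// Illustrative only: two interleaved i8 streams (NF=2). When vectorized,
// src[2*i] and src[2*i+1] are fetched with one wide load and deinterleaved,
// which is the operation getInterleavedMemoryOpCost is pricing above.
void sum_pairs(const signed char *src, int *dst, int n) {
  for (int i = 0; i < n; ++i)
    dst[i] = static_cast<int>(src[2 * i]) + static_cast<int>(src[2 * i + 1]);
}

Under the old model this was charged like a single load of the legalized wide type; after this change, Factor=2 additionally pays Factor LMUL shuffles, and Factor >= 3 is charged per element.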

llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll

Lines changed: 39 additions & 35 deletions
@@ -410,45 +410,49 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
 ; CHECK-SAME: i64 [[N:%.*]], ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR2]] {
 ; CHECK-NEXT: [[ENTRY:.*]]:
 ; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 0)
-; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[SMAX]], 3
-; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[SMAX]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 1
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP2]], 16
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], [[TMP4]]
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 16
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i64 16, i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[TMP4]]
-; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 4
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], [[TMP6]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 2
+; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i64> [[TMP9]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = mul <vscale x 4 x i64> [[TMP10]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 2, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 2, [[TMP8]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP12]], i64 0
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
 ; CHECK: [[VECTOR_BODY]]:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <8 x i64> [ <i64 0, i64 4, i64 8, i64 12, i64 16, i64 20, i64 24, i64 28>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[STEP_ADD:%.*]] = add <8 x i64> [[VEC_IND]], <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 32
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP6]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i8>, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
-; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
-; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <32 x i8>, ptr [[TMP8]], align 1
-; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <32 x i8> [[WIDE_VEC2]], <32 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
-; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <32 x i8> [[WIDE_VEC2]], <32 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
-; CHECK-NEXT: [[TMP11:%.*]] = zext <8 x i8> [[STRIDED_VEC4]] to <8 x i32>
-; CHECK-NEXT: [[TMP12:%.*]] = zext <8 x i8> [[STRIDED_VEC5]] to <8 x i32>
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[DST]], <8 x i64> [[VEC_IND]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[DST]], <8 x i64> [[STEP_ADD]]
-; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP11]], <8 x ptr> [[TMP13]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
-; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> [[TMP12]], <8 x ptr> [[TMP14]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[STEP_ADD]], <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
-; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i8>, ptr [[TMP14]], align 1
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.vector.deinterleave2.nxv8i8(<vscale x 8 x i8> [[WIDE_VEC]])
+; CHECK-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 0
+; CHECK-NEXT: [[TMP16:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[STRIDED_VEC]], 1
+; CHECK-NEXT: [[TMP17:%.*]] = zext <vscale x 4 x i8> [[TMP16]] to <vscale x 4 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
+; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP17]], <vscale x 4 x ptr> [[TMP18]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
 ; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
 ; CHECK: [[SCALAR_PH]]:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
 ; CHECK-NEXT: br label %[[LOOP:.*]]
@@ -462,9 +466,9 @@ define void @gather_interleave_group_with_dead_insert_pos(i64 %N, ptr noalias %s
 ; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[L_1]] to i32
 ; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[IV]]
 ; CHECK-NEXT: store i32 [[EXT]], ptr [[GEP_DST]], align 4
-; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 4
+; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 2
 ; CHECK-NEXT: [[EC:%.*]] = icmp slt i64 [[IV]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP21:![0-9]+]]
 ; CHECK: [[EXIT]]:
 ; CHECK-NEXT: ret void
 ;
@@ -481,7 +485,7 @@ loop:
   %ext = zext i8 %l.1 to i32
   %gep.dst = getelementptr i32, ptr %dst, i64 %iv
   store i32 %ext, ptr %gep.dst, align 4
-  %iv.next = add nsw i64 %iv, 4
+  %iv.next = add nsw i64 %iv, 2
   %ec = icmp slt i64 %iv, %N
   br i1 %ec, label %loop, label %exit
