
Commit be2f01e

huntergr-arm authored and Sterling-Augustine committed
[LV] Vectorize histogram operations (llvm#99851)
This patch implements autovectorization support for the 'all-in-one' histogram intrinsic, which seems to have more support than the 'standalone' intrinsic. See https://discourse.llvm.org/t/rfc-vectorization-support-for-histogram-count-operations/74788/ for an overview of the work and my notes on the tradeoffs between the two approaches.
1 parent 73c13f1 commit be2f01e

14 files changed: +1810 -2 lines changed
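
For orientation, here is a minimal sketch of the kind of source loop this patch targets (the buckets/indices names are illustrative, not taken from the patch). Several vector lanes may index the same bucket, so a plain gather/add/scatter would drop updates; instead, the load -> add -> store sequence is replaced with the llvm.experimental.vector.histogram.add intrinsic.

// Illustrative scalar histogram loop; names are not from the patch.
void histogram(int *buckets, const int *indices, int n) {
  for (int i = 0; i < n; ++i)
    buckets[indices[i]] += 1;
}

The transform is off by default; assuming the flag added below, it can be exercised with something like `opt -passes=loop-vectorize -enable-histogram-loop-vectorization`.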

llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h

Lines changed: 36 additions & 0 deletions
@@ -224,6 +224,18 @@ class LoopVectorizationRequirements {
   Instruction *ExactFPMathInst = nullptr;
 };
 
+/// This holds details about a histogram operation -- a load -> update -> store
+/// sequence where each lane in a vector might be updating the same element as
+/// another lane.
+struct HistogramInfo {
+  LoadInst *Load;
+  Instruction *Update;
+  StoreInst *Store;
+
+  HistogramInfo(LoadInst *Load, Instruction *Update, StoreInst *Store)
+      : Load(Load), Update(Update), Store(Store) {}
+};
+
 /// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
 /// to what vectorization factor.
 /// This class does not look at the profitability of vectorization, only the
@@ -408,6 +420,20 @@ class LoopVectorizationLegality {
   unsigned getNumStores() const { return LAI->getNumStores(); }
   unsigned getNumLoads() const { return LAI->getNumLoads(); }
 
+  /// Returns a HistogramInfo* for the given instruction if it was determined
+  /// to be part of a load -> update -> store sequence where multiple lanes
+  /// may be working on the same memory address.
+  std::optional<const HistogramInfo *> getHistogramInfo(Instruction *I) const {
+    for (const HistogramInfo &HGram : Histograms)
+      if (HGram.Load == I || HGram.Update == I || HGram.Store == I)
+        return &HGram;
+
+    return std::nullopt;
+  }
+
+  /// Returns true if any histogram operations were identified in the loop.
+  bool hasHistograms() const { return !Histograms.empty(); }
+
   PredicatedScalarEvolution *getPredicatedScalarEvolution() const {
     return &PSE;
   }
@@ -472,6 +498,11 @@ class LoopVectorizationLegality {
   /// Returns true if the loop is vectorizable
   bool canVectorizeMemory();
 
+  /// If LAA cannot determine whether all dependences are safe, we may be able
+  /// to further analyse some IndirectUnsafe dependences and if they match a
+  /// certain pattern (like a histogram) then we may still be able to vectorize.
+  bool canVectorizeIndirectUnsafeDependences();
+
   /// Return true if we can vectorize this loop using the IF-conversion
   /// transformation.
   bool canVectorizeWithIfConvert();
@@ -593,6 +624,11 @@ class LoopVectorizationLegality {
   /// conditional assumes.
   SmallPtrSet<const Instruction *, 8> MaskedOp;
 
+  /// Contains all identified histogram operations, which are sequences of
+  /// load -> update -> store instructions where multiple lanes in a vector
+  /// may work on the same memory location.
+  SmallVector<HistogramInfo, 1> Histograms;
+
   /// BFI and PSI are used to check for profile guided size optimizations.
   BlockFrequencyInfo *BFI;
   ProfileSummaryInfo *PSI;

llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp

Lines changed: 132 additions & 1 deletion
@@ -79,6 +79,10 @@ static cl::opt<LoopVectorizeHints::ScalableForceKind>
                     "Scalable vectorization is available and favored when the "
                     "cost is inconclusive.")));
 
+static cl::opt<bool> EnableHistogramVectorization(
+    "enable-histogram-loop-vectorization", cl::init(false), cl::Hidden,
+    cl::desc("Enables autovectorization of some loops containing histograms"));
+
 /// Maximum vectorization interleave count.
 static const unsigned MaxInterleaveFactor = 16;

@@ -1051,6 +1055,133 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
   return true;
 }
 
+/// Find histogram operations that match high-level code in loops:
+/// \code
+/// buckets[indices[i]] += step;
+/// \endcode
+///
+/// It matches a pattern starting from \p HSt, the store back to the 'buckets'
+/// array. The stored value must be produced by a BinOp that updates the
+/// loaded bucket value, and the bucket address must be formed from a
+/// loop-varying index loaded from the 'indices' input array.
+///
+/// On a successful match it records the Load, BinOp, and Store that make up
+/// the histogram in \p Histograms and returns true.
+static bool findHistogram(LoadInst *LI, StoreInst *HSt, Loop *TheLoop,
+                          const PredicatedScalarEvolution &PSE,
+                          SmallVectorImpl<HistogramInfo> &Histograms) {
+
+  // Store value must come from a Binary Operation.
+  Instruction *HPtrInstr = nullptr;
+  BinaryOperator *HBinOp = nullptr;
+  if (!match(HSt, m_Store(m_BinOp(HBinOp), m_Instruction(HPtrInstr))))
+    return false;
+
+  // BinOp must be an Add or a Sub modifying the bucket value by a
+  // loop invariant amount.
+  // FIXME: We assume the loop invariant term is on the RHS.
+  //        Fine for an immediate/constant, but maybe not a generic value?
+  Value *HIncVal = nullptr;
+  if (!match(HBinOp, m_Add(m_Load(m_Specific(HPtrInstr)), m_Value(HIncVal))) &&
+      !match(HBinOp, m_Sub(m_Load(m_Specific(HPtrInstr)), m_Value(HIncVal))))
+    return false;
+
+  // Make sure the increment value is loop invariant.
+  if (!TheLoop->isLoopInvariant(HIncVal))
+    return false;
+
+  // The address to store is calculated through a GEP Instruction.
+  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(HPtrInstr);
+  if (!GEP)
+    return false;
+
+  // Restrict address calculation to constant indices except for the last term.
+  Value *HIdx = nullptr;
+  for (Value *Index : GEP->indices()) {
+    if (HIdx)
+      return false;
+    if (!isa<ConstantInt>(Index))
+      HIdx = Index;
+  }
+
+  if (!HIdx)
+    return false;
+
+  // Check that the index is calculated by loading from another array. Ignore
+  // any extensions.
+  // FIXME: Support indices from other sources than a linear load from memory?
+  //        We're currently trying to match an operation looping over an array
+  //        of indices, but there could be additional levels of indirection
+  //        in place, or possibly some additional calculation to form the index
+  //        from the loaded data.
+  Value *VPtrVal;
+  if (!match(HIdx, m_ZExtOrSExtOrSelf(m_Load(m_Value(VPtrVal)))))
+    return false;
+
+  // Make sure the index address varies in this loop, not an outer loop.
+  const auto *AR = dyn_cast<SCEVAddRecExpr>(PSE.getSE()->getSCEV(VPtrVal));
+  if (!AR || AR->getLoop() != TheLoop)
+    return false;
+
+  // Ensure we'll have the same mask by checking that all parts of the histogram
+  // (gather load, update, scatter store) are in the same block.
+  LoadInst *IndexedLoad = cast<LoadInst>(HBinOp->getOperand(0));
+  BasicBlock *LdBB = IndexedLoad->getParent();
+  if (LdBB != HBinOp->getParent() || LdBB != HSt->getParent())
+    return false;
+
+  LLVM_DEBUG(dbgs() << "LV: Found histogram for: " << *HSt << "\n");
+
+  // Store the operations that make up the histogram.
+  Histograms.emplace_back(IndexedLoad, HBinOp, HSt);
+  return true;
+}
+
+bool LoopVectorizationLegality::canVectorizeIndirectUnsafeDependences() {
+  // For now, we only support an IndirectUnsafe dependency that calculates
+  // a histogram.
+  if (!EnableHistogramVectorization)
+    return false;
+
+  // Find a single IndirectUnsafe dependency.
+  const MemoryDepChecker::Dependence *IUDep = nullptr;
+  const MemoryDepChecker &DepChecker = LAI->getDepChecker();
+  const auto *Deps = DepChecker.getDependences();
+  // If there were too many dependences, LAA abandons recording them. We can't
+  // proceed safely if we don't know what the dependences are.
+  if (!Deps)
+    return false;
+
+  for (const MemoryDepChecker::Dependence &Dep : *Deps) {
+    // Ignore dependencies that are either known to be safe or can be
+    // checked at runtime.
+    if (MemoryDepChecker::Dependence::isSafeForVectorization(Dep.Type) !=
+        MemoryDepChecker::VectorizationSafetyStatus::Unsafe)
+      continue;
+
+    // We're only interested in IndirectUnsafe dependencies here, where the
+    // address might come from a load from memory. We also only want to handle
+    // one such dependency, at least for now.
+    if (Dep.Type != MemoryDepChecker::Dependence::IndirectUnsafe || IUDep)
+      return false;
+
+    IUDep = &Dep;
+  }
+  if (!IUDep)
+    return false;
+
+  // For now only normal loads and stores are supported.
+  LoadInst *LI = dyn_cast<LoadInst>(IUDep->getSource(DepChecker));
+  StoreInst *SI = dyn_cast<StoreInst>(IUDep->getDestination(DepChecker));
+
+  if (!LI || !SI)
+    return false;
+
+  LLVM_DEBUG(dbgs() << "LV: Checking for a histogram on: " << *SI << "\n");
+  return findHistogram(LI, SI, TheLoop, LAI->getPSE(), Histograms);
+}
+
 bool LoopVectorizationLegality::canVectorizeMemory() {
   LAI = &LAIs.getInfo(*TheLoop);
   const OptimizationRemarkAnalysis *LAR = LAI->getReport();
@@ -1062,7 +1193,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
   }
 
   if (!LAI->canVectorizeMemory())
-    return false;
+    return canVectorizeIndirectUnsafeDependences();
 
   if (LAI->hasLoadStoreDependenceInvolvingLoopInvariantAddress()) {
     reportVectorizationFailure("We don't allow storing to uniform addresses",
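
For contrast, a hedged source-level sketch (illustrative names; matching actually happens on the IR) of a loop findHistogram rejects as of this patch: the update amount varies per iteration, so it fails the isLoopInvariant check above.

// Not matched: 'weights[i]' is not loop-invariant, so findHistogram bails out.
void weightedHistogram(int *buckets, const int *indices, const int *weights,
                       int n) {
  for (int i = 0; i < n; ++i)
    buckets[indices[i]] += weights[i];
}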

llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Lines changed: 67 additions & 1 deletion
@@ -6508,8 +6508,33 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I,
     // We've proven all lanes safe to speculate, fall through.
     [[fallthrough]];
   case Instruction::Add:
+  case Instruction::Sub: {
+    auto Info = Legal->getHistogramInfo(I);
+    if (Info && VF.isVector()) {
+      const HistogramInfo *HGram = Info.value();
+      // Assume that a non-constant update value (or a constant != 1) requires
+      // a multiply, and add that into the cost.
+      InstructionCost MulCost = TTI::TCC_Free;
+      ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
+      if (!RHS || RHS->getZExtValue() != 1)
+        MulCost = TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
+
+      // Find the cost of the histogram operation itself.
+      Type *PtrTy = VectorType::get(HGram->Load->getPointerOperandType(), VF);
+      Type *ScalarTy = I->getType();
+      Type *MaskTy = VectorType::get(Type::getInt1Ty(I->getContext()), VF);
+      IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
+                                  Type::getVoidTy(I->getContext()),
+                                  {PtrTy, ScalarTy, MaskTy});
+
+      // Add the costs together with the add/sub operation.
+      return TTI.getIntrinsicInstrCost(
+                 ICA, TargetTransformInfo::TCK_RecipThroughput) +
+             MulCost + TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy);
+    }
+    [[fallthrough]];
+  }
   case Instruction::FAdd:
-  case Instruction::Sub:
   case Instruction::FSub:
   case Instruction::Mul:
   case Instruction::FMul:
@@ -8426,6 +8451,30 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
   };
 }
 
+VPHistogramRecipe *
+VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
+                                     ArrayRef<VPValue *> Operands) {
+  // FIXME: Support other operations.
+  unsigned Opcode = HI->Update->getOpcode();
+  assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
+         "Histogram update operation must be an Add or Sub");
+
+  SmallVector<VPValue *, 3> HGramOps;
+  // Bucket address.
+  HGramOps.push_back(Operands[1]);
+  // Increment value.
+  HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1)));
+
+  // In case of predicated execution (due to tail-folding, or conditional
+  // execution, or both), pass the relevant mask.
+  if (Legal->isMaskRequired(HI->Store))
+    HGramOps.push_back(getBlockInMask(HI->Store->getParent()));
+
+  return new VPHistogramRecipe(Opcode,
+                               make_range(HGramOps.begin(), HGramOps.end()),
+                               HI->Store->getDebugLoc());
+}
+
 void VPRecipeBuilder::fixHeaderPhis() {
   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
   for (VPHeaderPHIRecipe *R : PhisToFix) {
@@ -8549,6 +8598,10 @@ VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
   if (auto *CI = dyn_cast<CallInst>(Instr))
     return tryToWidenCall(CI, Operands, Range);
 
+  if (StoreInst *SI = dyn_cast<StoreInst>(Instr))
+    if (auto HistInfo = Legal->getHistogramInfo(SI))
+      return tryToWidenHistogram(*HistInfo, Operands);
+
   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
     return tryToWidenMemory(Instr, Operands, Range);
 
@@ -9998,6 +10051,19 @@ bool LoopVectorizePass::processLoop(Loop *L) {
     InterleaveLoop = false;
   }
 
+  // If there is a histogram in the loop, do not just interleave without
+  // vectorizing. The order of operations will be incorrect without the
+  // histogram intrinsics, which are only used for recipes with VF > 1.
+  if (!VectorizeLoop && InterleaveLoop && LVL.hasHistograms()) {
+    LLVM_DEBUG(dbgs() << "LV: Not interleaving without vectorization due "
+                      << "to histogram operations.\n");
+    IntDiagMsg = std::make_pair(
+        "HistogramPreventsScalarInterleaving",
+        "Unable to interleave without vectorization due to constraints on "
+        "the order of histogram operations");
+    InterleaveLoop = false;
+  }
+
   // Override IC if user provided an interleave count.
   IC = UserIC > 0 ? UserIC : IC;
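
To illustrate why the last hunk refuses to interleave scalar iterations without vectorizing, here is a hedged sketch (illustrative code, not compiler output) of one way reordering the memory operations of two iterations can lose a count when both hit the same bucket:

// If both loads execute before either store and the two pointers are equal,
// the second store overwrites the first increment.
void interleavedPair(int *buckets, const int *indices, int i) {
  int *P0 = &buckets[indices[i]];
  int *P1 = &buckets[indices[i + 1]];
  int V0 = *P0;
  int V1 = *P1;
  *P0 = V0 + 1;
  *P1 = V1 + 1; // Lost update when P0 == P1.
}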

llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h

Lines changed: 8 additions & 0 deletions
@@ -21,6 +21,7 @@ namespace llvm {
 class LoopVectorizationLegality;
 class LoopVectorizationCostModel;
 class TargetLibraryInfo;
+struct HistogramInfo;
 
 /// Helper class to create VPRecipies from IR instructions.
 class VPRecipeBuilder {
@@ -103,6 +104,13 @@ class VPRecipeBuilder {
   VPWidenRecipe *tryToWiden(Instruction *I, ArrayRef<VPValue *> Operands,
                             VPBasicBlock *VPBB);
 
+  /// Makes Histogram count operations safe for vectorization, by emitting a
+  /// llvm.experimental.vector.histogram.add intrinsic in place of the
+  /// Load + Add|Sub + Store operations that perform the histogram in the
+  /// original scalar loop.
+  VPHistogramRecipe *tryToWidenHistogram(const HistogramInfo *HI,
+                                         ArrayRef<VPValue *> Operands);
+
 public:
   VPRecipeBuilder(VPlan &Plan, Loop *OrigLoop, const TargetLibraryInfo *TLI,
                   LoopVectorizationLegality *Legal,

llvm/lib/Transforms/Vectorize/VPlan.h

Lines changed: 46 additions & 0 deletions
@@ -907,6 +907,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
     case VPRecipeBase::VPWidenLoadSC:
     case VPRecipeBase::VPWidenStoreEVLSC:
     case VPRecipeBase::VPWidenStoreSC:
+    case VPRecipeBase::VPHistogramSC:
       // TODO: Widened stores don't define a value, but widened loads do. Split
       // the recipes to be able to make widened loads VPSingleDefRecipes.
       return false;
@@ -1664,6 +1665,51 @@ class VPWidenCallRecipe : public VPSingleDefRecipe {
 #endif
 };
 
+/// A recipe representing a sequence of load -> update -> store as part of
+/// a histogram operation. This means there may be aliasing between vector
+/// lanes, which is handled by the llvm.experimental.vector.histogram family
+/// of intrinsics. The only update operations currently supported are
+/// 'add' and 'sub' where the other term is loop-invariant.
+class VPHistogramRecipe : public VPRecipeBase {
+  /// Opcode of the update operation, currently either add or sub.
+  unsigned Opcode;
+
+public:
+  template <typename IterT>
+  VPHistogramRecipe(unsigned Opcode, iterator_range<IterT> Operands,
+                    DebugLoc DL = {})
+      : VPRecipeBase(VPDef::VPHistogramSC, Operands, DL), Opcode(Opcode) {}
+
+  ~VPHistogramRecipe() override = default;
+
+  VPHistogramRecipe *clone() override {
+    return new VPHistogramRecipe(Opcode, operands(), getDebugLoc());
+  }
+
+  VP_CLASSOF_IMPL(VPDef::VPHistogramSC);
+
+  /// Produce a vectorized histogram operation.
+  void execute(VPTransformState &State) override;
+
+  /// Return the cost of this VPHistogramRecipe.
+  InstructionCost computeCost(ElementCount VF,
+                              VPCostContext &Ctx) const override;
+
+  unsigned getOpcode() const { return Opcode; }
+
+  /// Return the mask operand if one was provided, or a null pointer if all
+  /// lanes should be executed unconditionally.
+  VPValue *getMask() const {
+    return getNumOperands() == 3 ? getOperand(2) : nullptr;
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Print the recipe
+  void print(raw_ostream &O, const Twine &Indent,
+             VPSlotTracker &SlotTracker) const override;
+#endif
+};
+
 /// A recipe for widening select instructions.
 struct VPWidenSelectRecipe : public VPSingleDefRecipe {
   template <typename IterT>
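
As a rough conceptual model of the lane-by-lane semantics the recipe relies on (a hedged sketch, not the actual lowering of the intrinsic): each active lane applies its update in turn, so lanes that point at the same bucket do not lose counts.

#include <array>
#include <cstddef>

// Conceptual model only: duplicate pointers within the vector each receive
// their own update; the mask disables inactive lanes.
template <typename T, std::size_t N>
void conceptualHistogramAdd(const std::array<T *, N> &Ptrs, T Inc,
                            const std::array<bool, N> &Mask) {
  for (std::size_t Lane = 0; Lane < N; ++Lane)
    if (Mask[Lane])
      *Ptrs[Lane] += Inc;
}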
