Skip to content

[mlir][sparse] minor cleanup of transform/utils #75396

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Dec 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenEnv.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_

#include "CodegenUtils.h"
#include "LoopEmitter.h"
Expand Down Expand Up @@ -206,4 +206,4 @@ class CodegenEnv {
} // namespace sparse_tensor
} // namespace mlir

#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENENV_H_
#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENENV_H_
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
Expand Down Expand Up @@ -434,4 +434,4 @@ inline bool isZeroRankedTensorOrScalar(Type type) {
} // namespace sparse_tensor
} // namespace mlir

#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_CODEGENUTILS_H_
#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_CODEGENUTILS_H_
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
//===- LoopScheduler.cpp -------------------------------------------------===//
//===- IterationGraphSorter.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
Expand All @@ -20,11 +20,10 @@ using namespace mlir::sparse_tensor;

namespace {

/// A helper class that visits an affine expression and tries to find an
/// AffineDimExpr to which the corresponding iterator from a GenericOp matches
/// the desired iterator type.
/// If there is no matched iterator type, returns the first DimExpr in the
/// expression.
/// A helper class that visits an affine expression and tries to find
/// an AffineDimExpr to which the corresponding iterator from a GenericOp
/// matches the desired iterator type. If there is no matched iterator
/// type, the method returns the first DimExpr in the expression.
class AffineDimFinder : public AffineExprVisitor<AffineDimFinder> {
public:
explicit AffineDimFinder(ArrayRef<utils::IteratorType> itTypes)
Expand Down Expand Up @@ -81,11 +80,9 @@ inline static bool includesDenseOutput(SortMask mask) {
return includesAny(mask, SortMask::kIncludeDenseOutput);
}

/// A helper to compute a topological sort. O(n^2) time complexity
/// as we use adj matrix for the graph.
/// The sorted result will put the first Reduction iterator to the
/// latest possible position.
AffineMap IterationGraphSorter::topoSort() {
// The sorted result will put the first Reduction iterator to the
// latest possible position.
std::vector<unsigned> redIt; // reduce iterator with 0 degree
std::vector<unsigned> parIt; // parallel iterator with 0 degree
const unsigned numLoops = getNumLoops();
Expand Down Expand Up @@ -170,6 +167,7 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
  // Reset the iteration graph.
for (auto &row : itGraph)
std::fill(row.begin(), row.end(), false);

// Reset cached in-degree.
std::fill(inDegree.begin(), inDegree.end(), 0);

Expand All @@ -179,7 +177,6 @@ AffineMap IterationGraphSorter::sort(SortMask mask, Value ignored) {
// Skip dense inputs when not requested.
if ((!enc && !includesDenseInput(mask)) || in == ignored)
continue;

addConstraints(in, map);
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,10 +1,17 @@
//===- LoopScheduler.h -----------------------------------------*- C++ -*-===//
//===- IterationGraphSorter.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file defines the iteration graph sorter (top-sort scheduling).
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_

#include "mlir/IR/AffineMap.h"

Expand All @@ -21,7 +28,7 @@ class GenericOp;

namespace sparse_tensor {

/// Iteration graph sorting.
/// Iteration graph sorting mask.
enum class SortMask : unsigned {
// The individual mask bits.
kIncludeDenseOutput = 0x1, // b001
Expand All @@ -34,40 +41,52 @@ enum class SortMask : unsigned {

class IterationGraphSorter {
public:
// Constructs a scheduler from linalg.generic
// Maybe reuses the class to schedule foreach as well (to address
// non-permutation, e.g, traverse CSR in BSR order).
/// Factory method that constructs an iteration graph sorter
/// for the given linalg.generic operation.
static IterationGraphSorter fromGenericOp(linalg::GenericOp genericOp);

// Returns a permutation that represents the scheduled loop order.
// Note that the returned AffineMap could be null if the kernel can not be
// schedule due to cycles in the iteration graph.
/// Returns a permutation that represents the scheduled loop order.
/// Note that the returned AffineMap could be null if the kernel
/// cannot be scheduled due to cyclic iteration graph.
[[nodiscard]] AffineMap sort(SortMask mask, Value ignored = nullptr);

/// Returns the number of loops in the iteration graph.
unsigned getNumLoops() const { return loop2OutLvl.getNumDims(); }

private:
// Private constructor.
IterationGraphSorter(SmallVector<Value> &&ins,
SmallVector<AffineMap> &&loop2InsLvl, Value out,
AffineMap loop2OutLvl,
SmallVector<utils::IteratorType> &&iterTypes);

// Adds all the constraints in the given loop to level map.
void addConstraints(Value t, AffineMap loop2LvlMap);

/// A helper to compute a topological sort. The method has an
/// O(n^2) time complexity since we use an adjacency matrix
/// representation for the iteration graph.
AffineMap topoSort();

// Input tensors and associated loop to level maps.
SmallVector<Value> ins;
SmallVector<AffineMap> loop2InsLvl;

// Output tensor and associated loop to level map.
Value out;
AffineMap loop2OutLvl;
// Loop type;

// Loop iteration types.
SmallVector<utils::IteratorType> iterTypes;

// Adjacency matrix that represents the iteration graph.
std::vector<std::vector<bool>> itGraph;

// InDegree used for topo sort.
std::vector<unsigned> inDegree;
};

} // namespace sparse_tensor
} // namespace mlir

#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_ITERATIONGRAPHSORTER_H_
11 changes: 5 additions & 6 deletions mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_

#include <vector>

Expand All @@ -22,7 +22,7 @@ namespace sparse_tensor {
// A compressed <tensor id, level> pair.
using TensorLevel = unsigned;

//===----------------------------------------------------------------------===//
//
// SparseTensorLoopEmitter class, manages sparse tensors and helps to
// generate loop structure to (co)-iterate sparse tensors.
//
Expand All @@ -48,8 +48,7 @@ using TensorLevel = unsigned;
// loopEmiter.exitCurrentLoop(); // exit k
// loopEmiter.exitCurrentLoop(); // exit j
// loopEmiter.exitCurrentLoop(); // exit i
//===----------------------------------------------------------------------===//

//
class LoopEmitter {
public:
/// Optional callback function to setup dense output tensors when
Expand Down Expand Up @@ -705,4 +704,4 @@ class LoopEmitter {
} // namespace sparse_tensor
} // namespace mlir

#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORLOOPEMITTER_H_
#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_LOOPEMITTER_H_
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSORDESCRIPTOR_H_
#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_
#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_

#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorStorageLayout.h"
Expand Down Expand Up @@ -262,4 +262,4 @@ getMutDescriptorFromTensorTuple(Value tensor, SmallVectorImpl<Value> &fields) {
} // namespace sparse_tensor
} // namespace mlir

#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_SPARSETENSODESCRIPTOR_H_
#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_UTILS_SPARSETENSORDESCRIPTOR_H_