[mlir][sparse] implement sparse_tensor.extract_value operation. #101220

Merged · 1 commit · Jul 31, 2024

@@ -2,6 +2,7 @@
#include "Utils/CodegenUtils.h"
#include "Utils/SparseTensorIterator.h"

#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
@@ -10,8 +11,8 @@
using namespace mlir;
using namespace mlir::sparse_tensor;

void convertLevelType(SparseTensorEncodingAttr enc, Level lvl,
SmallVectorImpl<Type> &fields) {
static void convertLevelType(SparseTensorEncodingAttr enc, Level lvl,
SmallVectorImpl<Type> &fields) {
// Position and coordinate buffer in the sparse structure.
if (enc.getLvlType(lvl).isWithPosLT())
fields.push_back(enc.getPosMemRefType());
@@ -71,6 +72,21 @@ class ExtractIterSpaceConverter
}
};

/// Sparse codegen rule for the extract_value operator.
class ExtractValOpConverter : public OneToNOpConversionPattern<ExtractValOp> {
public:
using OneToNOpConversionPattern::OneToNOpConversionPattern;
LogicalResult
matchAndRewrite(ExtractValOp op, OpAdaptor adaptor,
OneToNPatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Value pos = adaptor.getIterator().back();
Value valBuf = rewriter.create<ToValuesOp>(loc, op.getTensor());
rewriter.replaceOpWithNewOp<memref::LoadOp>(op, valBuf, pos);
return success();
}
};
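
In IR terms, this new pattern turns a value extraction at the current iterator position into a plain load from the tensor's values buffer. A minimal sketch of the rewrite follows; the textual forms of sparse_tensor.extract_value and the iterator type are assumptions for illustration, not taken from this diff.

  // Before lowering (assumed spelling of the op and the iterator type):
  //   %v = sparse_tensor.extract_value %t at %it
  //          : tensor<?xi32, #sparse>, !sparse_tensor.iterator<#sparse, lvls = 0>
  //
  // After lowering, where %pos is the trailing value of the converted
  // iterator (adaptor.getIterator().back()):
  //   %vals = sparse_tensor.values %t : tensor<?xi32, #sparse> to memref<?xi32>
  //   %v    = memref.load %vals[%pos] : memref<?xi32>

This is the same values-buffer/load pair that the updated FileCheck test at the bottom of this change pins down.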

class SparseIterateOpConverter : public OneToNOpConversionPattern<IterateOp> {
public:
using OneToNOpConversionPattern::OneToNOpConversionPattern;
@@ -193,6 +209,6 @@ void mlir::populateLowerSparseIterationToSCFPatterns(
TypeConverter &converter, RewritePatternSet &patterns) {

IterateOp::getCanonicalizationPatterns(patterns, patterns.getContext());
patterns.add<ExtractIterSpaceConverter, SparseIterateOpConverter>(
converter, patterns.getContext());
patterns.add<ExtractIterSpaceConverter, ExtractValOpConverter,
SparseIterateOpConverter>(converter, patterns.getContext());
}
mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp (10 additions, 0 deletions)
@@ -357,6 +357,9 @@ static Value genSubscript(CodegenEnv &env, OpBuilder &builder, OpOperand *t,
const auto pos = env.emitter().getValPosits(tid);
assert(!pos.empty());
args.append(pos);
// Simply return the tensor; the value is extracted later using the iterator.
if (env.options().sparseEmitStrategy == SparseEmitStrategy::kSparseIterator)
return t->get();
} else {
// For dense tensors we push all level's coordinates onto `args`.
const Level lvlRank = stt.getLvlRank();
@@ -512,9 +515,16 @@ static Value genTensorLoad(CodegenEnv &env, OpBuilder &builder, ExprId exp) {
return genInsertionLoadReduce(env, builder, t);
return genInsertionLoad(env, builder, t);
}

// Actual load.
SmallVector<Value> args;
Value ptr = genSubscript(env, builder, t, args);
if (llvm::isa<TensorType>(ptr.getType())) {
assert(env.options().sparseEmitStrategy ==
SparseEmitStrategy::kSparseIterator &&
args.size() == 1);
return builder.create<ExtractValOp>(loc, ptr, args.front());
}
return builder.create<memref::LoadOp>(loc, ptr, args);
}
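
Taken together with the genSubscript change above: under sparse-emit-strategy=sparse-iterator the subscript for a sparse operand is the loop iterator itself and the returned "pointer" is the tensor value, so the tensor load is emitted as an extract_value rather than a memref.load. A rough sketch of the sparsifier output before the lowering pass runs (the spellings of the iteration ops are assumptions for illustration):

  // %space = sparse_tensor.extract_iteration_space %t lvls = 0 : ...
  // %r = sparse_tensor.iterate %it in %space iter_args(%acc = %zero) ... {
  //   // genSubscript returned %t with args = {%it}, so genTensorLoad emits:
  //   %v = sparse_tensor.extract_value %t at %it : ...
  //   ...
  //   sparse_tensor.yield %new : i32
  // }

The ExtractValOpConverter added in the first file then rewrites that extract_value into the values-buffer load shown earlier.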

@@ -221,6 +221,11 @@ class LoopEmitter {
/// Getters.
///
SmallVector<Value> getValPosits(TensorId tid) const {
// Returns the iterator if we are generating sparse (co)iterate-based loops.
if (emitStrategy == SparseEmitStrategy::kSparseIterator)
return {spIterVals[tid].back()};

// Returns {[batch coords], last-level position}.
SmallVector<Value> batchCrds = iters[tid].back().back()->getBatchCrds();
Value lastLvlPos = iters[tid].back().back()->getCurPosition().front();
batchCrds.push_back(lastLvlPos);
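
For context, a short sketch of what this getter ultimately feeds into genSubscript and genTensorLoad in the two modes (SSA names invented for illustration):

  // Default strategy: {[batch coords...], last-level position}, consumed as
  //   %v = memref.load %vals[%batch..., %pos]
  // Sparse-iterator strategy: the single SSA value of the innermost iterator,
  //   consumed as (assumed spelling)
  //   %v = sparse_tensor.extract_value %t at %it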
@@ -1,4 +1,4 @@
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification="sparse-emit-strategy=sparse-iterator" --sparse-space-collapse --lower-sparse-iteration-to-scf | FileCheck %s
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification="sparse-emit-strategy=sparse-iterator" --cse --sparse-space-collapse --lower-sparse-iteration-to-scf --loop-invariant-code-motion | FileCheck %s


#COO = #sparse_tensor.encoding<{
@@ -7,8 +7,7 @@
d1 : singleton(nonunique, soa),
d2 : singleton(nonunique, soa),
d3 : singleton(soa)
),
explicitVal = 1 : i32
)
}>

// CHECK-LABEL: func.func @sqsum(
@@ -17,7 +16,10 @@
// CHECK-DAG: %[[POS_BUF:.*]] = sparse_tensor.positions %{{.*}} {level = 0 : index} : tensor<?x?x?x?xi32, #sparse> to memref<?xindex>
// CHECK: %[[POS_LO:.*]] = memref.load %[[POS_BUF]]{{\[}}%[[C0]]] : memref<?xindex>
// CHECK: %[[POS_HI:.*]] = memref.load %[[POS_BUF]]{{\[}}%[[C1]]] : memref<?xindex>
// CHECK: %[[VAL_BUF:.*]] = sparse_tensor.values %{{.*}} : tensor<?x?x?x?xi32, #sparse> to memref<?xi32>
// CHECK: %[[SQ_SUM:.*]] = scf.for %[[POS:.*]] = %[[POS_LO]] to %[[POS_HI]] step %[[C1]] {{.*}} {
// CHECK: %[[VAL:.*]] = memref.load %[[VAL_BUF]]{{\[}}%[[POS]]] : memref<?xi32>
// CHECK: %[[MUL:.*]] = arith.muli %[[VAL]], %[[VAL]] : i32
// CHECK: %[[SUM:.*]] = arith.addi
// CHECK: scf.yield %[[SUM]] : i32
// CHECK: }
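
Assembled from the CHECK lines above, the loop the new RUN pipeline is expected to produce for @sqsum looks roughly like this (a sketch with invented SSA names, not verbatim compiler output):

  %pos_buf = sparse_tensor.positions %t {level = 0 : index}
               : tensor<?x?x?x?xi32, #sparse> to memref<?xindex>
  %lo = memref.load %pos_buf[%c0] : memref<?xindex>
  %hi = memref.load %pos_buf[%c1] : memref<?xindex>
  %val_buf = sparse_tensor.values %t : tensor<?x?x?x?xi32, #sparse> to memref<?xi32>
  %sum = scf.for %pos = %lo to %hi step %c1 iter_args(%acc = %zero) -> (i32) {
    %v = memref.load %val_buf[%pos] : memref<?xi32>
    %sq = arith.muli %v, %v : i32
    %new = arith.addi %acc, %sq : i32
    scf.yield %new : i32
  }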