[mlir] Add support for memref.alloca sub-byte emulation #71956
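Summary (as read from the diff below): this change generalizes the ConvertMemRefAlloc pattern in EmulateNarrowType.cpp into a template over the op type, restricted by a static_assert to memref::AllocOp and memref::AllocaOp; registers both instantiations in populateMemRefNarrowTypeEmulationPatterns; and adds a lit test exercising an i4 memref.alloca load under 8-bit and 32-bit emulated storage.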

Status: Closed · wants to merge 1 commit
31 changes: 18 additions & 13 deletions mlir/lib/Dialect/MemRef/Transforms/EmulateNarrowType.cpp
@@ -56,15 +56,19 @@ namespace {
 // ConvertMemRefAlloc
 //===----------------------------------------------------------------------===//
 
-struct ConvertMemRefAlloc final : OpConversionPattern<memref::AllocOp> {
-  using OpConversionPattern::OpConversionPattern;
+template <typename OpTy>
+struct ConvertMemRefAlloc final : OpConversionPattern<OpTy> {
+  using OpConversionPattern<OpTy>::OpConversionPattern;
 
   LogicalResult
-  matchAndRewrite(memref::AllocOp op, OpAdaptor adaptor,
+  matchAndRewrite(OpTy op, typename OpTy::Adaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto currentType = op.getMemref().getType().cast<MemRefType>();
-    auto newResultType =
-        getTypeConverter()->convertType(op.getType()).dyn_cast<MemRefType>();
+    static_assert(std::is_same<OpTy, memref::AllocOp>() ||
+                      std::is_same<OpTy, memref::AllocaOp>(),
+                  "expected only memref::AllocOp or memref::AllocaOp");
+    auto currentType = cast<MemRefType>(op.getMemref().getType());
+    auto newResultType = dyn_cast<MemRefType>(
+        this->getTypeConverter()->convertType(op.getType()));
     if (!newResultType) {
       return rewriter.notifyMatchFailure(
           op->getLoc(),
@@ -73,9 +77,9 @@ struct ConvertMemRefAlloc final : OpConversionPattern<memref::AllocOp> {
 
     // Special case zero-rank memrefs.
     if (currentType.getRank() == 0) {
-      rewriter.replaceOpWithNewOp<memref::AllocOp>(
-          op, newResultType, ValueRange{}, adaptor.getSymbolOperands(),
-          adaptor.getAlignmentAttr());
+      rewriter.replaceOpWithNewOp<OpTy>(op, newResultType, ValueRange{},
+                                        adaptor.getSymbolOperands(),
+                                        adaptor.getAlignmentAttr());
       return success();
     }
 
@@ -97,9 +101,9 @@ struct ConvertMemRefAlloc final : OpConversionPattern<memref::AllocOp> {
           rewriter, loc, linearizedMemRefInfo.linearizedSize));
     }
 
-    rewriter.replaceOpWithNewOp<memref::AllocOp>(
-        op, newResultType, dynamicLinearizedSize, adaptor.getSymbolOperands(),
-        adaptor.getAlignmentAttr());
+    rewriter.replaceOpWithNewOp<OpTy>(op, newResultType, dynamicLinearizedSize,
+                                      adaptor.getSymbolOperands(),
+                                      adaptor.getAlignmentAttr());
     return success();
   }
 };
@@ -291,7 +295,8 @@ void memref::populateMemRefNarrowTypeEmulationPatterns(
     RewritePatternSet &patterns) {
 
   // Populate `memref.*` conversion patterns.
-  patterns.add<ConvertMemRefAlloc, ConvertMemRefLoad,
+  patterns.add<ConvertMemRefAlloc<memref::AllocOp>,
+               ConvertMemRefAlloc<memref::AllocaOp>, ConvertMemRefLoad,
                ConvertMemRefAssumeAlignment, ConvertMemRefSubview>(
       typeConverter, patterns.getContext());
  memref::populateResolveExtractStridedMetadataPatterns(patterns);
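For context, a rough sketch of how a caller might wire these patterns into a conversion, in the style of the in-tree narrow-type test pass. The scaffolding below is an illustrative assumption and not part of this diff; only the populate entry point is touched by the patch. After this change the same call also picks up the memref.alloca pattern with no caller-side changes:

// Sketch only: the surrounding function and legality setup are hypothetical.
void runNarrowTypeEmulation(Operation *rootOp, MLIRContext *ctx) {
  // Emulate sub-byte integers with 8-bit storage (cf. the default CHECK
  // prefix in the test below).
  arith::NarrowTypeEmulationConverter typeConverter(/*targetBitwidth=*/8);

  RewritePatternSet patterns(ctx);
  // After this patch, registers both ConvertMemRefAlloc<memref::AllocOp>
  // and ConvertMemRefAlloc<memref::AllocaOp>, among the other patterns.
  memref::populateMemRefNarrowTypeEmulationPatterns(typeConverter, patterns);

  ConversionTarget target(*ctx);
  target.addDynamicallyLegalDialect<memref::MemRefDialect>(
      [&typeConverter](Operation *op) { return typeConverter.isLegal(op); });

  if (failed(applyPartialConversion(rootOp, target, std::move(patterns))))
    rootOp->emitError("narrow type emulation failed");
}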
33 changes: 33 additions & 0 deletions mlir/test/Dialect/MemRef/emulate-narrow-type.mlir
@@ -174,3 +174,36 @@ func.func @memref_strided_i4(%idx : index) -> i4 {
 // CHECK32: %[[ALLOC:.+]] = memref.alloc() : memref<16xi32>
 // CHECK32: %[[SUBVIEW:.+]] = memref.subview %[[ALLOC]][4] [4] [1] : memref<16xi32> to memref<4xi32, strided<[1], offset: 4>>
 // CHECK32: %[[LOAD:.+]] = memref.load %[[SUBVIEW]]
+
+// -----
+
+func.func @memref_alloca_load_i4(%arg0: index) -> i4 {
+  %0 = memref.alloca() : memref<5xi4>
+  %1 = memref.load %0[%arg0] : memref<5xi4>
+  return %1 : i4
+}
+// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 floordiv 2)>
+// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 * 4 - (s0 floordiv 2) * 8)
+// CHECK: func @memref_alloca_load_i4(
+// CHECK-SAME: %[[ARG0:.+]]: index
+// CHECK: %[[ALLOCA:.+]] = memref.alloca() : memref<3xi8>
+// CHECK: %[[INDEX:.+]] = affine.apply #[[MAP0]]()[%[[ARG0]]]
+// CHECK: %[[LOADVAL:.+]] = memref.load %[[ALLOCA]][%[[INDEX]]]
+// CHECK: %[[BITOFFSET:.+]] = affine.apply #[[MAP1]]()[%[[ARG0]]]
+// CHECK: %[[CAST:.+]] = arith.index_cast %[[BITOFFSET]] : index to i8
+// CHECK: %[[SHIFTRT:.+]] = arith.shrsi %[[LOADVAL]], %[[CAST]]
+// CHECK: %[[TRUNC:.+]] = arith.trunci %[[SHIFTRT]] : i8 to i4
+// CHECK: return %[[TRUNC]]
+
+// CHECK32-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 floordiv 8)>
+// CHECK32-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 * 4 - (s0 floordiv 8) * 32)
+// CHECK32: func @memref_alloca_load_i4(
+// CHECK32-SAME: %[[ARG0:.+]]: index
+// CHECK32: %[[ALLOCA:.+]] = memref.alloca() : memref<1xi32>
+// CHECK32: %[[INDEX:.+]] = affine.apply #[[MAP0]]()[%[[ARG0]]]
+// CHECK32: %[[LOADVAL:.+]] = memref.load %[[ALLOCA]][%[[INDEX]]]
+// CHECK32: %[[BITOFFSET:.+]] = affine.apply #[[MAP1]]()[%[[ARG0]]]
+// CHECK32: %[[CAST:.+]] = arith.index_cast %[[BITOFFSET]] : index to i32
+// CHECK32: %[[SHIFTRT:.+]] = arith.shrsi %[[LOADVAL]], %[[CAST]]
+// CHECK32: %[[TRUNC:.+]] = arith.trunci %[[SHIFTRT]] : i32 to i4
+// CHECK32: return %[[TRUNC]]
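To make the index arithmetic in the CHECK lines concrete: with 8-bit storage, an i4 element at linear index s0 lands in byte s0 floordiv 2 at bit offset s0 * 4 - (s0 floordiv 2) * 8, i.e. even indices occupy the low nibble and odd indices the high nibble; the emulated load then shifts right by that offset and truncates to i4 (the arith.shrsi/arith.trunci pair above). A standalone sanity check of the two affine maps, plain C++ independent of MLIR:

#include <cassert>

// Mirrors #MAP0 / #MAP1 from the 8-bit CHECK prefix above (i4 data in i8
// storage, memref<5xi4> packed into memref<3xi8>).
int main() {
  for (int s0 = 0; s0 < 5; ++s0) {
    int byteIndex = s0 / 2;                 // #MAP0: s0 floordiv 2
    int bitOffset = s0 * 4 - byteIndex * 8; // #MAP1
    assert(byteIndex < 3);                  // fits the emulated memref<3xi8>
    assert(bitOffset == (s0 % 2) * 4);      // low nibble (0) or high nibble (4)
  }
}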