Skip to content

[mlir] Use create instead of createOrFold for ConstantOp as folding has no effect (NFC) #80129

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Feb 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ static Value createI32Constant(ConversionPatternRewriter &rewriter,
static Value createI1Constant(ConversionPatternRewriter &rewriter, Location loc,
bool value) {
Type llvmI1 = rewriter.getI1Type();
return rewriter.createOrFold<LLVM::ConstantOp>(loc, llvmI1, value);
return rewriter.create<LLVM::ConstantOp>(loc, llvmI1, value);
}

namespace {
Expand Down Expand Up @@ -163,7 +163,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
Value ptr = memrefDescriptor.alignedPtr(rewriter, loc);
// The stride value is always 0 for raw buffers. This also disables
// swizzling.
Value stride = rewriter.createOrFold<LLVM::ConstantOp>(
Value stride = rewriter.create<LLVM::ConstantOp>(
loc, llvmI16, rewriter.getI16IntegerAttr(0));
Value numRecords;
if (memrefType.hasStaticShape()) {
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ void ExtFOnFloat8RewritePattern::rewrite(arith::ExtFOp op,
}
VectorType inType = in.getType().cast<VectorType>();
int64_t numElements = inType.getNumElements();
Value zero = rewriter.createOrFold<arith::ConstantOp>(
Value zero = rewriter.create<arith::ConstantOp>(
loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
Value result =
rewriter.createOrFold<vector::SplatOp>(loc, op.getOut().getType(), zero);
Expand Down Expand Up @@ -209,7 +209,7 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op,
}
VectorType outType = op.getOut().getType().cast<VectorType>();
int64_t numElements = outType.getNumElements();
Value zero = rewriter.createOrFold<arith::ConstantOp>(
Value zero = rewriter.create<arith::ConstantOp>(
loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
Value result = rewriter.createOrFold<vector::SplatOp>(loc, outType, zero);
if (outType.getShape().empty()) {
Expand Down
8 changes: 4 additions & 4 deletions mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,8 @@ static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) {
Value getLaneId(ConversionPatternRewriter &rewriter, Location loc,
const unsigned indexBitwidth) {
auto int32Type = IntegerType::get(rewriter.getContext(), 32);
Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
Value mbcntLo = rewriter.create<ROCDL::MbcntLoOp>(loc, int32Type,
ValueRange{minus1, zero});
Value laneId = rewriter.create<ROCDL::MbcntHiOp>(loc, int32Type,
Expand All @@ -89,8 +89,8 @@ struct GPULaneIdOpToROCDL : ConvertOpToLLVMPattern<gpu::LaneIdOp> {
// followed by: %lid = call @llvm.amdgcn.mbcnt.hi(-1, %mlo)

Type intTy = IntegerType::get(context, 32);
Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
Value mbcntLo =
rewriter.create<ROCDL::MbcntLoOp>(loc, intTy, ValueRange{minus1, zero});
Value laneId = rewriter.create<ROCDL::MbcntHiOp>(
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -327,7 +327,7 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {
highValues.reserve(rank);

for (int i = 0; i < rank; i++) {
Value inputIndex = rewriter.createOrFold<arith::ConstantIndexOp>(loc, i);
Value inputIndex = rewriter.create<arith::ConstantIndexOp>(loc, i);
Value lowVal = rewriter.createOrFold<tensor::ExtractOp>(
loc, padding, ValueRange({inputIndex, lowIndex}));
Value highVal = rewriter.createOrFold<tensor::ExtractOp>(
Expand Down Expand Up @@ -360,8 +360,8 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {

Location loc = op.getLoc();
int axis = op.getAxis();
Value axisValue = rewriter.createOrFold<arith::ConstantOp>(
loc, rewriter.getIndexAttr(axis));
Value axisValue =
rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(axis));
int64_t rank = resultType.getRank();

SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,8 @@ struct DecomposeTensorConcatOp : public OpRewritePattern<ConcatOp> {
return failure();

int64_t dim = concatOp.getDim();
Value dimValue = rewriter.createOrFold<arith::ConstantOp>(
loc, rewriter.getIndexAttr(dim));
Value dimValue =
rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(dim));

int64_t rank = concatOp.getResultType().getRank();
SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
Expand Down