Commit 65066c0

[mlir] Use create instead of createOrFold for ConstantOp as folding has no effect (NFC) (llvm#80129)
This cleans up confusing uses of builder.createOrFold<ConstantOp>: folding a just-created constant op cannot simplify it any further, so createOrFold behaves exactly like create here.
1 parent 54f324f commit 65066c0
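For context, here is a minimal sketch (not part of the patch) of why the two builder calls are interchangeable for constants: createOrFold creates the op and then runs its folder, but a constant has nothing left to fold, so the end result is the same arith.constant that a plain create would produce. The setup below (main function, module, dialect registration, the names a and b) is illustrative only and assumes an MLIR development build.

// Illustrative sketch only (not from this commit). Assumes an MLIR dev build
// with the Arith dialect available.
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;

int main() {
  MLIRContext context;
  context.loadDialect<arith::ArithDialect>();

  OpBuilder builder(&context);
  Location loc = builder.getUnknownLoc();
  OwningOpRef<ModuleOp> module = ModuleOp::create(loc);
  builder.setInsertionPointToStart(module->getBody());

  // Plain create: materializes an arith.constant with value 42.
  Value a = builder.create<arith::ConstantIndexOp>(loc, 42);

  // createOrFold: creates the same op and then invokes its folder. Since a
  // constant cannot be simplified further, the result is still just an
  // arith.constant with value 42 -- identical to the line above, only more
  // confusing to read, which is what this commit cleans up.
  Value b = builder.createOrFold<arith::ConstantIndexOp>(loc, 42);

  // Both values are defined by an arith.constant op.
  llvm::outs() << a.getDefiningOp()->getName().getStringRef() << "\n"
               << b.getDefiningOp()->getName().getStringRef() << "\n";
  return 0;
}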

5 files changed (+13 / -13 lines)

mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp

Lines changed: 2 additions & 2 deletions
@@ -38,7 +38,7 @@ static Value createI32Constant(ConversionPatternRewriter &rewriter,
 static Value createI1Constant(ConversionPatternRewriter &rewriter, Location loc,
                               bool value) {
   Type llvmI1 = rewriter.getI1Type();
-  return rewriter.createOrFold<LLVM::ConstantOp>(loc, llvmI1, value);
+  return rewriter.create<LLVM::ConstantOp>(loc, llvmI1, value);
 }

 namespace {
@@ -163,7 +163,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
     Value ptr = memrefDescriptor.alignedPtr(rewriter, loc);
     // The stride value is always 0 for raw buffers. This also disables
     // swizling.
-    Value stride = rewriter.createOrFold<LLVM::ConstantOp>(
+    Value stride = rewriter.create<LLVM::ConstantOp>(
         loc, llvmI16, rewriter.getI16IntegerAttr(0));
     Value numRecords;
     if (memrefType.hasStaticShape()) {

mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp

Lines changed: 2 additions & 2 deletions
@@ -89,7 +89,7 @@ void ExtFOnFloat8RewritePattern::rewrite(arith::ExtFOp op,
   }
   VectorType inType = in.getType().cast<VectorType>();
   int64_t numElements = inType.getNumElements();
-  Value zero = rewriter.createOrFold<arith::ConstantOp>(
+  Value zero = rewriter.create<arith::ConstantOp>(
       loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
   Value result =
       rewriter.createOrFold<vector::SplatOp>(loc, op.getOut().getType(), zero);
@@ -209,7 +209,7 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op,
   }
   VectorType outType = op.getOut().getType().cast<VectorType>();
   int64_t numElements = outType.getNumElements();
-  Value zero = rewriter.createOrFold<arith::ConstantOp>(
+  Value zero = rewriter.create<arith::ConstantOp>(
      loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
   Value result = rewriter.createOrFold<vector::SplatOp>(loc, outType, zero);
   if (outType.getShape().empty()) {

mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp

Lines changed: 4 additions & 4 deletions
@@ -67,8 +67,8 @@ static bool canBeCalledWithBarePointers(gpu::GPUFuncOp func) {
 Value getLaneId(ConversionPatternRewriter &rewriter, Location loc,
                 const unsigned indexBitwidth) {
   auto int32Type = IntegerType::get(rewriter.getContext(), 32);
-  Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
-  Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
+  Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
+  Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
   Value mbcntLo = rewriter.create<ROCDL::MbcntLoOp>(loc, int32Type,
                                                     ValueRange{minus1, zero});
   Value laneId = rewriter.create<ROCDL::MbcntHiOp>(loc, int32Type,
@@ -89,8 +89,8 @@ struct GPULaneIdOpToROCDL : ConvertOpToLLVMPattern<gpu::LaneIdOp> {
     // followed by: %lid = call @llvm.amdgcn.mbcnt.hi(-1, %mlo)

     Type intTy = IntegerType::get(context, 32);
-    Value zero = rewriter.createOrFold<arith::ConstantIntOp>(loc, 0, 32);
-    Value minus1 = rewriter.createOrFold<arith::ConstantIntOp>(loc, -1, 32);
+    Value zero = rewriter.create<arith::ConstantIntOp>(loc, 0, 32);
+    Value minus1 = rewriter.create<arith::ConstantIntOp>(loc, -1, 32);
     Value mbcntLo =
         rewriter.create<ROCDL::MbcntLoOp>(loc, intTy, ValueRange{minus1, zero});
     Value laneId = rewriter.create<ROCDL::MbcntHiOp>(

mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp

Lines changed: 3 additions & 3 deletions
@@ -327,7 +327,7 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {
     highValues.reserve(rank);

     for (int i = 0; i < rank; i++) {
-      Value inputIndex = rewriter.createOrFold<arith::ConstantIndexOp>(loc, i);
+      Value inputIndex = rewriter.create<arith::ConstantIndexOp>(loc, i);
       Value lowVal = rewriter.createOrFold<tensor::ExtractOp>(
           loc, padding, ValueRange({inputIndex, lowIndex}));
       Value highVal = rewriter.createOrFold<tensor::ExtractOp>(
@@ -360,8 +360,8 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {

     Location loc = op.getLoc();
     int axis = op.getAxis();
-    Value axisValue = rewriter.createOrFold<arith::ConstantOp>(
-        loc, rewriter.getIndexAttr(axis));
+    Value axisValue =
+        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(axis));
     int64_t rank = resultType.getRank();

     SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));

mlir/lib/Dialect/Tensor/Transforms/ConcatOpPatterns.cpp

Lines changed: 2 additions & 2 deletions
@@ -44,8 +44,8 @@ struct DecomposeTensorConcatOp : public OpRewritePattern<ConcatOp> {
       return failure();

     int64_t dim = concatOp.getDim();
-    Value dimValue = rewriter.createOrFold<arith::ConstantOp>(
-        loc, rewriter.getIndexAttr(dim));
+    Value dimValue =
+        rewriter.create<arith::ConstantOp>(loc, rewriter.getIndexAttr(dim));

     int64_t rank = concatOp.getResultType().getRank();
     SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
