Switch member calls to isa/dyn_cast/cast/... to free function calls. #89356

Merged (2 commits) on Apr 19, 2024
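
The change applied throughout this diff is mechanical: casting helpers previously invoked as member functions on Type, Attribute, and Value (x.isa<T>(), x.dyn_cast<T>(), x.cast<T>()) are rewritten as the equivalent free functions (isa<T>(x), dyn_cast<T>(x), cast<T>(x)), which are the preferred spelling now that the member forms are deprecated. Below is a minimal sketch of the before/after shape; it is not code from this PR, and the helper names are invented for illustration.

// A minimal sketch of the pattern applied throughout this diff (not code
// taken from the PR itself). Both helpers answer the same question; only
// the cast spelling differs.
#include "mlir/IR/BuiltinTypes.h" // RankedTensorType
#include "mlir/IR/Types.h"        // Type

using namespace mlir;

// Before: deprecated member-call style on Type.
static int64_t rankOrMinusOneOld(Type t) {
  if (t.isa<RankedTensorType>())
    return t.cast<RankedTensorType>().getRank();
  return -1;
}

// After: free-function style, as used in this PR.
static int64_t rankOrMinusOneNew(Type t) {
  if (isa<RankedTensorType>(t))
    return cast<RankedTensorType>(t).getRank();
  return -1;
}

Null-tolerant variants (for example isa_and_nonnull, seen in the Location.h hunk below) follow the same member-to-free-function move.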
2 changes: 1 addition & 1 deletion mlir/examples/transform/Ch4/lib/MyExtension.cpp
@@ -142,7 +142,7 @@ mlir::transform::HasOperandSatisfyingOp::apply(
transform::detail::prepareValueMappings(
yieldedMappings, getBody().front().getTerminator()->getOperands(),
state);
results.setParams(getPosition().cast<OpResult>(),
results.setParams(cast<OpResult>(getPosition()),
{rewriter.getI32IntegerAttr(operand.getOperandNumber())});
for (auto &&[result, mapping] : llvm::zip(getResults(), yieldedMappings))
results.setMappedValues(result, mapping);
@@ -87,7 +87,7 @@ struct IndependentParallelIteratorDomainShardingInterface
void
populateIteratorTypes(Type t,
SmallVector<utils::IteratorType> &iterTypes) const {
RankedTensorType rankedTensorType = t.dyn_cast<RankedTensorType>();
RankedTensorType rankedTensorType = dyn_cast<RankedTensorType>(t);
if (!rankedTensorType) {
return;
}
@@ -106,7 +106,7 @@ struct ElementwiseShardingInterface
ElementwiseShardingInterface<ElemwiseOp>, ElemwiseOp> {
SmallVector<utils::IteratorType> getLoopIteratorTypes(Operation *op) const {
Value val = op->getOperand(0);
auto type = val.getType().dyn_cast<RankedTensorType>();
auto type = dyn_cast<RankedTensorType>(val.getType());
if (!type)
return {};
SmallVector<utils::IteratorType> types(type.getRank(),
@@ -117,7 +117,7 @@ struct ElementwiseShardingInterface
SmallVector<AffineMap> getIndexingMaps(Operation *op) const {
MLIRContext *ctx = op->getContext();
Value val = op->getOperand(0);
auto type = val.getType().dyn_cast<RankedTensorType>();
auto type = dyn_cast<RankedTensorType>(val.getType());
if (!type)
return {};
int64_t rank = type.getRank();
6 changes: 3 additions & 3 deletions mlir/include/mlir/Dialect/Tosa/IR/TosaOps.h
@@ -60,11 +60,11 @@ class MulOperandsAndResultElementType
if (llvm::isa<FloatType>(resElemType))
return impl::verifySameOperandsAndResultElementType(op);

if (auto resIntType = resElemType.dyn_cast<IntegerType>()) {
if (auto resIntType = dyn_cast<IntegerType>(resElemType)) {
IntegerType lhsIntType =
getElementTypeOrSelf(op->getOperand(0)).cast<IntegerType>();
cast<IntegerType>(getElementTypeOrSelf(op->getOperand(0)));
IntegerType rhsIntType =
getElementTypeOrSelf(op->getOperand(1)).cast<IntegerType>();
cast<IntegerType>(getElementTypeOrSelf(op->getOperand(1)));
if (lhsIntType != rhsIntType)
return op->emitOpError(
"requires the same element type for all operands");
2 changes: 1 addition & 1 deletion mlir/include/mlir/IR/Location.h
@@ -154,7 +154,7 @@ class FusedLocWith : public FusedLoc {
/// Support llvm style casting.
static bool classof(Attribute attr) {
auto fusedLoc = llvm::dyn_cast<FusedLoc>(attr);
return fusedLoc && fusedLoc.getMetadata().isa_and_nonnull<MetadataT>();
return fusedLoc && mlir::isa_and_nonnull<MetadataT>(fusedLoc.getMetadata());
}
};

6 changes: 3 additions & 3 deletions mlir/lib/CAPI/Dialect/LLVM.cpp
@@ -135,7 +135,7 @@ MlirAttribute mlirLLVMDIExpressionAttrGet(MlirContext ctx, intptr_t nOperations,
unwrap(ctx),
llvm::map_to_vector(
unwrapList(nOperations, operations, attrStorage),
[](Attribute a) { return a.cast<DIExpressionElemAttr>(); })));
[](Attribute a) { return cast<DIExpressionElemAttr>(a); })));
}

MlirAttribute mlirLLVMDINullTypeAttrGet(MlirContext ctx) {
@@ -165,7 +165,7 @@ MlirAttribute mlirLLVMDICompositeTypeAttrGet(
cast<DIScopeAttr>(unwrap(scope)), cast<DITypeAttr>(unwrap(baseType)),
DIFlags(flags), sizeInBits, alignInBits,
llvm::map_to_vector(unwrapList(nElements, elements, elementsStorage),
[](Attribute a) { return a.cast<DINodeAttr>(); })));
[](Attribute a) { return cast<DINodeAttr>(a); })));
}

MlirAttribute
@@ -259,7 +259,7 @@ MlirAttribute mlirLLVMDISubroutineTypeAttrGet(MlirContext ctx,
return wrap(DISubroutineTypeAttr::get(
unwrap(ctx), callingConvention,
llvm::map_to_vector(unwrapList(nTypes, types, attrStorage),
[](Attribute a) { return a.cast<DITypeAttr>(); })));
[](Attribute a) { return cast<DITypeAttr>(a); })));
}

MlirAttribute mlirLLVMDISubprogramAttrGet(
4 changes: 2 additions & 2 deletions mlir/lib/CAPI/IR/BuiltinTypes.cpp
@@ -311,11 +311,11 @@ MlirType mlirVectorTypeGetScalableChecked(MlirLocation loc, intptr_t rank,
}

bool mlirVectorTypeIsScalable(MlirType type) {
return unwrap(type).cast<VectorType>().isScalable();
return cast<VectorType>(unwrap(type)).isScalable();
}

bool mlirVectorTypeIsDimScalable(MlirType type, intptr_t dim) {
return unwrap(type).cast<VectorType>().getScalableDims()[dim];
return cast<VectorType>(unwrap(type)).getScalableDims()[dim];
}

//===----------------------------------------------------------------------===//
11 changes: 5 additions & 6 deletions mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
@@ -371,7 +371,7 @@ static void wmmaPushInputOperand(ConversionPatternRewriter &rewriter,
bool isUnsigned, Value llvmInput,
SmallVector<Value, 4> &operands) {
Type inputType = llvmInput.getType();
auto vectorType = inputType.dyn_cast<VectorType>();
auto vectorType = dyn_cast<VectorType>(inputType);
Type elemType = vectorType.getElementType();

if (elemType.isBF16())
@@ -414,7 +414,7 @@ static void wmmaPushOutputOperand(ConversionPatternRewriter &rewriter,
Value output, int32_t subwordOffset,
bool clamp, SmallVector<Value, 4> &operands) {
Type inputType = output.getType();
auto vectorType = inputType.dyn_cast<VectorType>();
auto vectorType = dyn_cast<VectorType>(inputType);
Type elemType = vectorType.getElementType();
if (elemType.isBF16())
output = rewriter.create<LLVM::BitcastOp>(
@@ -569,9 +569,8 @@ static std::optional<StringRef> mfmaOpToIntrinsic(MFMAOp mfma,
/// on the architecture you are compiling for.
static std::optional<StringRef> wmmaOpToIntrinsic(WMMAOp wmma,
Chipset chipset) {

auto sourceVectorType = wmma.getSourceA().getType().dyn_cast<VectorType>();
auto destVectorType = wmma.getDestC().getType().dyn_cast<VectorType>();
auto sourceVectorType = dyn_cast<VectorType>(wmma.getSourceA().getType());
auto destVectorType = dyn_cast<VectorType>(wmma.getDestC().getType());
auto elemSourceType = sourceVectorType.getElementType();
auto elemDestType = destVectorType.getElementType();

@@ -727,7 +726,7 @@ LogicalResult ExtPackedFp8OpLowering::matchAndRewrite(
Type f32 = getTypeConverter()->convertType(op.getResult().getType());

Value source = adaptor.getSource();
auto sourceVecType = op.getSource().getType().dyn_cast<VectorType>();
auto sourceVecType = dyn_cast<VectorType>(op.getSource().getType());
Type sourceElemType = getElementTypeOrSelf(op.getSource());
// Extend to a v4i8
if (!sourceVecType || sourceVecType.getNumElements() < 4) {
12 changes: 6 additions & 6 deletions mlir/lib/Conversion/ArithToAMDGPU/ArithToAMDGPU.cpp
@@ -65,7 +65,7 @@ static Value castF32To(Type elementType, Value f32, Location loc,

LogicalResult ExtFOnFloat8RewritePattern::match(arith::ExtFOp op) const {
Type inType = op.getIn().getType();
if (auto inVecType = inType.dyn_cast<VectorType>()) {
if (auto inVecType = dyn_cast<VectorType>(inType)) {
if (inVecType.isScalable())
return failure();
if (inVecType.getShape().size() > 1)
@@ -81,13 +81,13 @@ void ExtFOnFloat8RewritePattern::rewrite(arith::ExtFOp op,
Location loc = op.getLoc();
Value in = op.getIn();
Type outElemType = getElementTypeOrSelf(op.getOut().getType());
if (!in.getType().isa<VectorType>()) {
if (!isa<VectorType>(in.getType())) {
Value asFloat = rewriter.create<amdgpu::ExtPackedFp8Op>(
loc, rewriter.getF32Type(), in, 0);
Value result = castF32To(outElemType, asFloat, loc, rewriter);
return rewriter.replaceOp(op, result);
}
VectorType inType = in.getType().cast<VectorType>();
VectorType inType = cast<VectorType>(in.getType());
int64_t numElements = inType.getNumElements();
Value zero = rewriter.create<arith::ConstantOp>(
loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
@@ -179,7 +179,7 @@ LogicalResult TruncFToFloat8RewritePattern::match(arith::TruncFOp op) const {
if (op.getRoundingmodeAttr())
return failure();
Type outType = op.getOut().getType();
if (auto outVecType = outType.dyn_cast<VectorType>()) {
if (auto outVecType = dyn_cast<VectorType>(outType)) {
if (outVecType.isScalable())
return failure();
if (outVecType.getShape().size() > 1)
@@ -202,15 +202,15 @@ void TruncFToFloat8RewritePattern::rewrite(arith::TruncFOp op,
if (saturateFP8)
in = clampInput(rewriter, loc, outElemType, in);
VectorType truncResType = VectorType::get(4, outElemType);
if (!in.getType().isa<VectorType>()) {
if (!isa<VectorType>(in.getType())) {
Value asFloat = castToF32(in, loc, rewriter);
Value asF8s = rewriter.create<amdgpu::PackedTrunc2xFp8Op>(
loc, truncResType, asFloat, /*sourceB=*/nullptr, 0,
/*existing=*/nullptr);
Value result = rewriter.create<vector::ExtractOp>(loc, asF8s, 0);
return rewriter.replaceOp(op, result);
}
VectorType outType = op.getOut().getType().cast<VectorType>();
VectorType outType = cast<VectorType>(op.getOut().getType());
int64_t numElements = outType.getNumElements();
Value zero = rewriter.create<arith::ConstantOp>(
loc, outElemType, rewriter.getFloatAttr(outElemType, 0.0));
7 changes: 3 additions & 4 deletions mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
@@ -214,7 +214,7 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
llvm::enumerate(gpuFuncOp.getArgumentTypes())) {
auto remapping = signatureConversion.getInputMapping(idx);
NamedAttrList argAttr =
argAttrs ? argAttrs[idx].cast<DictionaryAttr>() : NamedAttrList();
argAttrs ? cast<DictionaryAttr>(argAttrs[idx]) : NamedAttrList();
auto copyAttribute = [&](StringRef attrName) {
Attribute attr = argAttr.erase(attrName);
if (!attr)
@@ -234,9 +234,8 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
return;
}
for (size_t i = 0, e = remapping->size; i < e; ++i) {
if (llvmFuncOp.getArgument(remapping->inputNo + i)
.getType()
.isa<LLVM::LLVMPointerType>()) {
if (isa<LLVM::LLVMPointerType>(
llvmFuncOp.getArgument(remapping->inputNo + i).getType())) {
llvmFuncOp.setArgAttr(remapping->inputNo + i, attrName, attr);
}
}
2 changes: 1 addition & 1 deletion mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -668,7 +668,7 @@ static int32_t getCuSparseLtDataTypeFrom(Type type) {
static int32_t getCuSparseDataTypeFrom(Type type) {
if (llvm::isa<ComplexType>(type)) {
// get the element type
auto elementType = type.cast<ComplexType>().getElementType();
auto elementType = cast<ComplexType>(type).getElementType();
if (elementType.isBF16())
return 15; // CUDA_C_16BF
if (elementType.isF16())
18 changes: 7 additions & 11 deletions mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -1579,7 +1579,7 @@ struct NVGPUWarpgroupMmaStoreOpLowering
if (offset)
ti = makeAdd(ti, makeConst(offset));

auto structType = matrixD.getType().cast<LLVM::LLVMStructType>();
auto structType = cast<LLVM::LLVMStructType>(matrixD.getType());

// Number of 32-bit registers owns per thread
constexpr unsigned numAdjacentRegisters = 2;
@@ -1606,9 +1606,9 @@ struct NVGPUWarpgroupMmaStoreOpLowering
int offset = 0;
ImplicitLocOpBuilder b(op->getLoc(), rewriter);
Value matriDValue = adaptor.getMatrixD();
auto stype = matriDValue.getType().cast<LLVM::LLVMStructType>();
auto stype = cast<LLVM::LLVMStructType>(matriDValue.getType());
for (auto [idx, matrixD] : llvm::enumerate(stype.getBody())) {
auto structType = matrixD.cast<LLVM::LLVMStructType>();
auto structType = cast<LLVM::LLVMStructType>(matrixD);
Value innerStructValue = b.create<LLVM::ExtractValueOp>(matriDValue, idx);
storeFragmentedMatrix(b, innerStructValue, op.getDstMemref(), offset);
offset += structType.getBody().size();
@@ -1626,21 +1626,17 @@ struct NVGPUWarpgroupMmaInitAccumulatorOpLowering
matchAndRewrite(nvgpu::WarpgroupMmaInitAccumulatorOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
ImplicitLocOpBuilder b(op->getLoc(), rewriter);
LLVM::LLVMStructType packStructType =
getTypeConverter()
->convertType(op.getMatrixC().getType())
.cast<LLVM::LLVMStructType>();
Type elemType = packStructType.getBody()
.front()
.cast<LLVM::LLVMStructType>()
LLVM::LLVMStructType packStructType = cast<LLVM::LLVMStructType>(
getTypeConverter()->convertType(op.getMatrixC().getType()));
Type elemType = cast<LLVM::LLVMStructType>(packStructType.getBody().front())
.getBody()
.front();
Value zero = b.create<LLVM::ConstantOp>(elemType, b.getZeroAttr(elemType));
Value packStruct = b.create<LLVM::UndefOp>(packStructType);
SmallVector<Value> innerStructs;
// Unpack the structs and set all values to zero
for (auto [idx, s] : llvm::enumerate(packStructType.getBody())) {
auto structType = s.cast<LLVM::LLVMStructType>();
auto structType = cast<LLVM::LLVMStructType>(s);
Value structValue = b.create<LLVM::ExtractValueOp>(packStruct, idx);
for (unsigned i = 0; i < structType.getBody().size(); ++i) {
structValue = b.create<LLVM::InsertValueOp>(
20 changes: 9 additions & 11 deletions mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -618,7 +618,7 @@ static Value expandRank(PatternRewriter &rewriter, Location loc, Value tensor,
static SmallVector<Value> expandInputRanks(PatternRewriter &rewriter,
Location loc, Operation *operation) {
auto rank =
operation->getResultTypes().front().cast<RankedTensorType>().getRank();
cast<RankedTensorType>(operation->getResultTypes().front()).getRank();
return llvm::map_to_vector(operation->getOperands(), [&](Value operand) {
return expandRank(rewriter, loc, operand, rank);
});
@@ -680,15 +680,15 @@ computeTargetSize(PatternRewriter &rewriter, Location loc, IndexPool &indexPool,
// dimension, that is the target size. An occurrence of an additional static
// dimension greater than 1 with a different value is undefined behavior.
for (auto operand : operands) {
auto size = operand.getType().cast<RankedTensorType>().getDimSize(dim);
auto size = cast<RankedTensorType>(operand.getType()).getDimSize(dim);
if (!ShapedType::isDynamic(size) && size > 1)
return {rewriter.getIndexAttr(size), operand};
}

// Filter operands with dynamic dimension
auto operandsWithDynamicDim =
llvm::to_vector(llvm::make_filter_range(operands, [&](Value operand) {
return operand.getType().cast<RankedTensorType>().isDynamicDim(dim);
return cast<RankedTensorType>(operand.getType()).isDynamicDim(dim);
}));

// If no operand has a dynamic dimension, it means all sizes were 1
@@ -718,7 +718,7 @@ static std::pair<SmallVector<OpFoldResult>, SmallVector<Value>>
computeTargetShape(PatternRewriter &rewriter, Location loc,
IndexPool &indexPool, ValueRange operands) {
assert(!operands.empty());
auto rank = operands.front().getType().cast<RankedTensorType>().getRank();
auto rank = cast<RankedTensorType>(operands.front().getType()).getRank();
SmallVector<OpFoldResult> targetShape;
SmallVector<Value> masterOperands;
for (auto dim : llvm::seq<int64_t>(0, rank)) {
@@ -735,7 +735,7 @@ static Value broadcastDynamicDimension(PatternRewriter &rewriter, Location loc,
int64_t dim, OpFoldResult targetSize,
Value masterOperand) {
// Nothing to do if this is a static dimension
auto rankedTensorType = operand.getType().cast<RankedTensorType>();
auto rankedTensorType = cast<RankedTensorType>(operand.getType());
if (!rankedTensorType.isDynamicDim(dim))
return operand;

@@ -817,7 +817,7 @@ static Value broadcastDynamicDimensions(PatternRewriter &rewriter, Location loc,
IndexPool &indexPool, Value operand,
ArrayRef<OpFoldResult> targetShape,
ArrayRef<Value> masterOperands) {
int64_t rank = operand.getType().cast<RankedTensorType>().getRank();
int64_t rank = cast<RankedTensorType>(operand.getType()).getRank();
assert((int64_t)targetShape.size() == rank);
assert((int64_t)masterOperands.size() == rank);
for (auto index : llvm::seq<int64_t>(0, rank))
@@ -848,8 +848,7 @@ emitElementwiseComputation(PatternRewriter &rewriter, Location loc,
Operation *operation, ValueRange operands,
ArrayRef<OpFoldResult> targetShape) {
// Generate output tensor
auto resultType =
operation->getResultTypes().front().cast<RankedTensorType>();
auto resultType = cast<RankedTensorType>(operation->getResultTypes().front());
Value outputTensor = rewriter.create<tensor::EmptyOp>(
loc, targetShape, resultType.getElementType());

@@ -2274,8 +2273,7 @@ struct RFFT2dConverter final : public OpRewritePattern<RFFT2dOp> {
llvm::SmallVector<int64_t, 3> staticSizes;
dispatchIndexOpFoldResults(dims, dynamicSizes, staticSizes);

auto elementType =
input.getType().cast<RankedTensorType>().getElementType();
auto elementType = cast<RankedTensorType>(input.getType()).getElementType();
return RankedTensorType::get(staticSizes, elementType);
}

@@ -2327,7 +2325,7 @@ struct RFFT2dConverter final : public OpRewritePattern<RFFT2dOp> {
auto loc = rfft2d.getLoc();
auto input = rfft2d.getInput();
auto elementType =
input.getType().cast<ShapedType>().getElementType().cast<FloatType>();
cast<FloatType>(cast<ShapedType>(input.getType()).getElementType());

// Compute the output type and set of dynamic sizes
llvm::SmallVector<Value> dynamicSizes;
4 changes: 2 additions & 2 deletions mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -1204,10 +1204,10 @@ convertElementwiseOp(RewriterBase &rewriter, Operation *op,
return rewriter.notifyMatchFailure(op, "no mapping");
matrixOperands.push_back(it->second);
}
auto resultType = matrixOperands[0].getType().cast<gpu::MMAMatrixType>();
auto resultType = cast<gpu::MMAMatrixType>(matrixOperands[0].getType());
if (opType == gpu::MMAElementwiseOp::EXTF) {
// The floating point extension case has a different result type.
auto vectorType = op->getResultTypes()[0].cast<VectorType>();
auto vectorType = cast<VectorType>(op->getResultTypes()[0]);
resultType = gpu::MMAMatrixType::get(resultType.getShape(),
vectorType.getElementType(),
resultType.getOperand());
3 changes: 1 addition & 2 deletions mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -631,8 +631,7 @@ static Value createMaskNeutralValue(ConversionPatternRewriter &rewriter,
Type vectorType) {
const auto &floatSemantics = cast<FloatType>(llvmType).getFloatSemantics();
auto value = getMaskNeutralValue(MaskNeutral{}, floatSemantics);
auto denseValue =
DenseElementsAttr::get(vectorType.cast<ShapedType>(), value);
auto denseValue = DenseElementsAttr::get(cast<ShapedType>(vectorType), value);
return rewriter.create<LLVM::ConstantOp>(loc, vectorType, denseValue);
}

4 changes: 2 additions & 2 deletions mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
@@ -227,8 +227,8 @@ LogicalResult WMMAOp::verify() {
Type sourceAType = getSourceA().getType();
Type destType = getDestC().getType();

VectorType sourceVectorAType = sourceAType.dyn_cast<VectorType>();
VectorType destVectorType = destType.dyn_cast<VectorType>();
VectorType sourceVectorAType = dyn_cast<VectorType>(sourceAType);
VectorType destVectorType = dyn_cast<VectorType>(destType);

Type sourceAElemType = sourceVectorAType.getElementType();
Type destElemType = destVectorType.getElementType();
@@ -26,7 +26,7 @@ struct ConstantOpInterface
LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
const BufferizationOptions &options) const {
auto constantOp = cast<arith::ConstantOp>(op);
auto type = constantOp.getType().dyn_cast<RankedTensorType>();
auto type = dyn_cast<RankedTensorType>(constantOp.getType());

// Only ranked tensors are supported.
if (!type)