Commit 26f1dd8

[mlir][tosa] Update value to values for ConstOp
Signed-off-by: Jerry Ge <[email protected]>
Change-Id: I59501284d8612038f819002b250ed4d86a0b05cd
1 parent 46236f4 commit 26f1dd8

25 files changed: +563 additions, −563 deletions

mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td

Lines changed: 3 additions & 3 deletions
@@ -2338,7 +2338,7 @@ def Tosa_RescaleOp: Tosa_Op<"rescale", [Pure,
 // Operator: const
 //===----------------------------------------------------------------------===//
 def Tosa_ConstOp : Tosa_Op<"const", [ConstantLike, Pure,
-                           AllShapesMatch<["value", "output"]>,
+                           AllShapesMatch<["values", "output"]>,
                            FirstAttrDerivedResultType]> {
   let summary = "Constant op.";
 
@@ -2350,12 +2350,12 @@ def Tosa_ConstOp : Tosa_Op<"const", [ConstantLike, Pure,
 
     ```mlir
     // Generic form
-    %out = "tosa.const"() {value = dense<0> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
+    %out = "tosa.const"() {values = dense<0> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
     ```
   }];
 
   let arguments = (ins
-    ElementsAttr:$value
+    ElementsAttr:$values
   );
 
   let results = (outs
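The ODS rename propagates in two directions: the textual IR now spells the attribute `values` (as in the generic form above), and the generated C++ accessors become `getValues()` / `getValuesAttr()` in place of `getValue()` / `getValueAttr()`, which is what drives the C++ changes in the files below. A minimal illustrative sketch of downstream code consuming the renamed accessor (only the accessor names come from this commit; the helper itself is hypothetical):

```cpp
#include <optional>

#include "llvm/ADT/APInt.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/BuiltinAttributes.h"

// Hypothetical helper: extract the splat integer held by a tosa.const, if any.
static std::optional<llvm::APInt> getSplatInt(mlir::tosa::ConstOp op) {
  // Before this commit this read op.getValue() / op.getValueAttr().
  mlir::ElementsAttr payload = op.getValues();
  if (auto dense = llvm::dyn_cast<mlir::DenseIntElementsAttr>(payload))
    if (dense.isSplat())
      return dense.getSplatValue<llvm::APInt>();
  return std::nullopt;
}
```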

mlir/lib/Conversion/TosaToArith/TosaToArith.cpp

Lines changed: 1 addition & 1 deletion
@@ -28,7 +28,7 @@ class ConstOpConverter : public OpRewritePattern<tosa::ConstOp> {
 
   LogicalResult matchAndRewrite(tosa::ConstOp op,
                                 PatternRewriter &rewriter) const final {
-    rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, op.getValue());
+    rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, op.getValues());
     return success();
   }
 };
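The one-line replacement works because `arith.constant` carries its payload as a typed attribute too, so the `ElementsAttr` returned by the renamed accessor can be forwarded directly. An equivalent, spelled-out version of the same rewrite, shown only to make the data flow explicit (illustrative, not part of this diff):

```cpp
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/PatternMatch.h"

// Equivalent to replaceOpWithNewOp above, written out step by step.
static mlir::LogicalResult rewriteConst(mlir::tosa::ConstOp op,
                                        mlir::PatternRewriter &rewriter) {
  mlir::ElementsAttr payload = op.getValues(); // renamed from getValue()
  auto newConst =
      rewriter.create<mlir::arith::ConstantOp>(op.getLoc(), payload);
  rewriter.replaceOp(op, newConst.getResult());
  return mlir::success();
}
```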

mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp

Lines changed: 1 addition & 1 deletion
@@ -882,7 +882,7 @@ OpFoldResult CastOp::fold(FoldAdaptor adaptor) {
   return {};
 }
 
-OpFoldResult ConstOp::fold(FoldAdaptor adaptor) { return getValueAttr(); }
+OpFoldResult ConstOp::fold(FoldAdaptor adaptor) { return getValuesAttr(); }
 
 OpFoldResult ConstShapeOp::fold(FoldAdaptor adaptor) { return getValueAttr(); }
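Since `tosa.const` is `ConstantLike` and its folder now hands back `getValuesAttr()`, generic constant matching keeps working unchanged across the rename. A small illustrative check (not from this commit):

```cpp
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/Matchers.h"

// Illustrative: m_Constant still binds the dense payload of a tosa.const,
// because ConstOp::fold() returns the (renamed) values attribute.
static bool isDenseConst(mlir::tosa::ConstOp constOp) {
  mlir::DenseElementsAttr payload;
  return mlir::matchPattern(constOp.getResult(), mlir::m_Constant(&payload));
}
```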

mlir/lib/Dialect/Tosa/IR/TosaOps.cpp

Lines changed: 2 additions & 2 deletions
@@ -352,7 +352,7 @@ static LogicalResult verifyConvOp(T op) {
 
 LogicalResult tosa::ConstOp::verify() {
 
-  auto attrType = llvm::dyn_cast<TensorType>(getValueAttr().getType());
+  auto attrType = llvm::dyn_cast<TensorType>(getValuesAttr().getType());
   auto outputType = llvm::dyn_cast<TensorType>(getOutput().getType());
 
   if (!attrType || !outputType) {
@@ -3037,7 +3037,7 @@ LogicalResult tosa::ConstShapeOp::verify() {
   auto valuesRank = getValue().getType().getRank();
   if (valuesRank != 1)
     return emitOpError("expect elements in attribute value with rank 1");
-  // check that number of elements in value attr equal to rank of result shape
+  // check that number of elements in values attr equal to rank of result shape
   auto count = getValue().getNumElements();
   auto rank = (cast<tosa::shapeType>(getResult().getType())).getRank();
   if (!(count == rank || (count == 1 && rank == 0))) {

mlir/lib/Dialect/Tosa/Transforms/TosaFolders.cpp

Lines changed: 1 addition & 1 deletion
@@ -363,7 +363,7 @@ struct ReduceConstantOptimization : public OpRewritePattern<OperationType> {
       return rewriter.notifyMatchFailure(op, "result type shape is not static");
 
     auto reductionAxis = op.getAxis();
-    const auto denseElementsAttr = constOp.getValue();
+    const auto denseElementsAttr = constOp.getValues();
     const auto shapedOldElementsValues =
         cast<ShapedType>(denseElementsAttr.getType());
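The constant-reduction folder consumes the renamed attribute the same way other transforms do; a generic sketch of walking the payload elements through `getValues()` (helper name and loop body are illustrative and assume an integer element type):

```cpp
#include "llvm/ADT/APInt.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/IR/BuiltinAttributes.h"

// Illustrative: iterate a tosa.const's elements via the renamed accessor.
static int64_t countNonZeroElements(mlir::tosa::ConstOp constOp) {
  auto dense = llvm::dyn_cast<mlir::DenseElementsAttr>(constOp.getValues());
  if (!dense) // e.g. a resource-backed ElementsAttr
    return -1;
  int64_t nonZero = 0;
  for (llvm::APInt element : dense.getValues<llvm::APInt>())
    nonZero += element.isZero() ? 0 : 1;
  return nonZero;
}
```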

mlir/lib/Dialect/Tosa/Transforms/TosaReduceTransposes.cpp

Lines changed: 1 addition & 1 deletion
@@ -418,7 +418,7 @@ std::optional<Value> TosaReduceTransposes::buildMappedToValue(
 std::optional<Value> TosaReduceTransposes::buildMappedToValue(
     ConstOp constOp, const DenseMap<Value, Value> &valuesMap,
     IRRewriter &rewriter, ArrayRef<int32_t> hoistedPerms) {
-  auto denseAttr = llvm::dyn_cast<DenseElementsAttr>(constOp.getValue());
+  auto denseAttr = llvm::dyn_cast<DenseElementsAttr>(constOp.getValues());
   if (!denseAttr)
     return std::nullopt;
   auto maybeNewDenseAttr = transposeDenseAttribute(denseAttr, hoistedPerms);

mlir/test/Conversion/TosaToArith/tosa-to-arith.mlir

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 // CHECK-LABEL: func @const_test
 func.func @const_test() -> (tensor<i32>) {
   // CHECK: [[C3:%.+]] = arith.constant dense<3> : tensor<i32>
-  %result = "tosa.const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32>
+  %result = "tosa.const"() {values = dense<3> : tensor<i32>} : () -> tensor<i32>
 
   // CHECK: return [[C3]]
   return %result : tensor<i32>

mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir

Lines changed: 44 additions & 44 deletions
Large diffs are not rendered by default.

mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir

Lines changed: 4 additions & 4 deletions
@@ -472,7 +472,7 @@ func.func @test_simple_f32(%arg0: tensor<1xf32>) -> () {
 
   // CHECK: linalg.generic
   // CHECK: arith.mulf
-  %shift = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
+  %shift = "tosa.const"() <{values = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
   %4 = tosa.mul %0, %1, %shift : (tensor<1xf32>, tensor<1xf32>, tensor<1xi8>) -> tensor<1xf32>
 
   // CHECK: linalg.generic
@@ -619,7 +619,7 @@ func.func @test_simple_i16(%arg0: tensor<1xi16>) -> () {
   // CHECK: arith.extsi
   // CHECK: arith.extsi
   // CHECK: arith.muli
-  %shift = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
+  %shift = "tosa.const"() <{values = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
   %0 = tosa.mul %arg0, %arg0, %shift : (tensor<1xi16>, tensor<1xi16>, tensor<1xi8>) -> tensor<1xi32>
 
   return
@@ -648,13 +648,13 @@ func.func @test_simple_i32(%arg0: tensor<1xi32>, %unsigned: tensor<1xui32>, %uns
 
   // CHECK: linalg.generic
   // CHECK: arith.muli
-  %shift1 = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
+  %shift1 = "tosa.const"() <{values = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
   %2 = tosa.mul %arg0, %arg0, %shift1 : (tensor<1xi32>, tensor<1xi32>, tensor<1xi8>) -> tensor<1xi32>
 
   // CHECK: linalg.generic
   // CHECK: arith.constant 2
   // CHECK: apply_scale
-  %shift2 = "tosa.const"() <{value = dense<2> : tensor<1xi8>}> : () -> tensor<1xi8>
+  %shift2 = "tosa.const"() <{values = dense<2> : tensor<1xi8>}> : () -> tensor<1xi8>
   %3 = tosa.mul %arg0, %arg0, %shift2: (tensor<1xi32>, tensor<1xi32>, tensor<1xi8>) -> tensor<1xi32>
 
   // CHECK: linalg.generic

mlir/test/Conversion/TosaToSCF/tosa-to-scf.mlir

Lines changed: 2 additions & 2 deletions
@@ -6,7 +6,7 @@ func.func @while_test(%arg0 : tensor<i32>) -> (tensor<i32>) {
   // CHECK: [[WHILE:%.+]] = scf.while ([[ARG1:%.+]] = [[ARG0]])
   %0 = tosa.while_loop (%arg1 = %arg0) : (tensor<i32>) -> tensor<i32> {
     // CHECK: tosa.const
-    %1 = "tosa.const"() {value = dense<3> : tensor<i32>} : () -> tensor<i32>
+    %1 = "tosa.const"() {values = dense<3> : tensor<i32>} : () -> tensor<i32>
 
     // CHECK: [[COMPARE:%.+]] = tosa.greater_equal
     %2 = tosa.greater_equal %1, %arg1 : (tensor<i32>, tensor<i32>) -> tensor<i1>
@@ -18,7 +18,7 @@ func.func @while_test(%arg0 : tensor<i32>) -> (tensor<i32>) {
   // CHECK: ^bb0([[ARG1:%.+]]: tensor<i32>)
   ^bb0(%arg1: tensor<i32>):
     // CHECK: tosa.const
-    %1 = "tosa.const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
+    %1 = "tosa.const"() {values = dense<1> : tensor<i32>} : () -> tensor<i32>
 
     // CHECK: [[ADD:%.+]] = tosa.add
     %2 = tosa.add %arg1, %1 : (tensor<i32>, tensor<i32>) -> tensor<i32>

mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir

Lines changed: 5 additions & 5 deletions
@@ -498,7 +498,7 @@ func.func @slice_dyn(%arg0: tensor<?xf32>) -> (tensor<?xf32>) {
 // CHECK-SAME: (%[[ARG0:[0-9a-zA-Z_]*]]:
 func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  %pad_const = "tosa.const"() {values = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
   // CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
@@ -514,7 +514,7 @@ func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
 
 func.func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %pad_const = "tosa.const"() {value = dense<3> : tensor<1xi32>} : () -> tensor<1xi32>
+  %pad_const = "tosa.const"() {values = dense<3> : tensor<1xi32>} : () -> tensor<1xi32>
   // CHECK: [[CST:%.+]] = arith.constant 3 : i32
   // CHECK: tensor.pad
   // CHECK: tensor.yield [[CST]]
@@ -525,7 +525,7 @@ func.func @pad_int(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
 
 func.func @pad_quant(%arg0 : tensor<1x2xi32>) -> (tensor<4x9xi32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %pad_const = "tosa.const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
+  %pad_const = "tosa.const"() {values = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
   // CHECK: [[CST:%.+]] = arith.constant 0 : i32
   // CHECK: tensor.pad
   // CHECK: tensor.yield [[CST]]
@@ -554,7 +554,7 @@ func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
 
 func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
   %0 = tosa.const_shape {value = dense<[1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  %pad_const = "tosa.const"() {values = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant 1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
   // CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index
@@ -570,7 +570,7 @@ func.func @pad_dyn_input(%arg0 : tensor<?x2xf32>) -> (tensor<?x9xf32>) {
 
 func.func @pad_dyn_padding(%arg0 : tensor<1x2xf32>) -> (tensor<?x9xf32>) {
   %0 = tosa.const_shape {value = dense<[-1, 2, 3, 4]> : tensor<4xindex>} : () -> !tosa.shape<4>
-  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  %pad_const = "tosa.const"() {values = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK-DAG: [[INDEX1:%.+]] = arith.constant -1 : index
   // CHECK-DAG: [[INDEX2:%.+]] = arith.constant 2 : index
   // CHECK-DAG: [[INDEX3:%.+]] = arith.constant 3 : index

mlir/test/Dialect/Tosa/availability.mlir

Lines changed: 15 additions & 15 deletions
@@ -19,8 +19,8 @@ func.func @test_argmax(%arg0: tensor<14x19xf32>) -> tensor<14xi32> {
 func.func @test_avg_pool2d(%arg0: tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> {
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [int16, fp8e4m3, fp8e5m2, bf16] ]
-  %input_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
-  %output_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %input_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %output_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
   %0 = tosa.avg_pool2d %arg0, %input_zp, %output_zp {acc_type = f32, kernel = array<i64: 2, 2>, pad = array<i64: 0, 1, 0, 1>, stride = array<i64: 1, 1>} : (tensor<1x7x7x9xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x7x7x9xf32>
   return %0 : tensor<1x7x7x9xf32>
 }
@@ -30,8 +30,8 @@ func.func @test_avg_pool2d(%arg0: tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> {
 func.func @test_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> {
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [int4, int16, fp8e4m3, fp8e5m2, bf16] ]
-  %input_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
-  %weight_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %input_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %weight_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
   %0 = tosa.conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, local_bound = true} : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
   return %0 : tensor<1x4x4x8xf32>
 }
@@ -41,8 +41,8 @@ func.func @test_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %
 func.func @test_conv3d(%arg0: tensor<1x4x8x21x17xf32>, %arg1: tensor<34x1x1x1x17xf32>, %arg2: tensor<34xf32>) -> tensor<1x4x8x21x34xf32> {
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [int4, int16, fp8e4m3, fp8e5m2, bf16] ]
-  %input_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
-  %weight_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %input_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %weight_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
   %0 = tosa.conv3d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = f32, dilation = array<i64: 1, 1, 1>, pad = array<i64: 0, 0, 0, 0, 0, 0>, stride = array<i64: 1, 1, 1>} : (tensor<1x4x8x21x17xf32>, tensor<34x1x1x1x17xf32>, tensor<34xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x8x21x34xf32>
   return %0 : tensor<1x4x8x21x34xf32>
 }
@@ -52,8 +52,8 @@ func.func @test_conv3d(%arg0: tensor<1x4x8x21x17xf32>, %arg1: tensor<34x1x1x1x17
 func.func @test_depthwise_conv2d(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<1x1x4x2xf32>, %arg2: tensor<8xf32>) -> tensor<1x4x4x8xf32> {
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [int4, int16, fp8e4m3, fp8e5m2, bf16] ]
-  %input_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
-  %weight_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %input_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %weight_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
   %0 = tosa.depthwise_conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} : (tensor<1x4x4x4xf32>, tensor<1x1x4x2xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
   return %0 : tensor<1x4x4x8xf32>
 }
@@ -99,8 +99,8 @@ func.func @test_rfft2d(%arg0: tensor<13x8x16xf32>) -> (tensor<13x8x9xf32>, tenso
 func.func @test_transpose_conv2d(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [int4, int16, fp8e4m3, fp8e5m2, bf16] ]
-  %input_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
-  %weight_zp = "tosa.const"() <{value = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %input_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
+  %weight_zp = "tosa.const"() <{values = dense<0.0> : tensor<1xf32>}> : () -> tensor<1xf32>
   %0 = tosa.transpose_conv2d %arg0, %arg1, %arg2, %input_zp, %weight_zp {acc_type = f32, out_pad = array<i64: 0, 0, 0, 0>, out_shape = array<i64: 1, 32, 32, 16>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x32x32x16xf32>
   return %0 : tensor<1x32x32x16xf32>
 }
@@ -260,7 +260,7 @@ func.func @test_min(%arg0: tensor<13x21x3xf32>, %arg1: tensor<1x21x3xf32>) -> te
 // -----
 // CHECK-LABEL: mul
 func.func @test_mul(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x1x3xf32>) -> tensor<13x21x3xf32> {
-  %shift = "tosa.const"() <{value = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
+  %shift = "tosa.const"() <{values = dense<0> : tensor<1xi8>}> : () -> tensor<1xi8>
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [bf16] ]
   %0 = tosa.mul %arg0, %arg1, %shift : (tensor<13x21x3xf32>, tensor<13x1x3xf32>, tensor<1xi8>) -> tensor<13x21x3xf32>
@@ -514,7 +514,7 @@ func.func @test_concat(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -
 // CHECK-LABEL: pad
 func.func @test_pad(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
   %padding = tosa.const_shape {value = dense<0> : tensor<6xindex>} : () -> !tosa.shape<6>
-  %pad_const = "tosa.const"() {value = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
+  %pad_const = "tosa.const"() {values = dense<3.14> : tensor<1xf32>} : () -> tensor<1xf32>
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [fp8e4m3, fp8e5m2, bf16] ]
   %0 = tosa.pad %arg0, %padding, %pad_const : (tensor<13x21x3xf32>, !tosa.shape<6>, tensor<1xf32>) -> tensor<13x21x3xf32>
@@ -623,7 +623,7 @@ func.func @test_rescale(%arg0: tensor<13x21x3x!quant.uniform<u8:f32, 0.015655439
 func.func @test_const(%arg0 : index) -> tensor<4xi32> {
   // CHECK: profiles: [ [pro_int, pro_fp] ]
   // CHECK: extensions: [ [int4, int16, fp8e4m3, fp8e5m2, bf16] ]
-  %0 = "tosa.const"() {value = dense<[3, 0, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
+  %0 = "tosa.const"() {values = dense<[3, 0, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
   return %0 : tensor<4xi32>
 }
 
@@ -654,7 +654,7 @@ func.func @test_cond_if(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1
 // -----
 // CHECK-LABEL: while_loop
 func.func @test_while_loop(%arg0: tensor<10xi32>, %arg1: tensor<i32>) {
-  %0 = "tosa.const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
+  %0 = "tosa.const"() {values = dense<0> : tensor<i32>} : () -> tensor<i32>
   // CHECK: profiles: [ ]
   // CHECK: extensions: [ [controlflow] ]
   %1:3 = tosa.while_loop (%arg2 = %0, %arg3 = %0, %arg4 = %arg0) : (tensor<i32>, tensor<i32>, tensor<10xi32>) -> (tensor<i32>, tensor<i32>, tensor<10xi32>) {
@@ -663,7 +663,7 @@ func.func @test_while_loop(%arg0: tensor<10xi32>, %arg1: tensor<i32>) {
     tosa.yield %3 : tensor<i1>
   } do {
   ^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<10xi32>):
-    %2 = "tosa.const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
+    %2 = "tosa.const"() {values = dense<1> : tensor<i32>} : () -> tensor<i32>
     %3 = tosa.add %arg3, %2 : (tensor<i32>, tensor<i32>) -> tensor<i32>
     %7 = tosa.const_shape {value = dense<[1]> : tensor<1xindex>} : () -> !tosa.shape<1>
     %4 = tosa.reshape %2, %7 : (tensor<i32>, !tosa.shape<1>) -> tensor<1xi32>
