@@ -469,10 +469,10 @@ func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_s
469
469
//CHECK-LABEL: func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(
470
470
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?x?xf32>,
471
471
// CHECK-SAME: %[[ARG2:.+]]: tensor<?x?x?x?x?x?x?xf32>, %[[ARG3:.+]]: index, %[[ARG4:.+]]: index, %[[ARG5:.+]]: index) -> tensor<?x?x?x?x?x?x?xf32> {
472
- // CHECK: %[[C0:.+]] = arith.constant 0 : index
473
- // CHECK: %[[C1:.+]] = arith.constant 1 : index
474
- // CHECK: %[[C2:.+]] = arith.constant 2 : index
475
- // CHECK: %[[C3:.+]] = arith.constant 3 : index
472
+ // CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
473
+ // CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
474
+ // CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
475
+ // CHECK-DAG: %[[C3:.+]] = arith.constant 3 : index
476
476
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?x?x?xf32>
477
477
// CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?x?xf32>
478
478
// CHECK: %[[DIM1:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?x?xf32>
@@ -509,8 +509,8 @@ func.func @linalg_transpose_tensor_pack_multiple_tiles(%arg0: tensor<?x32x128xbf
509
509
// CHECK: #[[map:.+]] = affine_map<()[s0] -> (s0 ceildiv 16)>
510
510
//CHECK-LABEL: func.func @linalg_transpose_tensor_pack_multiple_tiles(
511
511
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x32x128xbf16>) -> tensor<32x?x64x16x2xbf16> {
512
- // CHECK: %[[C0:.+]] = arith.constant 0 : index
513
- // CHECK: %[[CST:.+]] = arith.constant 0.000000e+00 : bf16
512
+ // CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
513
+ // CHECK-DAG: %[[CST:.+]] = arith.constant 0.000000e+00 : bf16
514
514
// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x128xbf16>
515
515
// CHECK: %[[VAL0:.+]] = affine.apply #[[map:.+]]()[%[[DIM]]]
516
516
// CHECK: %[[VAL1:.+]] = tensor.empty(%[[VAL0]]) : tensor<32x?x64x16x2xbf16>
0 commit comments