@@ -25,22 +25,18 @@ func.func @drop_one_trip_loops(%arg0 : tensor<?x1x?xf32>, %arg1 : f32, %shape: t
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2) -> ()>
// CHECK-DAG: #[[$MAP3:.*]] = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
- // CHECK-DAG: #[[$MAP4:.*]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK-LABEL: func @drop_one_trip_loops
// CHECK: %[[C2:.*]] = arith.constant 2 : index
- // CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: tensor.collapse_shape %{{.*}} {{\[\[}}0, 1], [2]]
// CHECK: tensor.collapse_shape %{{.*}} {{\[\[}}0, 1], [2, 3], [4]]
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP3]]]
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]
// CHECK: %[[DIM:.*]] = tensor.dim %{{.*}}, %[[C0]]
- // CHECK: %[[VAL_1:.*]] = affine.apply #[[$MAP4]]()[%[[DIM]], %[[C1]]]
// CHECK: %[[DIM_1:.*]] = tensor.dim %{{.*}}, %[[C2]]
- // CHECK: %[[VAL_2:.*]] = affine.apply #[[$MAP4]]()[%[[DIM_1]], %[[C1]]]
// CHECK: %[[DIM_2:.*]] = tensor.dim %{{.*}}, %[[C2]]
- // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %{{.*}} {{\[\[}}0, 1], [2, 3], [4]] output_shape [%[[VAL_1]], 1, %[[VAL_2]], 1, %[[DIM_2]]] : tensor<?x?x?xf32> into tensor<?x1x?x1x?xf32>
+ // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %{{.*}} {{\[\[}}0, 1], [2, 3], [4]] output_shape [%[[DIM]], 1, %[[DIM_1]], 1, %[[DIM_2]]] : tensor<?x?x?xf32> into tensor<?x1x?x1x?xf32>

// CHECK-SLICES-DAG: #[[$MAP1:.*]] = affine_map<(d0, d1, d2) -> (d0, d2)>
// CHECK-SLICES-DAG: #[[$MAP2:.*]] = affine_map<(d0, d1, d2) -> ()>
@@ -79,18 +75,15 @@ func.func @drop_one_trip_loops_all_ones(%arg0 : tensor<1x1x1xf32>, %arg1 : f32,
}
// CHECK-DAG: #[[$MAP1:.*]] = affine_map<(d0) -> ()>
// CHECK-DAG: #[[$MAP2:.*]] = affine_map<(d0) -> (d0)>
- // CHECK-DAG: #[[$MAP3:.*]] = affine_map<()[s0, s1, s2, s3, s4] -> ((((s0 * s1) * s2) * s3) * s4)>
// CHECK-LABEL: func @drop_one_trip_loops_all_ones
// CHECK: %[[C2:.*]] = arith.constant 2 : index
- // CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: tensor.collapse_shape %{{.*}} []
// CHECK: tensor.collapse_shape %{{.*}} {{\[}}[0, 1, 2, 3, 4]]
// CHECK: linalg.generic
// CHECK-SAME: indexing_maps = [#[[$MAP1]], #[[$MAP1]], #[[$MAP2]]]
// CHECK-SAME: iterator_types = ["parallel"]
// CHECK: %[[DIM:.*]] = tensor.dim %{{.*}}, %[[C2]] : tensor<1x1x?x1x1xf32>
- // CHECK: %[[SZ:.*]] = affine.apply #[[$MAP3]]()[%[[C1]], %[[C1]], %[[DIM]], %[[C1]], %[[C1]]]
- // CHECK: %[[EXPAND:.*]] = tensor.expand_shape %{{.*}} {{\[\[}}0, 1, 2, 3, 4]] output_shape [1, 1, %[[SZ]], 1, 1] : tensor<?xf32> into tensor<1x1x?x1x1xf32>
+ // CHECK: %[[EXPAND:.*]] = tensor.expand_shape %{{.*}} {{\[\[}}0, 1, 2, 3, 4]] output_shape [1, 1, %[[DIM]], 1, 1] : tensor<?xf32> into tensor<1x1x?x1x1xf32>

// -----

@@ -406,7 +399,6 @@ func.func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32>
}
// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0)>
- // CHECK-DAG: #[[MAP3:.+]] = affine_map<()[s0, s1, s2] -> ((s0 * s1) * s2)>
// CHECK: func @unit_dim_for_reduction
// CHECK-SAME: %[[ARG0:.+]]: tensor<1x?x1x?xf32>
// CHECK: %[[C1:.+]] = arith.constant 1 : index
@@ -422,8 +414,7 @@ func.func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32>
// CHECK-SAME: ins(%[[RESHAPE]] : tensor<?x?xf32>)
// CHECK-SAME: outs(%[[FILL]] : tensor<?xf32>)
// CHECK: %[[DIM_0:.*]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<1x?x1x?xf32>
- // CHECK: %[[VAL_3:.*]] = affine.apply #[[$MAP3]]()[%[[C1]], %[[DIM_0]], %[[C1]]]
- // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[GENERIC]] {{\[\[}}0, 1]] output_shape [1, %[[VAL_3]]] : tensor<?xf32> into tensor<1x?xf32>
+ // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[GENERIC]] {{\[\[}}0, 1]] output_shape [1, %[[DIM_0]]] : tensor<?xf32> into tensor<1x?xf32>
// CHECK: return %[[EXPANDED]] : tensor<1x?xf32>

// -----
@@ -482,10 +473,8 @@ func.func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x
}
// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1) -> (d0, d1)>
// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1) -> (d0)>
- // CHECK-DAG: #[[MAP3:.+]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK: func @unit_dim_for_reduction_inner
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x1x?x1xf32>
- // CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[CST:.*]] = arith.constant 1.000000e+00 : f32
// CHECK: %[[C2:.*]] = arith.constant 2 : index
@@ -499,8 +488,7 @@ func.func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x
// CHECK-SAME: ins(%[[RESHAPE]] : tensor<?x?xf32>)
// CHECK-SAME: outs(%[[FILL]] : tensor<?xf32>)
// CHECK: %[[DIM_0:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x1x?x1xf32>
- // CHECK: %[[VAL_3:.+]] = affine.apply #[[$MAP3]]()[%[[DIM_0]], %[[C1]]]
- // CHECK: %[[RESULT_RESHAPE:.+]] = tensor.expand_shape %[[RESULT]] {{\[}}[0, 1]] output_shape [%[[VAL_3]], 1] : tensor<?xf32> into tensor<?x1xf32>
+ // CHECK: %[[RESULT_RESHAPE:.+]] = tensor.expand_shape %[[RESULT]] {{\[}}[0, 1]] output_shape [%[[DIM_0]], 1] : tensor<?xf32> into tensor<?x1xf32>
// CHECK: return %[[RESULT_RESHAPE]]

// -----
@@ -1017,7 +1005,6 @@ func.func @drop_unit_pad_dynamic_dims(%arg0: tensor<1x?xf32>) -> tensor<1x?xf32>
return %0 : tensor<1x?xf32>
}

- // CHECK-DAG: #[[$MAP:.+]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK-DAG: #[[$MAP1:.+]] = affine_map<()[s0] -> (s0 + 11)>
// CHECK-LABEL: func @drop_unit_pad_dynamic_dims
// CHECK: %[[C1:.*]] = arith.constant 1 : index
@@ -1027,8 +1014,7 @@ func.func @drop_unit_pad_dynamic_dims(%arg0: tensor<1x?xf32>) -> tensor<1x?xf32>
// CHECK: %[[PADDED:.+]] = tensor.pad %[[COLLAPSE]] low[5] high[6]
// CHECK: } : tensor<?xf32> to tensor<?xf32>
// CHECK: %[[DIM:.+]] = tensor.dim %{{.*}}, %[[C1]] : tensor<1x?xf32>
- // CHECK: %[[VAL_0:.+]] = affine.apply #[[$MAP]]()[%[[C1]], %[[DIM]]]
- // CHECK: %[[VAL_1:.+]] = affine.apply #[[$MAP1]]()[%[[VAL_0]]]
+ // CHECK: %[[VAL_1:.+]] = affine.apply #[[$MAP1]]()[%[[DIM]]]
// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[PADDED]] {{\[\[}}0, 1]] output_shape [1, %[[VAL_1]]] : tensor<?xf32> into tensor<1x?xf32>

// CHECK-SLICES: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 + 11)>
@@ -1090,20 +1076,17 @@ func.func @drop_known_unit_constant_low_high(%arg0: tensor<1x383x128xf32>) -> te

// -----

- // CHECK: #[[$MAP0:.+]] = affine_map<()[s0, s1] -> (s0 * s1)>
// CHECK: #[[$MAP1:.+]] = affine_map<(d0) -> (0, d0)>
// CHECK: #[[$MAP2:.+]] = affine_map<(d0) -> ()>

// CHECK-LABEL: func @drop_unit_dim_corresponding_to_dynamic_dim
// CHECK-SAME: %[[ARG0:.*]]: tensor<1x?x?x1xf32>,
// CHECK-SAME: %[[ARG1:.*]]: index) -> tensor<?x1x61x1xf32> {
// CHECK: %[[VAL_0:.*]] = arith.constant 0 : index
- // CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_2:.*]] = arith.constant dense<1.000000e+00> : tensor<f32>
// CHECK: %[[VAL_3:.*]] = tensor.collapse_shape %[[ARG0]] {{\[\[}}0, 1], [2, 3]] : tensor<1x?x?x1xf32> into tensor<?x?xf32>
// CHECK: %[[VAL_4:.*]] = tensor.empty(%[[ARG1]]) : tensor<?x61xf32>
- // CHECK: %[[VAL_5:.*]] = affine.apply #[[$MAP0]](){{\[}}%[[ARG1]], %[[VAL_1]]]
- // CHECK: %[[VAL_6:.*]] = tensor.empty(%[[VAL_5]]) : tensor<?x61xf32>
+ // CHECK: %[[VAL_6:.*]] = tensor.empty(%[[ARG1]]) : tensor<?x61xf32>
// CHECK: %[[VAL_7:.*]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel"]} ins(%[[VAL_3]], %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>, tensor<f32>, tensor<?x61xf32>) outs(%[[VAL_6]] : tensor<?x61xf32>) {
// CHECK: ^bb0(%[[VAL_8:.*]]: f32, %[[VAL_9:.*]]: f32, %[[VAL_10:.*]]: f32, %[[VAL_11:.*]]: f32):
// CHECK: %[[VAL_12:.*]] = arith.mulf %[[VAL_8]], %[[VAL_9]] : f32
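
// Every hunk above tracks the same simplification: when rebuilding the result
// shape, the unit-extent-dim dropping rewrite (per the test names) no longer
// emits an affine.apply over a map like ()[s0, s1] -> (s0 * s1) just to
// multiply a dynamic size by the constant 1. The tensor.dim result (or the
// original index operand) now feeds tensor.expand_shape's output_shape
// directly, so the dead arith.constant 1 and the multiply maps drop out of
// the expected IR. A minimal before/after sketch of what the updated CHECK
// lines encode; the wrapper function and value names here are illustrative,
// not taken from the test file:
//
// func.func @expand_after_fold(%t : tensor<?x?xf32>) -> tensor<?x1x?xf32> {
//   %c0 = arith.constant 0 : index
//   %c1 = arith.constant 1 : index
//   %d0 = tensor.dim %t, %c0 : tensor<?x?xf32>
//   %d1 = tensor.dim %t, %c1 : tensor<?x?xf32>
//   // Previously the expected output routed the expanded size through a
//   // redundant multiply-by-one:
//   //   %sz = affine.apply affine_map<()[s0, s1] -> (s0 * s1)>()[%d0, %c1]
//   // and passed %sz to output_shape; now the dim value is used as-is.
//   %e = tensor.expand_shape %t [[0, 1], [2]] output_shape [%d0, 1, %d1]
//       : tensor<?x?xf32> into tensor<?x1x?xf32>
//   return %e : tensor<?x1x?xf32>
// }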