@@ -199,31 +199,3 @@ func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]]
// CHECK: %[[RESULT:.+]] = linalg.tensor_collapse_shape %[[NOFUSE]]
// CHECK: return %[[RESULT]]
-
-
-// -----
-
-func @generic_op_permultation_reshape_consumer_fusion_unused_dim(%arg0 : tensor<6x1xf32>) -> tensor<6xi32> {
-  %0 = linalg.init_tensor [6, 1] : tensor<6x1xi32>
-  %1 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
-                                        affine_map<(d0, d1) -> (d0, d1)>],
-    iterator_types = ["parallel", "parallel"]}
-    ins(%arg0 : tensor<6x1xf32>) outs(%0 : tensor<6x1xi32>) {
-  ^bb0(%arg3: f32, %arg4: i32):  // no predecessors
-    %5 = arith.fptosi %arg3 : f32 to i32
-    linalg.yield %5 : i32
-  } -> tensor<6x1xi32>
-  %6 = linalg.tensor_collapse_shape %1 [[0, 1]] : tensor<6x1xi32> into tensor<6xi32>
-  return %6 : tensor<6xi32>
-}
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1) -> (d0)>
-// CHECK: func @generic_op_permultation_reshape_consumer_fusion_unused_dim
-// CHECK-SAME:   %[[ARG0:.+]]: tensor<6x1xf32>
-// CHECK:   %[[T0:.+]] = linalg.init_tensor [6, 1]
-// CHECK:   %[[T1:.+]] = linalg.tensor_collapse_shape %[[T0]]
-// CHECK-SAME:   [0, 1]
-// CHECK:   linalg.generic
-// CHECK-SAME:     indexing_maps = [#[[MAP0]], #[[MAP1]]]
-// CHECK-SAME:     ins(%[[ARG0]] : tensor<6x1xf32>)
-// CHECK-SAME:     outs(%[[T1]] : tensor<6xi32>)