32  | 32  | dtype_abbrs,
33  | 33  | IS_MACOS,
34  | 34  | IS_X86,
35  |     | - is_navi_arch,
36  | 35  | skipCUDAMemoryLeakCheckIf,
37  | 36  | skipIfCrossRef,
38  | 37  | skipIfTorchDynamo,

@@ -204,19 +203,6 @@ def format_op(op):

204 | 203 | # Tensors are not alike
205 | 204 | inductor_skips["cuda"]["logcumsumexp"] = {f32}
206 | 205 | inductor_skips["cuda"]["special.modified_bessel_i1"] = {f64}
207 |     | - if is_navi_arch():
208 |     | -     inductor_skips["cuda"]["aminmax"] = {b8, f16, f32, f64, i32, i64}
209 |     | -     inductor_skips["cuda"]["dist"] = {b8, f16, f32, f64, i32, i64}
210 |     | -     inductor_skips["cuda"]["kron"] = {b8, f16, f32, f64, i32, i64}
211 |     | -     inductor_skips["cuda"]["masked.std"] = {b8, f16, f32, f64, i32, i64}
212 |     | -     inductor_skips["cuda"]["masked.var"] = {b8, f16, f32, f64, i32, i64}
213 |     | -     inductor_skips["cuda"][("max", "reduction_no_dim")] = {b8, f16, f32, f64, i32, i64}
214 |     | -     inductor_skips["cuda"][("min", "reduction_no_dim")] = {b8, f16, f32, f64, i32, i64}
215 |     | -     inductor_skips["cuda"]["nn.functional.conv_transpose3d"] = {b8, f16, f32, f64, i32, i64}
216 |     | -     inductor_skips["cuda"]["std"] = {b8, f16, f32, f64, i32, i64}
217 |     | -     inductor_skips["cuda"]["std_mean"] = {b8, f16, f32, f64, i32, i64}
218 |     | -     inductor_skips["cuda"]["var"] = {b8, f16, f32, f64, i32, i64}
219 |     | -     inductor_skips["cuda"]["var_mean"] = {b8, f16, f32, f64, i32, i64}
220 | 206 |
221 | 207 | inductor_expected_failures_single_sample = defaultdict(dict)
222 | 208 |
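For context, the `is_navi_arch()` helper whose import and uses are dropped above is not itself shown in this hunk. Below is a minimal, hypothetical sketch of what such an architecture check commonly looks like on ROCm builds of PyTorch; the function name, the `gcnArchName` attribute lookup, and the gfx prefixes are assumptions for illustration, not taken from this PR.

```python
# Hypothetical sketch of a Navi-architecture check (not the actual helper removed in this PR).
# Assumes a ROCm build of PyTorch where device properties expose `gcnArchName`.
import torch


def is_navi_arch_sketch() -> bool:
    if not torch.cuda.is_available():
        return False
    # Fall back to an empty string if the attribute is absent (e.g. non-ROCm builds).
    arch = getattr(torch.cuda.get_device_properties(0), "gcnArchName", "")
    # Navi (RDNA) GPUs report gfx10xx / gfx11xx architecture names.
    return any(arch.startswith(prefix) for prefix in ("gfx10", "gfx11"))
```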