@@ -1804,7 +1804,6 @@ def fn(x):
         # make sure things also work if they aren't unrolled
         self.common(fn, (torch.randn(8, 3),))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_sum_low_prec(self):
         # fp16 nyi for cpu
         if self.device == "cpu":
@@ -1815,7 +1814,6 @@ def fn(a):

         self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_prime_size(self):
         def fn(a):
             return torch.max(a), torch.sum(a)
@@ -1827,7 +1825,6 @@ def fn(a):

     @skip_if_gpu_halide
     @skipCPUIf(IS_MACOS, "fails on macos")
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_var(self):
         def fn(a):
             return torch.var(a)
@@ -2939,8 +2936,6 @@ def fn(a, b):

         self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))

-    @skip_if_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_tensor_reduction(self):
         if not _has_sufficient_memory(self.device, 4.5 * 1024**3):  # 4.5 GiB
             raise unittest.SkipTest("insufficient memory")
@@ -2961,8 +2956,6 @@ def fn(a):
         expect = torch.tensor(2, dtype=torch.int8, device=self.device)
         self.assertEqual(actual, expect)

-    @skip_if_gpu_halide  # only 32-bit indexing
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_broadcast_reduction(self):
         if self.device == "cpu":
             raise unittest.SkipTest("Fails on CPU")
@@ -4113,7 +4106,6 @@ def test_conv2d_channels_last(self):
             check_lowp=False,
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_conv2d_backward_channels_last(self):
         def fn(grad_output, inp, weight):
             convolution_backward_8 = torch.ops.aten.convolution_backward.default(
@@ -4898,8 +4890,6 @@ def fn(x, y):
         self.assertEqual(a.stride(), c.stride())
         self.assertEqual(c.stride()[2], 1)

-    @skip_if_gpu_halide
-    @skipIfRocmArch(NAVI_ARCH)
     def test_std(self):
         def fn(x):
             return (
@@ -4942,7 +4932,6 @@ def test_batch_norm_2d(self):

     # From yolov3
     @with_tf32_off
-    @skipIfRocmArch(NAVI_ARCH)
     def test_batch_norm_2d_2(self):
         if self.device == "cpu":
             raise unittest.SkipTest(f"requires {GPU_TYPE}")
@@ -5079,18 +5068,6 @@ def fn(x):

         self.common(fn, (x,))

-    def test_polar(self):
-        def fn(dist, angle):
-            return torch.polar(dist, angle)
-
-        inp = (
-            torch.tensor([1, 2], dtype=torch.float64),
-            torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64),
-        )
-        self.common(fn, (*inp,))
-
-    @skip_if_gpu_halide  # incorrect result on CUDA
-    @skipIfRocmArch(NAVI_ARCH)
     def test_cauchy(self):
         def fn(x, y):
             return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
@@ -6491,7 +6468,6 @@ def fn(a):
         y = fn_compiled(x)
         self.assertTrue(y is not x)

-    @skipIfRocmArch(NAVI_ARCH)
     def test_l1_loss(self):
         def fn(a, b):
             return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
@@ -6899,7 +6875,6 @@ def fn(x):
             fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_any(self):
         def fn(x):
             return (
@@ -7652,7 +7627,6 @@ def fn(a, dim, index, b, reduce):

     @skip_if_gpu_halide
     # issue #1150
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dense_mask_index(self):
         r"""
         There will be a little difference for reduce order between aten and inductor
@@ -8662,7 +8636,6 @@ def fn(a, b):
         b = torch.rand(2, 2, 1, 4, 1).int()
         self.common(fn, (a, b))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin1(self):
         def fn(x):
             return (aten.argmax(x), aten.argmin(x))
@@ -8674,7 +8647,6 @@ def fn(x):
             ],
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin2(self):
         def fn(x):
             return (
@@ -8686,7 +8658,6 @@ def fn(x):

         self.common(fn, (torch.randn([144, 144]),))

-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_duplicates(self):
         def fn(x):
             return (
@@ -8708,8 +8679,6 @@ def fn(x):
         t1 = torch.randint(8, size=(1028, 1028))
         self.common(fn, (t1,))

-    @skip_if_halide  # nan behavior
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_nan(self):
         def fn(x):
             return (
@@ -8833,7 +8802,6 @@ def fn(x):
             ],
         )

-    @skipIfRocmArch(NAVI_ARCH)
     def test_tmp_not_defined_issue1(self):
         def forward(
             primals_3,
@@ -9234,7 +9202,6 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
         else:
             self.assertEqual(len(inps), 0)

-    @skipIfRocmArch(NAVI_ARCH)
     def test_dtype_mismatch_issue(self):
         def fn(x):
             attn = torch.nn.functional.pad(x, [0, 1])
@@ -12235,7 +12202,6 @@ def test_rnn_compile_safe(self):

 class NanCheckerTest(TestCase):
     @config.patch("nan_asserts", True)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_nan_checker_pass(self):
         def f(x):
             return torch.softmax(x, dim=-1)