
Commit 7c3b97c

Unskip Navi inductor UTs
1 parent ed9a87a commit 7c3b97c

File tree

1 file changed: 0 additions, 19 deletions

test/inductor/test_torchinductor.py

Lines changed: 0 additions & 19 deletions
@@ -1654,7 +1654,6 @@ def fn(x):
         # make sure things also work if they aren't unrolled
         self.common(fn, (torch.randn(8, 3),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_sum_low_prec(self):
         # fp16 nyi for cpu
         if self.device == "cpu":
@@ -1665,7 +1664,6 @@ def fn(a):
 
         self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_prime_size(self):
         def fn(a):
             return torch.max(a), torch.sum(a)
@@ -1676,7 +1674,6 @@ def fn(a):
         self.common(fn, (sample,))
 
     @skipCPUIf(IS_MACOS, "fails on macos")
-    @skipIfRocmArch(NAVI_ARCH)
     def test_multilayer_var(self):
         def fn(a):
             return torch.var(a)
@@ -2676,7 +2673,6 @@ def fn(a, b):
 
         self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_tensor_reduction(self):
         if not _has_sufficient_memory(self.device, 4.5 * 1024**3):  # 4.5 GiB
             raise unittest.SkipTest("insufficient memory")
@@ -2697,7 +2693,6 @@ def fn(a):
         expect = torch.tensor(2, dtype=torch.int8, device=self.device)
         self.assertEqual(actual, expect)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_large_broadcast_reduction(self):
         if self.device == "cpu":
             raise unittest.SkipTest("Fails on CPU")
@@ -3772,7 +3767,6 @@ def test_conv2d_channels_last(self):
             check_lowp=False,
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_conv2d_backward_channels_last(self):
         def fn(grad_output, inp, weight):
             convolution_backward_8 = torch.ops.aten.convolution_backward.default(
@@ -4532,7 +4526,6 @@ def fn(x, y):
         self.assertEqual(a.stride(), c.stride())
         self.assertEqual(c.stride()[2], 1)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_std(self):
         def fn(x):
             return (
@@ -4575,7 +4568,6 @@ def test_batch_norm_2d(self):
 
     # From yolov3
     @with_tf32_off
-    @skipIfRocmArch(NAVI_ARCH)
     def test_batch_norm_2d_2(self):
         if self.device == "cpu":
             raise unittest.SkipTest(f"requires {GPU_TYPE}")
@@ -4711,7 +4703,6 @@ def fn(x):
 
         self.common(fn, (x,))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_cauchy(self):
         def fn(x, y):
             return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
@@ -6049,7 +6040,6 @@ def fn(a):
         y = fn_compiled(x)
         self.assertTrue(y is not x)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_l1_loss(self):
         def fn(a, b):
             return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
@@ -6447,7 +6437,6 @@ def fn(x):
             fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_any(self):
         def fn(x):
             return (
@@ -7196,7 +7185,6 @@ def fn(a, dim, index, b, reduce):
         )
 
     # issue #1150
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dense_mask_index(self):
         r"""
         There will be a little difference for reduce order between aten and inductor
@@ -8172,7 +8160,6 @@ def fn(a, b):
         b = torch.rand(2, 2, 1, 4, 1).int()
         self.common(fn, (a, b))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin1(self):
         def fn(x):
             return (aten.argmax(x), aten.argmin(x))
@@ -8184,7 +8171,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin2(self):
         def fn(x):
             return (
@@ -8196,7 +8182,6 @@ def fn(x):
 
         self.common(fn, (torch.randn([144, 144]),))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_duplicates(self):
         def fn(x):
             return (
@@ -8218,7 +8203,6 @@ def fn(x):
         t1 = torch.randint(8, size=(1028, 1028))
         self.common(fn, (t1,))
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_argmax_argmin_with_nan(self):
         def fn(x):
             return (
@@ -8351,7 +8335,6 @@ def fn(x):
             ],
         )
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_tmp_not_defined_issue1(self):
         def forward(
             primals_3,
@@ -8746,7 +8729,6 @@ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
         else:
            self.assertEqual(len(inps), 0)
 
-    @skipIfRocmArch(NAVI_ARCH)
     def test_dtype_mismatch_issue(self):
         def fn(x):
             attn = torch.nn.functional.pad(x, [0, 1])
@@ -11414,7 +11396,6 @@ def test_rnn_compile_safe(self):
 
 class NanCheckerTest(TestCase):
     @config.patch("nan_asserts", True)
-    @skipIfRocmArch(NAVI_ARCH)
     def test_nan_checker_pass(self):
         def f(x):
             return torch.softmax(x, dim=-1)
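
All 19 deletions are the same change: dropping the @skipIfRocmArch(NAVI_ARCH) decorator so these tests run on Navi GPUs again. For context, a minimal sketch of how such an architecture-gated skip decorator works is below. The real skipIfRocmArch and NAVI_ARCH live in PyTorch's internal test utilities (torch.testing._internal.common_utils); the helper _rocm_arch and the arch strings here are illustrative assumptions, not the actual implementation.

import functools
import unittest

import torch

# Assumed example values; the real NAVI_ARCH tuple may differ.
NAVI_ARCH = ("gfx1030", "gfx1100")

def _rocm_arch():
    # Hypothetical helper: report the gfx arch name of device 0 on ROCm
    # builds, or None on CUDA builds / machines without a GPU.
    if torch.version.hip is None or not torch.cuda.is_available():
        return None
    return torch.cuda.get_device_properties(0).gcnArchName

def skipIfRocmArch(arch_tuple):
    # Decorator factory: skip the wrapped test when the current ROCm
    # device matches one of the given architectures.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            arch = _rocm_arch()
            if arch is not None and any(a in arch for a in arch_tuple):
                raise unittest.SkipTest(f"skipped on ROCm arch {arch}")
            return fn(self, *args, **kwargs)
        return wrapper
    return decorator

Removing the decorator, as this commit does at all 19 call sites, simply lets these tests run unconditionally on Navi hardware.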
