1 parent 87dda1b · commit d4e4d41
test/inductor/test_kernel_benchmark.py
@@ -12,7 +12,7 @@
from torch._inductor.utils import fresh_inductor_cache
from torch.testing import FileCheck
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
-
+from torch.testing._internal.common_utils import skipIfRocm

class TestKernelBenchmark(TestCase):
    @classmethod
@@ -97,6 +97,7 @@ def f(a, b):

    @config.patch(max_autotune=True, max_autotune_gemm_backends="TRITON")
    @fresh_inductor_cache()
+    @skipIfRocm  # This seems to be disabled upstream https://github.com/pytorch/pytorch/issues/118346
    def test_mm_triton_kernel_benchmark(self):
        M = 2048
        N = 2432
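
For context, a minimal sketch of the usage pattern the diff relies on: the skipIfRocm decorator imported from torch.testing._internal.common_utils marks a test method so it is reported as skipped when PyTorch is built with ROCm, while CUDA builds run it normally. The class and method names below (ExampleRocmSkip, test_cuda_only_path) are hypothetical and only illustrate the pattern; they are not part of the commit.

# Minimal sketch of the skipIfRocm pattern applied in the diff above.
# ExampleRocmSkip and test_cuda_only_path are hypothetical names for illustration.
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm


class ExampleRocmSkip(TestCase):
    @skipIfRocm  # skipped on ROCm builds, runs normally elsewhere
    def test_cuda_only_path(self):
        self.assertTrue(True)


if __name__ == "__main__":
    run_tests()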