Skip to content

Commit c35065e

Browse files
committed
Follow-up to the f8 scaled_gemm change: skip some more failing tests
1 parent 332b505 commit c35065e

File tree

1 file changed

+3
-1
lines changed

1 file changed

+3
-1
lines changed

test/test_matmul_cuda.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -263,7 +263,9 @@ def test_float8_basics(self, device) -> None:
263263
def test_float8_out_dtype(self, device) -> None:
264264
self._test_tautological_mm(device, size=64, out_dtype=torch.float16)
265265
self._test_tautological_mm(device, size=96, out_dtype=torch.float32)
266-
self._test_tautological_mm(device, size=80, out_dtype=torch.bfloat16)
266+
# hipblaslt does not yet support bfloat16 output
267+
if torch.version.hip is None:
268+
self._test_tautological_mm(device, size=80, out_dtype=torch.bfloat16)
267269
with self.assertRaises(RuntimeError):
268270
self._test_tautological_mm(device, out_dtype=e5m2_type)
269271

0 commit comments

Comments (0)