This repository was archived by the owner on Aug 7, 2024. It is now read-only.

Commit d4ade87

alugorey authored and facebook-github-bot committed
Unskip passing torch.compile test (#286)
Summary:
drisspg Looks like this test was originally failing on my end due to an environmental issue. Pushing this; assuming it passes internal CI, it should be fine.

Pull Request resolved: #286
Reviewed By: y-sq
Differential Revision: D58887341
Pulled By: drisspg
fbshipit-source-id: e1d6592944a29735002c6c5a57e83ea1c82a87f3
1 parent 0bd374d · commit d4ade87

File tree

1 file changed: +1 -3 lines changed

test/test_compile.py

Lines changed: 1 addition & 3 deletions
@@ -22,7 +22,7 @@
     sync_float8_amax_and_scale_history,
 )
 from float8_experimental.float8_tensor import Float8Tensor, ScaledMMConfig
-from float8_experimental.float8_utils import e4m3_dtype, IS_ROCM
+from float8_experimental.float8_utils import e4m3_dtype
 
 from torch._dynamo.test_case import TestCase as DynamoTestCase
 from torch._dynamo.testing import CompileCounterWithBackend
@@ -128,14 +128,12 @@ def forward(self, x):
         return x_fp8
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
-    @unittest.skipIf(IS_ROCM, "test doesn't currently work on the ROCm stack")
     def test_float8_with_graph_break_in_the_middle(self):
         """Test that having Float8Tensor object at the boundary of a subgraph"""
         cnts = CompileCounterWithBackend("inductor")
         mod = self.MockLinear(graph_break=True).cuda()
         compiled_mod = copy.deepcopy(mod)
         compiled_mod = torch.compile(compiled_mod, backend=cnts)
-        torch.manual_seed(0)
         x = torch.randn(16, 16, device="cuda")
         y_eager = mod(x)
         y_compiled = compiled_mod(x)
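For context, the re-enabled test checks that torch.compile still produces correct results when a graph break splits the module into two compiled subgraphs, using torch._dynamo's CompileCounterWithBackend to count compiled frames. Below is a minimal, self-contained sketch of that pattern; ToyModel and the "eager" counting backend are assumptions chosen so the sketch runs on CPU, not code from this commit.

import copy

import torch
from torch._dynamo.testing import CompileCounterWithBackend


class ToyModel(torch.nn.Module):
    # Hypothetical stand-in for the suite's MockLinear(graph_break=True).
    def forward(self, x):
        x = x * 2
        torch._dynamo.graph_break()  # deliberately split tracing here
        return x + 1


cnts = CompileCounterWithBackend("eager")  # counts frames Dynamo compiles
mod = ToyModel()
compiled_mod = torch.compile(copy.deepcopy(mod), backend=cnts)

x = torch.randn(16, 16)
torch.testing.assert_close(mod(x), compiled_mod(x))  # eager and compiled agree
assert cnts.frame_count == 2  # the graph break yields two compiled frames

The actual test uses the "inductor" backend on CUDA, but the structure (deepcopy, compile with a counting backend, compare eager vs. compiled outputs) mirrors the code in the diff above.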

0 commit comments
