This repository was archived by the owner on Aug 7, 2024. It is now read-only.

Commit 73fd168

weifengpy authored and facebook-github-bot committed
fix linter error in CI (#313)
Summary: `pre-commit run --all-files` complains about a linter error from trunk. Fix it in this PR.

Pull Request resolved: #313
Reviewed By: drisspg
Differential Revision: D59562565
Pulled By: weifengpy
fbshipit-source-id: b276413d2a6b25632690d59ea8d4b3f5b680a66a
1 parent 13d3198 commit 73fd168

12 files changed (+12, -19 lines)
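
For context, the errors fixed below are standard flake8-style findings: F401 ("imported but unused") and F841 ("local variable is assigned to but never used"). Here is a minimal, hypothetical lint_demo.py (not part of this commit) illustrating both patterns and the two kinds of fix this PR applies, assuming a flake8-compatible linter like the one the repo's pre-commit hook runs:

    # lint_demo.py -- an illustration only, not code from this commit.
    import json  # F401: imported but never used; the PR's fix is to delete such imports


    def demo() -> None:
        # F841: a local is assigned but never read. The PR either deletes the
        # binding or keeps it with an explicit suppression comment:
        local_result = 1 + 2  # noqa: F841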

README.md

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ m = Model(...)
 # convert all `torch.nn.Linear` modules to `Float8Linear`, specifying scaling
 # type
 swap_linear_with_float8_linear(
-    m,
+    m,
     Float8Linear,
     scaling_type_x=TensorScalingType.DELAYED,
     scaling_type_w=TensorScalingType.DELAYED,
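
(The removed and added lines render identically here because the README change appears to be whitespace-only re-indentation of `m,`.) Pieced together from the diff context, the documented call looks roughly like the sketch below; `nn.Sequential(nn.Linear(...))` is a stand-in for the README's `Model(...)`, and running it assumes the float8_experimental package at this commit:

    import torch.nn as nn
    from float8_experimental.float8_linear import Float8Linear, TensorScalingType
    from float8_experimental.float8_linear_utils import swap_linear_with_float8_linear

    m = nn.Sequential(nn.Linear(32, 32))  # stand-in for Model(...)

    # convert all `torch.nn.Linear` modules to `Float8Linear`, specifying
    # delayed scaling for both the activation (x) and weight (w) tensors
    swap_linear_with_float8_linear(
        m,
        Float8Linear,
        scaling_type_x=TensorScalingType.DELAYED,
        scaling_type_w=TensorScalingType.DELAYED,
    )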

benchmarks/bench_multi_gpu.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 import torch.multiprocessing as mp
 import torch.nn as nn
 import torch.utils.benchmark as benchmark
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import (
     swap_linear_with_float8_linear,
     sync_float8_amax_and_scale_history,

benchmarks/profile_linear_float8.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import (
     linear_requires_sync,
     swap_linear_with_float8_linear,

float8_experimental/float8_dynamic_utils.py

Lines changed: 0 additions & 3 deletions
@@ -9,10 +9,7 @@
 
 from typing import Any, Optional, Tuple
 
-import float8_experimental.config as config
-
 import torch
-import torch.nn as nn
 import torch.utils._pytree as pytree
 
 from float8_experimental.float8_tensor import (

float8_experimental/float8_linear_utils.py

Lines changed: 1 addition & 3 deletions
@@ -3,10 +3,8 @@
 #
 # This source code is licensed under the BSD 3-Clause license found in the
 # LICENSE file in the root directory of this source tree.
-import copy
 import logging
-from enum import auto, Enum
-from typing import Callable, List, Optional, Type, Union
+from typing import Callable, List, Optional
 
 import torch
 import torch.distributed as dist

test/test_dtensor.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@
 import torch.nn.functional as F
 
 from float8_experimental.float8_dynamic_utils import NoopFwToFloat8E5M2Bw
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import swap_linear_with_float8_linear
 from float8_experimental.float8_tensor import Float8Tensor, ScaledMMConfig
 from float8_experimental.float8_tensor_parallel import (

test/test_fsdp.py

Lines changed: 2 additions & 2 deletions
@@ -21,7 +21,7 @@
 import torch.distributed as dist
 import torch.multiprocessing as mp
 import torch.nn as nn
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import (
     linear_requires_sync,
     swap_linear_with_float8_linear,
@@ -149,7 +149,7 @@ def forward_backward(model, optim, is_fp8, i):
         model_fp8 = torch.compile(model_fp8)
     y_local = forward_backward(model, optimizer, is_fp8=False, i=i)
     y_local_fp8 = forward_backward(model_fp8, optimizer_fp8, is_fp8=True, i=i)
-    local_sqnr = compute_error(y_local, y_local_fp8)
+    local_sqnr = compute_error(y_local, y_local_fp8)  # noqa: F841
 
     # get global y
     y_global = [
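
Note that the F841 fix in test_fsdp.py keeps the dead assignment and suppresses the warning rather than deleting the line, presumably so the local SQNR stays easy to inspect under a debugger. A minimal sketch of that pattern, with a hypothetical stand-in for `compute_error`:

    def compute_error(ref, actual):
        # stand-in for float8_experimental.float8_utils.compute_error (SQNR)
        return ref - actual

    y_local, y_local_fp8 = 1.0, 0.99
    # assigned for debuggability, never read; suppressed instead of removed:
    local_sqnr = compute_error(y_local, y_local_fp8)  # noqa: F841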

test/test_fsdp2/test_fsdp2_common.py

Lines changed: 1 addition & 2 deletions
@@ -1,12 +1,11 @@
 import contextlib
-from typing import List, Type
+from typing import List
 
 import float8_experimental.config as config
 
 import torch
 import torch.distributed as dist
 import torch.nn as nn
-from float8_experimental.float8_linear import Float8Linear
 
 
 def check_parity_no_mp(

test/test_fsdp2/test_fsdp2_eager.py

Lines changed: 1 addition & 2 deletions
@@ -1,5 +1,4 @@
 import copy
-import itertools
 import threading
 import unittest
 from typing import Any, List
@@ -9,7 +8,7 @@
 import torch.distributed as dist
 import torch.nn as nn
 from float8_experimental.float8_dynamic_utils import WeightWithDynamicFloat8CastTensor
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import swap_linear_with_float8_linear
 from test_fsdp2_common import (
     check_parity_bf16_mp,

test/test_fsdp_compile.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 import torch.multiprocessing as mp
 import torch.nn as nn
 from float8_experimental import config
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import (
     swap_linear_with_float8_linear,
     sync_float8_amax_and_scale_history,

test/test_inference_flows.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import swap_linear_with_float8_linear
 from float8_experimental.float8_tensor import Float8Tensor
 from float8_experimental.float8_utils import compute_error

test/test_numerics_integration.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from float8_experimental.float8_linear import Float8Linear, TensorScalingType
+from float8_experimental.float8_linear import TensorScalingType
 from float8_experimental.float8_linear_utils import (
     linear_requires_sync,
     swap_linear_with_float8_linear,
