
Commit 3c091bf

mergennachin authored and malfet committed
Fix lints (#456)
1 parent 775a68b commit 3c091bf

17 files changed: +53 −50 lines

.ci/scripts/gather_test_models.py

Lines changed: 4 additions & 2 deletions

@@ -19,7 +19,6 @@
     "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/generation_config.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/pytorch_model-00001-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/pytorch_model-00002-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/pytorch_model.bin.index.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/special_tokens_map.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/tokenizer.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/tokenizer.model,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/tokenizer_config.json",
     "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/generation_config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/pytorch_model-00001-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/pytorch_model-00002-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/pytorch_model.bin.index.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/special_tokens_map.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/tokenizer.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/tokenizer.model,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/tokenizer_config.json",
     "mistralai/Mistral-7B-Instruct-v0.2": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/generation_config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model-00001-of-00003.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model-00002-of-00003.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model-00003-of-00003.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model.bin.index.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/special_tokens_map.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/tokenizer.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/tokenizer.model,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/tokenizer_config.json",
-
     # huggingface-cli prefixed Models will download using the huggingface-cli tool
     # TODO: Convert all of the MODEL_REPOS with a NamedTuple that includes the install_method
     "huggingface-cli/meta-llama/Meta-Llama-3-8B": "",
@@ -73,7 +72,10 @@ def model_should_run_on_event(model: str, event: str, backend: str) -> bool:
     elif event == "periodic":
         # test llama3 on gpu only, see description in https://github.com/pytorch/torchchat/pull/399 for reasoning
         if backend == "gpu":
-            return model in ["openlm-research/open_llama_7b", "huggingface-cli/meta-llama/Meta-Llama-3-8B"]
+            return model in [
+                "openlm-research/open_llama_7b",
+                "huggingface-cli/meta-llama/Meta-Llama-3-8B",
+            ]
         else:
             return model in ["openlm-research/open_llama_7b"]
     else:

GPTQ.py

Lines changed: 1 addition & 1 deletion

@@ -360,7 +360,7 @@ def faster_quant(self, H, W):
         groupsize = self.groupsize
         orig_dtype = W.dtype
         W = W.detach().float()
-        rows, columns = W.shape[0], W.shape[1]
+        _, columns = W.shape[0], W.shape[1]
         device = W.device

         if groupsize == -1:
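
A note on the change above: binding an unused value to "_" is the conventional way to silence an unused-variable lint without changing behavior. A minimal standalone illustration (made-up tensor, not torchchat code):

import torch

W = torch.randn(8, 16)
_, columns = W.shape  # row count is unused; "_" keeps the linter quiet
print(columns)  # 16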

build/builder.py

Lines changed: 2 additions & 6 deletions

@@ -150,9 +150,7 @@ def __post_init__(self):
         try:
             from tokenizer.tiktoken import Tokenizer as TiktokenTokenizer

-            self.t = TiktokenTokenizer(
-                model_path=str(self.tokenizer_path)
-            )
+            self.t = TiktokenTokenizer(model_path=str(self.tokenizer_path))
             self.is_tiktoken = True
             self.is_sentencepiece = False
             return
@@ -162,9 +160,7 @@ def __post_init__(self):
         try:
             from sentencepiece import SentencePieceProcessor

-            self.t = SentencePieceProcessor(
-                model_file=str(self.tokenizer_path)
-            )
+            self.t = SentencePieceProcessor(model_file=str(self.tokenizer_path))
             self.is_tiktoken = False
             self.is_sentencepiece = True
             return
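
Both hunks above compact a try-import fallback onto one line each: prefer the tiktoken tokenizer, fall back to SentencePiece. A condensed standalone sketch of that pattern (class shape and error handling are assumptions here, not torchchat's exact API):

class TokenizerSketch:
    def __init__(self, tokenizer_path):
        self.is_tiktoken = self.is_sentencepiece = False
        try:
            # Prefer a tiktoken-style tokenizer when the module imports.
            from tokenizer.tiktoken import Tokenizer as TiktokenTokenizer

            self.t = TiktokenTokenizer(model_path=str(tokenizer_path))
            self.is_tiktoken = True
            return
        except Exception:
            pass
        # Otherwise fall back to a SentencePiece model file.
        from sentencepiece import SentencePieceProcessor

        self.t = SentencePieceProcessor(model_file=str(tokenizer_path))
        self.is_sentencepiece = True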

build/convert_torchtune_checkpoint.py

Lines changed: 16 additions & 12 deletions

@@ -4,12 +4,12 @@
 # This source code is licensed under the license found in the
 # LICENSE file in the root directory of this source tree.

+import logging
 import os
 import re
 import sys
-import logging
 from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, List

 import torch

@@ -46,7 +46,7 @@ def from_hf(
     merged_result: Dict[str, torch.Tensor],
     num_heads: int = 32,
     num_kv_heads: int = 32,
-    dim: int = 4096
+    dim: int = 4096,
 ) -> Dict[str, torch.Tensor]:
     """
     Utility function which converts the given state_dict from the HF format
@@ -56,7 +56,7 @@ def from_hf(
     """

     def permute(w, n_heads):
-        head_dim = dim // n_heads
+        head_dim = dim // n_heads
         return (
             w.view(n_heads, 2, head_dim // 2, dim)
             .transpose(1, 2)
@@ -114,7 +114,7 @@ def convert_torchtune_checkpoint(
         raise RuntimeError(f"{checkpoint_dir / file} is not a file")

     # If the model is already in meta format, simply rename it
-    if checkpoint_format == 'meta':
+    if checkpoint_format == "meta":
         if len(checkpoint_files) > 1:
             raise RuntimeError("Multiple meta format checkpoint files not supported")

@@ -127,23 +127,27 @@ def convert_torchtune_checkpoint(
         os.rename(checkpoint_path, Path.joinpath(checkpoint_dir, "model.pth"))

     # If the model is in HF format, merge all of the checkpoints and then convert
-    elif checkpoint_format == 'hf':
+    elif checkpoint_format == "hf":
         merged_result = {}
         for file in checkpoint_files:
             state_dict = torch.load(
-                Path.joinpath(checkpoint_dir, file), map_location="cpu", mmap=True, weights_only=True
+                Path.joinpath(checkpoint_dir, file),
+                map_location="cpu",
+                mmap=True,
+                weights_only=True,
             )
             merged_result.update(state_dict)

         model_config = MODEL_CONFIGS[model_name]
         final_result = from_hf(merged_result, **model_config)

-        print(f"Saving checkpoint to {checkpoint_dir / 'model.pth'}. This may take a while.")
+        print(
+            f"Saving checkpoint to {checkpoint_dir / 'model.pth'}. This may take a while."
+        )
         torch.save(final_result, Path.joinpath(checkpoint_dir, "model.pth"))
         print("Done.")


-
 if __name__ == "__main__":
     import argparse

@@ -155,19 +159,19 @@ def convert_torchtune_checkpoint(
     )
     parser.add_argument(
         "--checkpoint-files",
-        nargs='+',
+        nargs="+",
         required=True,
     )
     parser.add_argument(
         "--checkpoint-format",
         type=str,
         required=True,
-        choices=['meta', 'hf'],
+        choices=["meta", "hf"],
     )
     parser.add_argument(
         "--model-name",
         type=str,
-        choices=['llama2_7B', 'llama3_8B'],
+        choices=["llama2_7B", "llama3_8B"],
     )

     args = parser.parse_args()
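
For orientation, the permute helper whose body this file touches re-interleaves the two rotary halves of each attention head when mapping HF-layout weights onto the meta layout. A runnable sketch with toy sizes; the final .reshape is an assumption, since the hunk cuts off mid-expression:

import torch

def permute(w, n_heads, dim):
    head_dim = dim // n_heads
    return (
        w.view(n_heads, 2, head_dim // 2, dim)
        .transpose(1, 2)
        .reshape(n_heads * head_dim, dim)  # assumed tail, elided in the diff
    )

# Toy check: 2 heads, dim 4, head_dim 2; the shape is preserved.
w = torch.arange(16.0).reshape(4, 4)
print(permute(w, n_heads=2, dim=4).shape)  # torch.Size([4, 4])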

build/gguf_loader.py

Lines changed: 1 addition & 4 deletions

@@ -7,20 +7,17 @@

 import copy
 import logging
-import sys
-from pathlib import Path
 from typing import Any

 import gguf

 import torch

-
 from gguf import GGUFValueType
-from .model import ModelArgs, Transformer
 from quantize import pack_scales_and_zeros, WeightOnlyInt4Linear

 from build.gguf_util import Q4_0, to_float
+from .model import ModelArgs, Transformer

 logger: logging.Logger = logging.getLogger(__name__)

build/model.py

Lines changed: 1 addition & 1 deletion

@@ -15,7 +15,7 @@
 from torch import Tensor
 from torch.nn import functional as F

-from build.utils import find_multiple, get_precision, use_aoti_backend
+from build.utils import find_multiple, get_precision

 config_path = Path(f"{str(Path(__file__).parent)}/known_model_params")

cli.py

Lines changed: 3 additions & 0 deletions

@@ -57,14 +57,17 @@ def add_arguments_for_export(parser):
     # Only export specific options should be here
     _add_arguments_common(parser)

+
 def add_arguments_for_list(parser):
     # Only list specific options should be here
     _add_arguments_common(parser)

+
 def add_arguments_for_remove(parser):
     # Only remove specific options should be here
     _add_arguments_common(parser)

+
 def _add_arguments_common(parser):
     # Model specification. TODO Simplify this.
     # A model can be specified using a positional model name or HuggingFace
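
These three insertions (and the one in config/model_config.py below) add the second blank line PEP 8 expects between top-level definitions, the pycodestyle E302 rule that Black-style formatters enforce. In miniature:

def first():
    pass


def second():  # exactly two blank lines above, per PEP 8 (E302)
    pass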

config/model_config.py

Lines changed: 1 addition & 0 deletions

@@ -77,6 +77,7 @@ def load_model_configs() -> Dict[str, ModelConfig]:

     return model_configs

+
 def resolve_model_config(model: str) -> ModelConfig:
     model = model.lower()
     # Lazy load model config from JSON.

download.py

Lines changed: 7 additions & 9 deletions

@@ -7,13 +7,13 @@
 import shutil
 import urllib.request
 from pathlib import Path
-from typing import Optional, Sequence
+from typing import Optional

 from build.convert_hf_checkpoint import convert_hf_checkpoint
 from config.model_config import (
+    load_model_configs,
     ModelConfig,
     ModelDistributionChannel,
-    load_model_configs,
     resolve_model_config,
 )

@@ -129,16 +129,14 @@ def list_main(args) -> None:

         name_col.append(name)
         aliases_col.append(", ".join(config.aliases))
-        installed_col.append('Yes' if is_downloaded else "")
+        installed_col.append("Yes" if is_downloaded else "")

-    cols = {
-        "Model": name_col,
-        "Aliases": aliases_col,
-        "Downloaded": installed_col
-    }
+    cols = {"Model": name_col, "Aliases": aliases_col, "Downloaded": installed_col}

     # Find the length of the longest value in each column.
-    col_widths = {key:max(*[len(s) for s in vals], len(key)) + 1 for (key,vals) in cols.items()}
+    col_widths = {
+        key: max(*[len(s) for s in vals], len(key)) + 1 for (key, vals) in cols.items()
+    }

     # Display header.
     print()
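
The rewritten comprehension sizes each column to its longest cell, or to the header if that is longer, plus one space of padding. A self-contained sketch with invented data, including the header and row printing that the hunk only hints at:

cols = {
    "Model": ["meta-llama/llama3", "open_llama_7b"],
    "Aliases": ["llama3", "open-llama"],
    "Downloaded": ["Yes", ""],
}

# Longest cell or header per column, plus one space of padding.
col_widths = {
    key: max(*[len(s) for s in vals], len(key)) + 1 for (key, vals) in cols.items()
}

print("".join(key.ljust(col_widths[key]) for key in cols))
for row in zip(*cols.values()):
    print("".join(cell.ljust(w) for cell, w in zip(row, col_widths.values())))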

export.py

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@
     TokenizerArgs,
 )

-from build.utils import set_backend, set_precision, use_aoti_backend, use_et_backend
+from build.utils import set_backend, set_precision
 from cli import add_arguments, add_arguments_for_export, arg_init, check_args
 from export_aoti import export_model as export_model_aoti

export_et.py

Lines changed: 0 additions & 1 deletion

@@ -11,7 +11,6 @@

 from executorch.backends.xnnpack.partition.xnnpack_partitioner import (
     XnnpackDynamicallyQuantizedPartitioner,
-    XnnpackPartitioner,
 )

 # TODO: change back to executorch.examples.portable.utils

hqq/core/bitpack.py

Lines changed: 3 additions & 2 deletions

@@ -1,9 +1,10 @@
 # Written by Dr. Hicham Badri @Mobius Labs GmbH - 2023
 #####################################################

-import torch
-from torch import uint8, int32, Tensor
 import numpy as np
+import torch
+from torch import int32, Tensor, uint8
+
 from .utils import is_divisible

hqq/core/optimize.py

Lines changed: 1 addition & 1 deletion

@@ -1,8 +1,8 @@
 # Written by Dr. Hicham Badri @Mobius Labs GmbH - 2023
 #####################################################
+import numpy as np
 import torch
 from torch import Tensor
-import numpy as np


 # re-estimate the scale based on the inverse median

hqq/core/peft.py

Lines changed: 3 additions & 4 deletions

@@ -1,10 +1,9 @@
 # Written by Dr. Hicham Badri @Mobius Labs GmbH - 2023
 #####################################################
-import torch
-from torch import float16, bfloat16, float32
-from torch import Tensor
-from torch import nn
 import numpy as np
+import torch
+from torch import bfloat16, float16, float32, nn, Tensor
+
 from .quantize import HQQLinear, Quantizer
 from .utils import cleanup

hqq/core/quantize.py

Lines changed: 6 additions & 4 deletions

@@ -1,13 +1,15 @@
 # Written by Dr. Hicham Badri @Mobius Labs GmbH - 2023
 #####################################################
-import torch
-from torch import uint8, int32, float16, nn, Tensor
 import copy
 from enum import Enum

-from .utils import is_divisible
-from .optimize import optimize_weights_proximal
+import torch
+from torch import float16, int32, nn, Tensor, uint8
+
 from .bitpack import BitPack
+from .optimize import optimize_weights_proximal
+
+from .utils import is_divisible


 # Main HQQ Quantizer
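
All of the hqq hunks are the same lint: imports regrouped the way isort/ufmt order them, standard library first, then third-party packages, then local modules, with a blank line between groups and names within a group sorted case-insensitively. The target layout in miniature (the relative import assumes this sits inside a package):

import copy          # stdlib group
from enum import Enum

import torch         # third-party group
from torch import Tensor

from .utils import is_divisible  # local group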

hqq/core/utils.py

Lines changed: 2 additions & 1 deletion

@@ -1,8 +1,9 @@
 # Written by Dr. Hicham Badri @Mobius Labs GmbH - 2023
 #####################################################
-import torch
 import gc
+
 import numpy as np
+import torch


 def cleanup() -> None:

torchchat.py

Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@
         add_help=True,
     )
     # Default command is to print help
-    parser.set_defaults(func=lambda args: self._parser.print_help())
+    parser.set_defaults(func=parser.print_help())

     add_arguments(parser)
     subparsers = parser.add_subparsers(
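
For reference, a standalone sketch of the set_defaults fallback pattern involved here. The parentheses matter in Python: func=parser.print_help stores the bound method for a later call, whereas func=parser.print_help() invokes it immediately while the parser is being built and stores its None return value.

import argparse

parser = argparse.ArgumentParser(prog="demo")
# Store the callable so it can be invoked later as args.func().
parser.set_defaults(func=parser.print_help)

args = parser.parse_args([])  # no subcommand supplied
args.func()  # falls back to printing help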
