Commit dc77c85

mergennachin authored and facebook-github-bot committed
Update black linter in OSS lintrunner (#2229)
Summary:
Pull Request resolved: #2229

The Black linter was upgraded to v24.2.0 in fbsource. Let's upgrade the OSS linter to match, to reduce inconsistency between the internal and OSS linters. lintrunner is currently failing on main as is: https://hud.pytorch.org/hud/pytorch/executorch/main/1?per_page=50

Context: https://fb.workplace.com/groups/pyfmt/posts/1391116614859184/?fbclid=IwAR1Gag0Bkq2OE_4EeH5XY_iOFgwF6VE-7OAr9kmHeyB3QzkfaGvsWWY3nCo

Reviewed By: kimishpatel

Differential Revision: D54487606

fbshipit-source-id: 82d5eb011e6e96dd2b369299cd4a74724564b94c
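
For anyone who wants to sanity-check a snippet against the new pin locally, here is a minimal sketch using black's Python API (format_str and Mode are real entry points; the sample source string is hypothetical):

    # Install the pinned version first, e.g. pip install black==24.2.0,
    # then format a snippet with whatever black version is installed.
    import black

    SAMPLE = (
        "def f() -> List[\n"
        "    Tuple[int, int]\n"
        "]:\n"
        "    pass\n"
    )

    print(black.__version__)
    print(black.format_str(SAMPLE, mode=black.Mode()))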
1 parent d25b57b · commit dc77c85

File tree

10 files changed, +39 -33 lines

backends/apple/mps/operators/node_visitor.py

Lines changed: 0 additions & 1 deletion
@@ -235,7 +235,6 @@ def get_serialized_data(
     def get_serialized_id(
         self, node: Union[torch.fx.Node, float, int], mps_graph: MPSGraph
     ) -> int:
-
         """
         Map a tensor to a unique id. If the tensor was already mapped, return
         the existent id.
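
The only change here appears to be black 24's stable-style rule that drops a blank line sitting between a signature and its docstring. A hedged before/after sketch with hypothetical names:

    # Before: black 22 tolerated a blank line ahead of the docstring.
    def lookup_before(key):

        """Return a unique id for key (hypothetical example)."""
        return hash(key)


    # After: black 24 removes it.
    def lookup_after(key):
        """Return a unique id for key (hypothetical example)."""
        return hash(key)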

backends/arm/arm_backend.py

Lines changed: 5 additions & 3 deletions
@@ -128,9 +128,11 @@ def preprocess(  # noqa: C901
         # Add output to TOSA graph
         tosa_graph.currRegion.currBasicBlock.addTensor(
             output.name,
-            inputs[0].shape
-            if is_permute_node_before_addmm(node)
-            else output.shape,
+            (
+                inputs[0].shape
+                if is_permute_node_before_addmm(node)
+                else output.shape
+            ),
             ts.DType.INT8 if is_quant_node(node) else output.dtype,
         )
 
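
This hunk shows black 24 wrapping a multi-line conditional expression in its own parentheses when it appears as a call argument. A toy sketch, with all names hypothetical:

    # Toy stand-ins for the real graph objects; illustrative only.
    def add_tensor(name, shape, dtype):
        return (name, shape, dtype)

    is_permuted = False
    input_shape, output_shape = (1, 4, 3), (1, 3, 4)

    # black 24 keeps a split ternary argument inside explicit parentheses:
    entry = add_tensor(
        "output_0",
        (
            input_shape
            if is_permuted
            else output_shape
        ),
        "int8",
    )
    print(entry)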

backends/arm/arm_vela.py

Lines changed: 1 addition & 0 deletions
@@ -12,6 +12,7 @@
 
 import numpy as np
 
+
 # Pack either input or output tensor block, compose the related arrays into
 # per-io structs to simplify runtime use.
 def vela_bin_pack_io(prefix, data):
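
The inserted blank line seems to reflect black 24 treating a comment directly above a top-level function as attached to it, so the comment now gets the standard two blank lines above it. A hypothetical sketch:

    import numpy as np

    CONSTANT = 42


    # A comment bound to the function below now sits two blank lines
    # after the preceding top-level statement (hypothetical example).
    def pack(data):
        return np.asarray(data)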

backends/qualcomm/passes/decompose_scaled_dot_product_attention.py

Lines changed: 6 additions & 6 deletions
@@ -42,9 +42,9 @@ def call(self, graph_module: torch.fx.GraphModule):
         # In decomposed module, there are only input tensors for placeholder op.
         for decomposed_node in decomposed_module.graph.nodes:
             if decomposed_node.op == "placeholder":
-                decomposed_node_to_subgraph_node[
-                    decomposed_node
-                ] = name_to_input_tensor_map[decomposed_node.name]
+                decomposed_node_to_subgraph_node[decomposed_node] = (
+                    name_to_input_tensor_map[decomposed_node.name]
+                )
 
             if decomposed_node.op == "output":
                 last_decomposed_node = decomposed_node.args[0]
@@ -76,9 +76,9 @@ def call(self, graph_module: torch.fx.GraphModule):
                 subgraph_node.meta["source_fn_stack"] = [
                     (subgraph_node, subgraph_node.target)
                 ]
-                decomposed_node_to_subgraph_node[
-                    decomposed_node
-                ] = subgraph_node
+                decomposed_node_to_subgraph_node[decomposed_node] = (
+                    subgraph_node
+                )
 
             graph.erase_node(node)
 
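
Both hunks are the same black 24 change: instead of splitting a subscripted assignment target across lines, the target stays on one line and the right-hand side is parenthesized. A sketch with hypothetical names:

    mapping = {}
    decomposed, subgraph = "decomposed_node_0", "subgraph_node_0"

    # black 22 split the subscripted target:
    # mapping[
    #     decomposed
    # ] = subgraph

    # black 24 keeps the target intact and parenthesizes the right-hand side
    # (the parentheses only appear once the joined line is over the limit):
    mapping[decomposed] = (
        subgraph
    )
    print(mapping)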

backends/qualcomm/passes/i64_to_i32.py

Lines changed: 5 additions & 3 deletions
@@ -21,9 +21,11 @@ def _update_meta(self, node: torch.fx.node) -> None:
         meta_val = node.meta["val"]
         if isinstance(meta_val, tuple):
             node.meta["val"] = (
-                fake_tensor.to(torch.int32)
-                if fake_tensor.dtype == torch.int64
-                else fake_tensor
+                (
+                    fake_tensor.to(torch.int32)
+                    if fake_tensor.dtype == torch.int64
+                    else fake_tensor
+                )
                 for fake_tensor in meta_val
             )
         else:
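
Same parenthesization rule, here applied to a conditional expression inside a generator. A self-contained sketch of what the pass intends (int64 tensors downcast to int32, everything else untouched):

    import torch

    meta_val = (torch.zeros(2, dtype=torch.int64), torch.zeros(2, dtype=torch.float32))
    converted = tuple(
        (
            t.to(torch.int32)
            if t.dtype == torch.int64
            else t
        )
        for t in meta_val
    )
    print([t.dtype for t in converted])  # [torch.int32, torch.float32]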

examples/arm/aot_arm_compiler.py

Lines changed: 1 addition & 0 deletions
@@ -26,6 +26,7 @@
 # quantization step in our example. This will take the models
 # from examples/models/ and quantize then export to delegate.
 
+
 # Two simple models
 class AddModule(torch.nn.Module):
     def __init__(self):

examples/models/llama2/quantize.py

Lines changed: 2 additions & 1 deletion
@@ -64,7 +64,8 @@ def dynamically_quantize_per_channel(
     with a final group of a size less than group size.
 
     Assumptions:
-        This function assumes symmetric quantization, axis ==0 and a dense memory format."""
+        This function assumes symmetric quantization, axis ==0 and a dense memory format.
+    """
 
     # assumes symmetric quantization
     # assumes axis == 0

exir/passes/_quant_patterns_and_replacements.py

Lines changed: 15 additions & 15 deletions
@@ -337,9 +337,9 @@ def binary_relu_op_replacement(
     ]
 
 
-def _get_binary_ops_patterns_and_replacements() -> List[
-    Tuple[Callable, Callable, List[Callable]]
-]:
+def _get_binary_ops_patterns_and_replacements() -> (
+    List[Tuple[Callable, Callable, List[Callable]]]
+):
 
     # TODO: replace qbinary op with the ops implemented in lean mode
     binary_op_to_qbinary_ops = {
@@ -360,9 +360,9 @@ def _get_binary_ops_patterns_and_replacements() -> List[
     return pattern_and_replacements
 
 
-def _get_reshape_patterns_and_replacements() -> List[
-    Tuple[Callable, Callable, List[Callable]]
-]:
+def _get_reshape_patterns_and_replacements() -> (
+    List[Tuple[Callable, Callable, List[Callable]]]
+):
     def pattern(
         x,
         arg0,
@@ -413,9 +413,9 @@ def replacement(
     ]
 
 
-def _get_slice_patterns_and_replacements() -> List[
-    Tuple[Callable, Callable, List[Callable]]
-]:
+def _get_slice_patterns_and_replacements() -> (
+    List[Tuple[Callable, Callable, List[Callable]]]
+):
     def pattern(x, dim, start, end, x_scale, x_zero_point, x_qmin, x_qmax):
         x = torch.ops.quantized_decomposed.dequantize_per_tensor.default(
             x, x_scale, x_zero_point, x_qmin, x_qmax, torch.uint8
@@ -439,9 +439,9 @@ def replacement(x, dim, start, end, x_scale, x_zero_point, x_qmin, x_qmax):
     ]
 
 
-def _get_embedding_ops_patterns_and_replacements() -> List[
-    Tuple[Callable, Callable, List[Callable]]
-]:
+def _get_embedding_ops_patterns_and_replacements() -> (
+    List[Tuple[Callable, Callable, List[Callable]]]
+):
     def get_pattern_and_replacement():
         @bind_pattern_to_op(quantized_decomposed_lib, "embedding_byte")
         def pattern(
@@ -569,9 +569,9 @@ def replacement(x, x_scale, x_zero_point, x_qmin, x_qmax):
     """
 
 
-def get_quant_patterns_and_replacements() -> List[
-    Tuple[Callable, Callable, List[Callable]]
-]:
+def get_quant_patterns_and_replacements() -> (
+    List[Tuple[Callable, Callable, List[Callable]]]
+):
 
     return copy.copy(
         [
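
All five hunks in this file are the same black 24 change: a long return annotation is now wrapped in parentheses instead of having its subscript split. A minimal sketch:

    from typing import Callable, List, Tuple

    # black 22 split the subscript of the annotation itself:
    # def get_patterns() -> List[
    #     Tuple[Callable, Callable, List[Callable]]
    # ]:

    # black 24 parenthesizes the whole annotation instead (parenthesized
    # return annotations are valid Python):
    def get_patterns() -> (
        List[Tuple[Callable, Callable, List[Callable]]]
    ):
        return []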

exir/serde/export_serialize.py

Lines changed: 3 additions & 3 deletions
@@ -1728,9 +1728,9 @@ def deserialize(
             symbol_name_to_range,
             res.names_to_symbols,
         )
-        model_opset_version: Optional[
-            Dict[str, int]
-        ] = serialized_artifact.exported_program.opset_version  # pyre-ignore
+        model_opset_version: Optional[Dict[str, int]] = (
+            serialized_artifact.exported_program.opset_version  # pyre-ignore
+        )
         self._validate_model_opset_version(model_opset_version)
 
         upgrader = GraphModuleOpUpgrader(

requirements-lintrunner.txt

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ pycodestyle==2.10.0
 torchfix==0.1.1
 
 # UFMT
-black==22.12.0
+black==24.2.0
 ufmt==2.0.1
 usort==1.0.5
 
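
After reinstalling from this file (pip install -r requirements-lintrunner.txt), a one-line sketch to confirm the environment picked up the new pin:

    import black

    # Should print 24.2.0 once the updated requirements are installed.
    print(black.__version__)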
