Skip to content

Bump torchfix from 0.1.1 to 0.5.0 #3220

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion backends/arm/test/models/test_mobilenet_v2_arm.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,11 @@
import unittest

import torch
import torchvision.models as models
from executorch.backends.arm.test import common

from executorch.backends.arm.test.tester.arm_tester import ArmTester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchvision import models
from torchvision.models.mobilenetv2 import MobileNet_V2_Weights


Expand Down
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/inception_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@
import unittest

import torch
import torchvision.models as models
from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchvision import models


class TestInceptionV3(unittest.TestCase):
Expand Down
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/mobilenet_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@
import unittest

import torch
import torchvision.models as models
from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchvision import models
from torchvision.models.mobilenetv2 import MobileNet_V2_Weights


Expand Down
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/mobilenet_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@
import unittest

import torch
import torchvision.models as models
from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchvision import models


class TestMobileNetV3(unittest.TestCase):
Expand Down
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/torchvision_vit.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
import unittest

import torch
import torchvision.models as models
from executorch.backends.xnnpack.test.tester import Tester
from torchvision import models


class TestViT(unittest.TestCase):
Expand Down
6 changes: 5 additions & 1 deletion examples/qualcomm/oss_scripts/ssd300_vgg16.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,11 @@ def SSD300VGG16(pretrained_weight_model):
from model import SSD300

model = SSD300(n_classes=21)
checkpoint = torch.load(pretrained_weight_model, map_location="cpu")
# TODO: If possible, it's better to set weights_only to True
# https://pytorch.org/docs/stable/generated/torch.load.html
checkpoint = torch.load(
pretrained_weight_model, map_location="cpu", weights_only=False
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Wouldn't weights_only=True work here and in other places?

weights_only=False is a potential security issue.
I guess it's fine to leave it as-is so we don't cause any compatibility problems right now, but maybe add a TODO to investigate later?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let me add them as TODOs then. These are examples from different partners, so I guess setting weights_only to True would be better reviewed separately.

)
model.load_state_dict(checkpoint["model"].state_dict())

return model.eval()
Expand Down
3 changes: 3 additions & 0 deletions examples/qualcomm/scripts/mobilebert_fine_tune.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,13 +204,16 @@ def get_fine_tuned_mobilebert(artifacts_dir, pretrained_weight, batch_size):
)

model.load_state_dict(
# TODO: If possible, it's better to set weights_only to True
# https://pytorch.org/docs/stable/generated/torch.load.html
torch.load(
(
f"{artifacts_dir}/finetuned_mobilebert_epoch_{epochs}.model"
if pretrained_weight is None
else pretrained_weight
),
map_location=torch.device("cpu"),
weights_only=False,
),
)

Expand Down
4 changes: 3 additions & 1 deletion exir/serde/export_serialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,9 @@ def deserialize_torch_artifact(serialized: bytes):
return {}
buffer = io.BytesIO(serialized)
buffer.seek(0)
return torch.load(buffer)
# TODO: If possible, it's better to set weights_only to True
# https://pytorch.org/docs/stable/generated/torch.load.html
return torch.load(buffer, weights_only=False)


def _sympy_int_to_int(val: sympy.Expr):
Expand Down
2 changes: 1 addition & 1 deletion requirements-lintrunner.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ flake8-comprehensions==3.12.0
flake8-pyi==23.5.0
mccabe==0.7.0
pycodestyle==2.10.0
torchfix==0.1.1
torchfix==0.5.0

# UFMT
black==24.2.0
Expand Down