
Commit b687fd9

up
1 parent c05764c commit b687fd9

Showing 2 changed files with 0 additions and 10 deletions.

examples/models/llama/model.py

Lines changed: 0 additions & 4 deletions
@@ -18,7 +18,6 @@
 from executorch.examples.models.llama.llama_transformer import Transformer

 from executorch.examples.models.llama.model_args import ModelArgs
-from torchao.utils import TorchAOBaseTensor

 try:
     from .fairseq2 import convert_to_llama_checkpoint
@@ -258,9 +257,6 @@ def __init__(self, **kwargs):
                 strict=False,
                 assign=True,
             )  # self.model_ = Transformer(gptconf)
-            for param in self.model_.parameters():
-                if isinstance(param, TorchAOBaseTensor):
-                    param.requires_grad = False
         else:
             print("Checkpoint not provided, defaulting weights to zeros.")
             self.model_.to_empty(device="cpu")
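For context, the loop removed from model.py had frozen quantized parameters immediately after the checkpoint load. Below is a minimal standalone sketch of that pattern; the helper name freeze_torchao_params is hypothetical, while TorchAOBaseTensor is the real torchao base class the removed code checked against.

    import torch
    from torchao.utils import TorchAOBaseTensor

    def freeze_torchao_params(model: torch.nn.Module) -> None:
        # Sketch of the removed logic: parameters backed by torchao tensor
        # subclasses (e.g. loaded from a quantized checkpoint with
        # load_state_dict(..., assign=True)) get gradients disabled, since
        # they are not trained further before export.
        for param in model.parameters():
            if isinstance(param, TorchAOBaseTensor):
                param.requires_grad = False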

extension/llm/export/builder.py

Lines changed: 0 additions & 6 deletions
@@ -41,7 +41,6 @@
 from torch.ao.quantization.quantizer.composable_quantizer import ComposableQuantizer
 from torch.export import export_for_training, ExportedProgram
 from torch.nn.attention import SDPBackend
-from torchao.utils import unwrap_tensor_subclass

 FORMAT = "[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s"
 logging.basicConfig(level=logging.INFO, format=FORMAT)
@@ -200,11 +199,6 @@ def _get_edge_config(self) -> EdgeCompileConfig:
         return edge_config

     def _export(self, module: Optional[torch.nn.Module] = None) -> ExportedProgram:
-        if module is not None:
-            unwrap_tensor_subclass(module)
-        else:
-            unwrap_tensor_subclass(self.model)
-
         dynamic_shape = self._get_dynamic_shape()
         # 1. torch.nn.attention.sdpa_kernel([SDPBackend.MATH]) is for bypassing the dynamo error when tracing
         # 2. torch.no_grad() is for getting rid of the dropout (not sure why training ops will show up)
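For context, the block removed from _export had unwrapped torchao tensor subclasses before tracing. Below is a minimal sketch of that pattern; the wrapper name unwrap_for_export is hypothetical, while unwrap_tensor_subclass is the real torchao utility the removed lines called.

    from typing import Optional

    import torch
    from torchao.utils import unwrap_tensor_subclass

    def unwrap_for_export(
        default_model: torch.nn.Module,
        module: Optional[torch.nn.Module] = None,
    ) -> torch.nn.Module:
        # Sketch of the removed pre-export step: convert tensor-subclass
        # parameters back to plain tensors so torch.export traces ordinary
        # ops rather than subclass dispatch. Mirrors the removed
        # module-vs-self.model fallback.
        target = module if module is not None else default_model
        return unwrap_tensor_subclass(target)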
