1 parent 46ea733 commit 5db136c
examples/models/llama3_2_vision/runner/native.py

@@ -44,9 +44,10 @@ def __init__(self, args):
             use_kv_cache=args.kv_cache,
             vocab_size=params["vocab_size"],
         )
+        self.model_bytes = None
         with open(args.pte, "rb") as f:
-            model_bytes = f.read()
-            self.model = _load_for_executorch_from_buffer(model_bytes)
+            self.model_bytes = f.read()
+            self.model = _load_for_executorch_from_buffer(self.model_bytes)
         # self.model = _load_for_executorch(args.pte)
         self.use_kv_cache = args.kv_cache

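The diff replaces the local model_bytes with an instance attribute, presumably so the backing buffer stays alive for the lifetime of the module returned by _load_for_executorch_from_buffer. A minimal sketch of that pattern, assuming the ExecuTorch pybindings import this runner already relies on; the class name and path argument below are placeholders:

# Assumed import path for the ExecuTorch pybindings module loader.
from executorch.extension.pybindings.portable_lib import (
    _load_for_executorch_from_buffer,
)

class PteRunner:  # placeholder class name for illustration
    def __init__(self, pte_path: str) -> None:
        # Keep the raw .pte program bytes on the instance (not a local
        # variable) so the buffer is not garbage-collected while the
        # loaded module may still reference it.
        self.model_bytes = None
        with open(pte_path, "rb") as f:
            self.model_bytes = f.read()
        self.model = _load_for_executorch_from_buffer(self.model_bytes)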