
Commit bee8149

Author: Martin Yuan

Restore CMake configure comment (#2723)

Summary: A comment at the top of the top-level CMakeLists.txt file was removed previously as part of an update to automatically download buck. I removed more than was needed. This change restores the comment (minus the explicit buck path).

Test Plan: This is a comment-only change. I did a configure and build on Linux x86-64 to validate the syntax.

Reviewed By: mcr229
Differential Revision: D55445749
Pulled By: GregoryComer
fbshipit-source-id: ff8cb601e71be4231f4e853b993bad9f54ea7144
[ghstack-poisoned]
1 parent 9922c54 commit bee8149

File tree: 9 files changed, +175 −1 lines

.ci/scripts/gather_test_models.py — 61 additions, 1 deletion

@@ -13,6 +13,54 @@
 from examples.models import MODEL_NAME_TO_MODEL
 from examples.xnnpack import MODEL_NAME_TO_OPTIONS
 
+# MODEL_NAME_TO_MODEL = {
+#     "mul": ("toy_model", "MulModule"),
+#     "linear": ("toy_model", "LinearModule"),
+#     "add": ("toy_model", "AddModule"),
+#     "add_mul": ("toy_model", "AddMulModule"),
+#     "softmax": ("toy_model", "SoftmaxModule"),
+#     "dl3": ("deeplab_v3", "DeepLabV3ResNet50Model"),
+#     "edsr": ("edsr", "EdsrModel"),
+#     "emformer_transcribe": ("emformer_rnnt", "EmformerRnntTranscriberModel"),
+#     "emformer_predict": ("emformer_rnnt", "EmformerRnntPredictorModel"),
+#     "emformer_join": ("emformer_rnnt", "EmformerRnntJoinerModel"),
+#     "llama2": ("llama2", "Llama2Model"),
+#     "mobilebert": ("mobilebert", "MobileBertModelExample"),
+#     "mv2": ("mobilenet_v2", "MV2Model"),
+#     "mv2_untrained": ("mobilenet_v2", "MV2UntrainedModel"),
+#     "mv3": ("mobilenet_v3", "MV3Model"),
+#     "vit": ("torchvision_vit", "TorchVisionViTModel"),
+#     "w2l": ("wav2letter", "Wav2LetterModel"),
+#     "ic3": ("inception_v3", "InceptionV3Model"),
+#     "ic4": ("inception_v4", "InceptionV4Model"),
+#     "resnet18": ("resnet", "ResNet18Model"),
+#     "resnet50": ("resnet", "ResNet50Model"),
+#     "llava_encoder": ("llava_encoder", "LlavaModel"),
+# }
+
+# from dataclasses import dataclass
+# @dataclass
+# class XNNPACKOptions(object):
+#     quantization: bool
+#     delegation: bool
+#
+# MODEL_NAME_TO_OPTIONS = {
+#     "linear": XNNPACKOptions(True, True),
+#     "add": XNNPACKOptions(True, True),
+#     "add_mul": XNNPACKOptions(True, True),
+#     "dl3": XNNPACKOptions(True, True),
+#     "ic3": XNNPACKOptions(True, True),
+#     "ic4": XNNPACKOptions(True, True),
+#     "mv2": XNNPACKOptions(True, True),
+#     "mv3": XNNPACKOptions(True, True),
+#     "resnet18": XNNPACKOptions(True, True),
+#     "resnet50": XNNPACKOptions(True, True),
+#     "vit": XNNPACKOptions(False, True),
+#     "w2l": XNNPACKOptions(False, True),
+#     "edsr": XNNPACKOptions(True, True),
+#     "mobilebert": XNNPACKOptions(False, True),  # T170286473
+#     "llama2": XNNPACKOptions(False, True),
+# }
 
 DEFAULT_RUNNERS = {
     "linux": "linux.2xlarge",

@@ -24,6 +72,7 @@
     "w2l": "linux.12xlarge",
     "ic4": "linux.12xlarge",
     "resnet50": "linux.12xlarge",
+    "llava_encoder": "linux.4xlarge",
     # This one causes timeout on smaller runner, the root cause is unclear (T161064121)
     "dl3": "linux.12xlarge",
     "emformer_join": "linux.12xlarge",

@@ -83,9 +132,17 @@ def model_should_run_on_event(model: str, event: str) -> bool:
     We put higher priority and fast models to pull request and rest to push.
     """
     if event == "pull_request":
-        return model in ["add", "ic3", "mv2", "mv3", "resnet18", "vit"]
+        return model in ["add", "ic3", "mv2", "mv3", "resnet18", "vit", "llava_encoder"]
     return True
 
+def model_should_run_on_target_os(model: str, target_os: str) -> bool:
+    """
+    A helper function to decide whether a model should be tested on a target os (linux/macos).
+    For example, a big model can be disabled in macos due to the limited macos resources.
+    """
+    if target_os == "macos":
+        return model not in ["llava_encoder"]
+    return True
 
 def export_models_for_ci() -> dict[str, dict]:
     """

@@ -119,6 +176,9 @@ def export_models_for_ci() -> dict[str, dict]:
         if not model_should_run_on_event(name, event):
            continue
 
+        if not model_should_run_on_target_os(name, target_os):
+            continue
+
         if backend == "xnnpack":
             if name not in MODEL_NAME_TO_OPTIONS:
                 continue
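
Together, the new helper and the existing event filter act as independent predicates over the CI matrix: a model is scheduled only if it passes both. A minimal sketch of how they compose (the two predicates are copied from the diff above; the surrounding matrix loop is illustrative, not the exact code in gather_test_models.py):

# Illustrative composition of the two CI filters from the diff above.
def model_should_run_on_event(model: str, event: str) -> bool:
    if event == "pull_request":
        return model in ["add", "ic3", "mv2", "mv3", "resnet18", "vit", "llava_encoder"]
    return True

def model_should_run_on_target_os(model: str, target_os: str) -> bool:
    if target_os == "macos":
        return model not in ["llava_encoder"]
    return True

# llava_encoder passes the pull_request filter but is skipped on macos:
for target_os in ["linux", "macos"]:
    for name in ["add", "llava_encoder"]:
        if not model_should_run_on_event(name, "pull_request"):
            continue
        if not model_should_run_on_target_os(name, target_os):
            continue
        print(f"schedule {name} on {target_os}")
# Prints add/linux, llava_encoder/linux, and add/macos — but not llava_encoder/macos.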

.ci/scripts/test.sh — 4 additions, 0 deletions

@@ -67,6 +67,10 @@ test_model() {
     run_portable_executor_runner
     rm "./${MODEL_NAME}.pte"
   fi
+  if [[ "${MODEL_NAME}" == "llava_encoder" ]]; then
+    # Install requirements for llava
+    bash examples/models/llava_encoder/install_requirements.sh
+  fi
   # python3 -m examples.portable.scripts.export --model_name="llama2" should work too
   "${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}"
   run_portable_executor_runner

.gitmodules — 3 additions, 0 deletions

@@ -62,3 +62,6 @@
 [submodule "kernels/optimized/third-party/eigen"]
 	path = kernels/optimized/third-party/eigen
 	url = https://gitlab.com/libeigen/eigen.git
+[submodule "examples/third-party/LLaVA"]
+	path = examples/third-party/LLaVA
+	url = https://github.com/haotian-liu/LLaVA.git

examples/models/__init__.py — 1 addition, 0 deletions

@@ -26,6 +26,7 @@
     "ic4": ("inception_v4", "InceptionV4Model"),
     "resnet18": ("resnet", "ResNet18Model"),
     "resnet50": ("resnet", "ResNet50Model"),
+    "llava_encoder": ("llava_encoder", "LlavaModel"),
 }
 
 __all__ = [
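
With this registry entry in place, name-driven scripts can resolve the model class dynamically. A hedged sketch of the lookup pattern, assuming the registry maps a model name to a (module, class) pair under examples.models (the loader shown is illustrative, not the repo's exact code):

# Illustrative: resolving the name -> (module, class) entry added above.
import importlib

from examples.models import MODEL_NAME_TO_MODEL

module_name, class_name = MODEL_NAME_TO_MODEL["llava_encoder"]
# Assumed layout: examples/models/<module_name>/__init__.py exports the class.
module = importlib.import_module(f"examples.models.{module_name}")
model_class = getattr(module, class_name)  # -> LlavaModel
wrapper = model_class()  # downloads and loads the pretrained LLaVA weights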

examples/models/llava_encoder/README.md — 20 additions, 0 deletions (new file)

@@ -0,0 +1,20 @@
+## Summary
+In this example, we take the first step toward running multimodal models through ExecuTorch.
+- Demonstrate how to export the image encoder of the [LLaVA](https://github.com/haotian-liu/LLaVA) multimodal model.
+- Provide TODO steps on how to use the exported .pte file together with the existing [exported Llama2 model](https://github.com/pytorch/executorch/tree/main/examples/models/llama2) to build the multimodal pipeline.
+
+## Instructions
+Note that this folder does not host the pretrained LLaVA model.
+- To make LLaVA available, follow the [install instructions](https://github.com/haotian-liu/LLaVA?tab=readme-ov-file#install) in the LLaVA repo; follow the license in that repo when using LLaVA.
+- Since installing LLaVA may pin an older PyTorch version, `cd executorch` and run `./install_requirements.sh` to restore ExecuTorch's dependencies.
+- If there is a numpy compatibility issue, run `pip install bitsandbytes -I`.
+- Alternatively, run `examples/models/llava_encoder/install_requirements.sh`, which performs the steps above.
+- Run `python3 -m examples.portable.scripts.export --model_name="llava_encoder"`. The llava_encoder.pte file will be generated.
+- Run `./cmake-out/executor_runner --model_path ./llava_encoder.pte` to verify the exported model with the ExecuTorch runtime and portable kernels. Note that the portable kernels are not performance optimized; refer to other examples, such as those in the llama2 folder, for optimizations.
+
+## TODO
+- Write the pipeline in C++:
+  - Take image and text prompts as inputs.
+  - Call image-processing functions to preprocess the image tensor.
+  - Load the llava_encoder.pte model and run it with the image tensor.
+  - Combine the output of the encoder with the prompt as inputs to the llama model; call functions in llama_runner.cpp to run the llama model and get outputs. The ExecuTorch end-to-end flow for the llama model is located at `examples/models/llama2`.
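
For reference, a rough sketch of what the export command above does under the hood, assuming the standard ExecuTorch lowering path (torch.export → to_edge → to_executorch); the real examples.portable.scripts.export entry point adds argument parsing, registry lookup, and error handling:

# Rough sketch of the portable export flow for llava_encoder.
import torch

from examples.models.llava_encoder import LlavaModel
from executorch.exir import to_edge

wrapper = LlavaModel()  # loads liuhaotian/llava-v1.5-7b on CPU in float32
model = wrapper.get_eager_model().eval()
example_inputs = wrapper.get_example_inputs()

# Capture the eager module, lower to the Edge dialect, then to an
# ExecuTorch program, and serialize it as a .pte file.
exported_program = torch.export.export(model, example_inputs)
executorch_program = to_edge(exported_program).to_executorch()

with open("llava_encoder.pte", "wb") as f:
    f.write(executorch_program.buffer)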

examples/models/llava_encoder/__init__.py — 11 additions, 0 deletions (new file)

@@ -0,0 +1,11 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from .model import LlavaModel
+
+__all__ = [
+    LlavaModel,
+]

examples/models/llava_encoder/install_requirements.sh — 22 additions, 0 deletions (new file)

@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# install llava from the submodule
+pip install --force-reinstall -e examples/third-party/LLaVA
+
+# not included in the pip install package, but needed in llava
+pip install protobuf
+
+# The deps of llava can have different versions than the deps of ExecuTorch.
+# For example, the torch version required by llava is older than ExecuTorch's.
+# To make both work, recover ExecuTorch's original dependencies by rerunning
+# install_requirements.sh.
+./install_requirements.sh
+
+# bitsandbytes depends on numpy 1.x, which is not compatible with numpy 2.x.
+# Reinstall bitsandbytes to make it compatible.
+pip install bitsandbytes -I

examples/models/llava_encoder/model.py — 52 additions, 0 deletions (new file)

@@ -0,0 +1,52 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+
+from examples.models.model_base import EagerModelBase
+from llava.eval.run_llava import load_images, process_images
+from llava.mm_utils import get_model_name_from_path
+
+from llava.model.builder import load_pretrained_model
+from torch import nn
+
+
+class EncoderModel(nn.Module):
+    def __init__(self, llava_model):
+        super().__init__()
+        self.model_ = llava_model
+
+    def forward(self, images_tensor):
+        features = self.model_.get_model().get_vision_tower()(images_tensor)
+        features = self.model_.get_model().mm_projector(features)
+        return features
+
+
+class LlavaModel(EagerModelBase):
+    def __init__(self):
+        model_path = "liuhaotian/llava-v1.5-7b"
+        tokenizer, self.model_, self.image_processor_, context_len = (
+            load_pretrained_model(
+                model_path=model_path,
+                model_base=None,
+                model_name=get_model_name_from_path(model_path),
+            )
+        )
+        self.device = "cpu"
+        self.dtype = torch.float32
+        self.model_.to(device=self.device, dtype=self.dtype)
+
+    def get_eager_model(self):
+        model = EncoderModel(self.model_)
+        return model
+
+    def get_example_inputs(self):
+        image_file = "https://llava-vl.github.io/static/images/view.jpg"
+        images = load_images([image_file])
+        images_tensor = process_images(
+            images, self.image_processor_, self.model_.config
+        ).to(self.model_.device)
+        return (images_tensor,)
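
Before exporting, the wrapper can be smoke-tested eagerly. A minimal sketch, assuming the LLaVA dependencies from install_requirements.sh are installed (instantiating LlavaModel downloads the checkpoint, and get_example_inputs fetches a sample image):

# Minimal eager smoke test for the wrapper above.
import torch

from examples.models.llava_encoder.model import LlavaModel

wrapper = LlavaModel()
encoder = wrapper.get_eager_model().eval()
(images_tensor,) = wrapper.get_example_inputs()

with torch.no_grad():
    features = encoder(images_tensor)

# The encoder emits one projected feature sequence per image, mapped into the
# LLM embedding space by mm_projector.
print(features.shape)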

examples/third-party/LLaVA

Submodule LLaVA added at 7440ec9
