Commit 8c8a3dc

Clean up llava deps
Guang Yang (guangy10) authored and committed
1 parent 69dcfe0

File tree

6 files changed: +60 -20 lines changed

.ci/scripts/test.sh

Lines changed: 1 addition & 2 deletions

@@ -71,10 +71,9 @@ test_model() {
   if [[ "${MODEL_NAME}" == "llava" ]]; then
     # Install requirements for llava
     bash examples/models/llava/install_requirements.sh
-    STRICT="--no-strict"
   fi
   # python3 -m examples.portable.scripts.export --model_name="llama2" should work too
-  "${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}" "${STRICT}"
+  "${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}"
   run_portable_executor_runner
 }
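
With the deps cleaned up, llava no longer needs the `--no-strict` escape hatch, so CI exports it under the default strict tracing. As a rough illustration, assuming `--no-strict` mapped to the `strict` argument of `torch.export.export` (an assumption; the export script's internals are not shown in this diff), the difference is only the tracing mode:

import torch
from torch.export import export


class TinyModule(torch.nn.Module):
    # Hypothetical stand-in for the model being exported.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2


# strict=True traces with TorchDynamo and errors out on constructs it cannot
# soundly capture; strict=False falls back to a more permissive tracer.
ep = export(TinyModule(), (torch.randn(2, 3),), strict=True)
print(ep.graph_module.graph)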

.github/workflows/pull.yml

Lines changed: 1 addition & 1 deletion

@@ -193,7 +193,7 @@ jobs:
     strategy:
       fail-fast: false
     with:
-      runner: linux.12xlarge
+      runner: linux.24xlarge
       docker-image: executorch-ubuntu-22.04-clang12
       submodules: 'true'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}

examples/models/llava/README.md

Lines changed: 4 additions & 5 deletions

@@ -7,14 +7,13 @@ In this example, we initiate the process of running multi modality through ExecuTorch.
 Note that this folder does not host the pretrained LLaVA model.
 - To have LLaVA available, follow the [Install instructions](https://github.com/haotian-liu/LLaVA?tab=readme-ov-file#install) in the LLaVA GitHub repo. Follow the license in that repo when using LLaVA.
 - Since the pytorch model version may not be updated, `cd executorch` and run `./install_requirements.sh`.
-- If there is a numpy compatibility issue, run `pip install bitsandbytes -I`.
-- Alternatively, run `examples/models/llava_encoder/install_requirements.sh` to replace the steps above.
-- Run `python3 -m examples.portable.scripts.export --model_name="llava_encoder"`. The llava_encoder.pte file will be generated.
-- Run `./cmake-out/executor_runner --model_path ./llava_encoder.pte` to verify the exported model with the ExecuTorch runtime and portable kernels. Note that the portable kernels are not performance optimized; refer to other examples, like those in the llama2 folder, for optimization.
+- Run `examples/models/llava/install_requirements.sh` to install llava-specific deps.
+- Run `python3 -m examples.portable.scripts.export --model_name="llava"`. The llava.pte file will be generated.
+- Run `./cmake-out/executor_runner --model_path ./llava.pte` to verify the exported model with the ExecuTorch runtime and portable kernels. Note that the portable kernels are not performance optimized; refer to other examples, like those in the llama2 folder, for optimization.

 ## TODO
 - Write the pipeline in cpp
 - Have image and text prompts as inputs.
 - Call image processing functions to preprocess the image tensor.
-- Load the llava_encoder.pte model and run it using the image tensor.
+- Load the llava.pte model and run it using the image tensor.
 - The output of the encoder can be combined with the prompt as inputs to the llama model. Call functions in llama_runner.cpp to run the llama model and get outputs. The ExecuTorch end-to-end flow for the llama model is located at `examples/models/llama2`.
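
The updated steps stop at the C++ executor_runner, but the exported program can also be smoke-tested from Python once the pybindings are built (install_requirements.sh builds them with `--pybind xnnpack`). A minimal sketch; the input below is a placeholder, since the real llava input signature (token ids, image tensor shapes) is defined by the export script:

import torch
from executorch.extension.pybindings.portable_lib import _load_for_executorch

# Load the program produced by the export step above.
module = _load_for_executorch("llava.pte")

# Placeholder input shapes; consult the export script for the real signature.
sample_inputs = (torch.randn(1, 3, 336, 336),)
outputs = module.forward(sample_inputs)
print([type(o) for o in outputs])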

examples/models/llava/install_requirements.sh

Lines changed: 1 addition & 10 deletions

@@ -26,19 +26,10 @@ fi

 # not included in the pip install package, but needed in llava
 pip install protobuf
-
-# bitsandbytes depends on numpy 1.x, which is not compatible with numpy 2.x.
-# Reinstall bitsandbytes to make it compatible.
-pip install bitsandbytes -I
+pip install triton==3.0.0

 # The deps of llava can have different versions than the deps of ExecuTorch.
 # For example, the torch version required by llava is older than ExecuTorch's.
 # To make both work, recover ExecuTorch's original dependencies by rerunning
 # install_requirements.sh. Note this won't install executorch.
 bash -x ./install_requirements.sh --pybind xnnpack
-
-# Newer transformers (4.38) will give TypeError: LlavaLlamaForCausalLM.forward() got an unexpected keyword argument 'cache_position'
-pip install timm==0.6.13
-pip install transformers==4.37.2
-
-
 pip list
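
Since the llava install and the ExecuTorch re-install can each overwrite the other's pins, the closing `pip list` is there for debugging. A slightly more targeted sanity check, sketched in Python (the package list mirrors the script; the check itself is not part of the commit):

from importlib.metadata import PackageNotFoundError, version

# Packages the script touches, directly or via the ExecuTorch re-install.
for pkg in ("torch", "triton", "protobuf", "transformers"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")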

examples/models/llava/model.py

Lines changed: 52 additions & 1 deletion

@@ -11,7 +11,7 @@
 import re

 from dataclasses import dataclass
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional

 import requests
 import torch

@@ -315,6 +315,57 @@ def __init__(self):
         self.input = None
         self.resized_image = None

+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        image_sizes: Optional[List[List[int]]] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+    ):
+        """
+        An adapter to llava_llama.forward(), making it compatible with the latest HF interface.
+        """
+        # Do not pass 'cache_position' down to forward(); this old third-party llava cannot recognize it.
+        return self.model.forward(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            labels=labels,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+
+    @torch.no_grad()
+    def generate(
+        self,
+        inputs: Optional[torch.Tensor] = None,
+        images: Optional[torch.Tensor] = None,
+        image_sizes: Optional[torch.Tensor] = None,
+        **kwargs,
+    ):
+        """
+        An adapter to llava_llama.generate(), making it compatible with the latest HF interface.
+        """
+        # Delegate to llava_llama.generate(), which handles the multimodal input preparation itself.
+        return self.model.generate(
+            inputs,
+            images=images,
+            image_sizes=image_sizes,
+            **kwargs,
+        )
+
     def get_eager_model(self):
         model = Llava(self.model, self.image_processor, self.config)
         model.to(dtype=torch.float32)
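
Both adapters follow the same pattern: accept the newest HF keyword set, then drop the arguments the wrapped third-party model predates, instead of letting them raise TypeError inside it. A minimal self-contained sketch of that pattern, with LegacyModel as a hypothetical stand-in for llava_llama:

import torch


class LegacyModel(torch.nn.Module):
    """Hypothetical model that predates the 'cache_position' argument."""

    def forward(self, input_ids: torch.Tensor, attention_mask=None):
        return input_ids  # placeholder computation


class Adapter(torch.nn.Module):
    """Accepts the newer call signature, forwards only what the wrapped model understands."""

    def __init__(self, model: torch.nn.Module):
        super().__init__()
        self.model = model

    def forward(self, input_ids, attention_mask=None, cache_position=None, **kwargs):
        # 'cache_position' and any other unrecognized kwargs stop here.
        return self.model(input_ids=input_ids, attention_mask=attention_mask)


adapter = Adapter(LegacyModel())
out = adapter(torch.tensor([[1, 2, 3]]), cache_position=torch.tensor([0]))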

examples/models/llava/test/test_llava.py

Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@ def test_generated_output(self):
         # source of truth, using HF llava
         preprocessed = self.llava.image_preprocess(self.resized)
         with torch.inference_mode():
-            output_ids = self.llava_model.model.generate(
+            output_ids = self.llava_model.generate(
                 self.llava_model.input_ids,
                 images=preprocessed,
                 image_sizes=[preprocessed.size],
