Skip to content

Commit c21b99b

Browse files
authored
[Bugfix][VLM] fix llava processor (#15285)
Signed-off-by: Mengqing Cao <[email protected]>
1 parent 93a00d7 commit c21b99b

File tree

1 file changed: +7 additions, −1 deletion

vllm/model_executor/models/llava.py

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -233,7 +233,13 @@ def get_dummy_processor_inputs(
233233
class LlavaProcessingInfo(BaseLlavaProcessingInfo):
234234

235235
def get_hf_processor(self, **kwargs: object):
    """Return the HF ``LlavaProcessor``, ensuring ``patch_size`` is set.

    Some checkpoints omit ``patch_size`` from ``processor_config.json``
    (e.g. E5-V: https://huggingface.co/royokong/e5-v), leaving the
    processor's ``patch_size`` attribute as ``None``. In that case, fall
    back to the patch size reported by the vision encoder so downstream
    image-token accounting does not break.

    Args:
        **kwargs: Extra keyword arguments forwarded to the HF processor
            constructor via ``self.ctx.get_hf_processor``.

    Returns:
        A ``LlavaProcessor`` instance with a non-``None`` ``patch_size``
        whenever the vision encoder can provide one.
    """
    hf_processor = self.ctx.get_hf_processor(LlavaProcessor, **kwargs)

    # In case patch_size is omitted from `processor_config.json`
    # e.g. for E5-V: https://huggingface.co/royokong/e5-v
    if hf_processor.patch_size is None:
        patch_size = self.get_vision_encoder_info().get_patch_size()
        hf_processor.patch_size = patch_size

    return hf_processor
237243

238244

239245
class BaseLlavaMultiModalProcessor(BaseMultiModalProcessor[_I]):

0 commit comments

Comments
 (0)