We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 93a00d7 · commit c21b99b — Copy full SHA for c21b99b
vllm/model_executor/models/llava.py
@@ -233,7 +233,13 @@ def get_dummy_processor_inputs(
233
class LlavaProcessingInfo(BaseLlavaProcessingInfo):
    """Processing info for LLaVA models backed by the HF ``LlavaProcessor``."""

    def get_hf_processor(self, **kwargs: object):
        """Return the HF processor, backfilling ``patch_size`` when absent.

        Some checkpoints omit ``patch_size`` from ``processor_config.json``
        (e.g. E5-V: https://huggingface.co/royokong/e5-v); in that case the
        value is sourced from the vision encoder info instead.
        """
        processor = self.ctx.get_hf_processor(LlavaProcessor, **kwargs)
        if processor.patch_size is None:
            # Fall back to the vision tower's configured patch size.
            processor.patch_size = self.get_vision_encoder_info().get_patch_size()
        return processor
class BaseLlavaMultiModalProcessor(BaseMultiModalProcessor[_I]):
0 commit comments