
Commit 26ef43d

ggerganov authored and hodlen committed
Revert "llava : add a MobileVLM_V2-1.7B backup (ggml-org#6152)"
This reverts commit f8c4e74.
1 parent 2213ba0 commit 26ef43d

File tree

3 files changed, +6 -59 lines changed

examples/llava/MobileVLM-README.md

Lines changed: 2 additions & 12 deletions
@@ -1,13 +1,11 @@
 # MobileVLM
 
-Currently this implementation supports [MobileVLM-1.7B](https://huggingface.co/mtgv/MobileVLM-1.7B) / [MobileVLM_V2-1.7B](https://huggingface.co/mtgv/MobileVLM_V2-1.7B) variants.
+Currently this implementation supports [MobileVLM-v1.7](https://huggingface.co/mtgv/MobileVLM-1.7B) variants.
 
 for more information, please go to [Meituan-AutoML/MobileVLM](https://github.com/Meituan-AutoML/MobileVLM)
 
 The implementation is based on llava, and is compatible with llava and mobileVLM. The usage is basically same as llava.
 
-Notice: The overall process of model inference for both **MobilVLM** and **MobilVLM_V2** models is the same, but the process of model conversion is a little different. Therefore, using MobiVLM as an example, the different conversion step will be shown.
-
 ## Usage
 Build with cmake or run `make llava-cli` to build it.
 

@@ -36,7 +34,7 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
 python ./examples/llava/llava-surgery.py -m path/to/MobileVLM-1.7B
 ```
 
-3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` (for **V2** the arg is `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
+3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` to convert the LLaVA image encoder to GGUF:
 
 ```sh
 python ./examples/llava/convert-image-encoder-to-gguf \
@@ -46,14 +44,6 @@ python ./examples/llava/convert-image-encoder-to-gguf \
     --projector-type ldp
 ```
 
-```sh
-python ./examples/llava/convert-image-encoder-to-gguf \
-    -m path/to/clip-vit-large-patch14-336 \
-    --llava-projector path/to/MobileVLM-1.7B_V2/llava.projector \
-    --output-dir path/to/MobileVLM-1.7B_V2 \
-    --projector-type ldpv2
-```
-
 4. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF:
 
 ```sh

examples/llava/clip.cpp

Lines changed: 1 addition & 41 deletions
@@ -119,22 +119,19 @@ static std::string format(const char * fmt, ...) {
 #define TN_LLAVA_PROJ      "mm.%d.%s"
 #define TN_MVLM_PROJ_MLP   "mm.model.mlp.%d.%s"
 #define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
-#define TN_MVLM_PROJ_PEG   "mm.model.peg.%d.%s"
 #define TN_IMAGE_NEWLINE   "model.image_newline"
 
 
 enum projector_type {
     PROJECTOR_TYPE_MLP,
     PROJECTOR_TYPE_MLP_NORM,
     PROJECTOR_TYPE_LDP,
-    PROJECTOR_TYPE_LDPV2,
     PROJECTOR_TYPE_UNKNOWN,
 };
 
 static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
     { PROJECTOR_TYPE_MLP, "mlp" },
     { PROJECTOR_TYPE_LDP, "ldp" },
-    { PROJECTOR_TYPE_LDPV2, "ldpv2"},
 };
 
 
@@ -810,29 +807,6 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         }
         embeddings = block_1;
     }
-    else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)
-    {
-        int n_patch = 24;
-        struct ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
-        mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b);
-        mlp_0 = ggml_gelu(ctx0, mlp_0);
-        struct ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0);
-        mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b);
-        // mlp_2 ne = [2048, 576, 1, 1]
-        // // AVG Pool Layer 2*2, strides = 2
-        mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3));
-        // mlp_2 ne = [576, 2048, 1, 1]
-        mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]);
-        // mlp_2 ne [24, 24, 2048, 1]
-        mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0);
-        // weight ne = [3, 3, 2048, 1]
-        struct ggml_tensor * peg_0 = ggml_conv_depthwise_2d(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1);
-        peg_0 = ggml_add(ctx0, peg_0, mlp_2);
-        peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3));
-        peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b);
-        peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]);
-        embeddings = peg_0;
-    }
     else {
         GGML_ASSERT(false);
     }
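
Note: for readers who do not work with ggml graphs, the reverted LDPv2 projector above corresponds roughly to the following PyTorch-style sketch. This is an illustration only, not code from the repository; the 2048-dim output and the 24x24 patch grid come from the comments in the removed lines, while the class name, the argument names, and the CLIP hidden size of 1024 are assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class LDPv2Projector(nn.Module):
    """Rough equivalent of the reverted graph: Linear -> GELU -> Linear -> 2x2 avg pool -> PEG."""
    def __init__(self, clip_dim: int = 1024, llm_dim: int = 2048, n_patch: int = 24):
        # clip_dim = 1024 is an assumption (ViT-L/14-336 hidden size); 2048 and 24
        # are taken from the comments in the removed lines (mlp_2 ne, n_patch).
        super().__init__()
        self.mlp = nn.Sequential(          # mm.model.mlp.0 / mm.model.mlp.2 above
            nn.Linear(clip_dim, llm_dim),
            nn.GELU(),
            nn.Linear(llm_dim, llm_dim),
        )
        # PEG: 3x3 depthwise convolution (groups == channels), as in ggml_conv_depthwise_2d
        self.peg = nn.Conv2d(llm_dim, llm_dim, kernel_size=3, padding=1, groups=llm_dim)
        self.n_patch = n_patch

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [batch, n_patch * n_patch, clip_dim] patch embeddings from the vision tower
        x = self.mlp(x)                                      # [B, 576, 2048]
        b, n, c = x.shape
        x = x.transpose(1, 2).reshape(b, c, self.n_patch, self.n_patch)
        x = F.avg_pool2d(x, kernel_size=2, stride=2)         # [B, 2048, 12, 12]
        x = self.peg(x) + x                                  # depthwise conv plus residual
        return x.flatten(2).transpose(1, 2)                  # [B, 144, 2048]
```

The ggml version adds the PEG bias after the residual add rather than inside the convolution, which is equivalent up to the order of additions.
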
@@ -1203,18 +1177,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
         vision_model.mm_model_block_2_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
         vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
-    }
-    else if (new_clip->proj_type == PROJECTOR_TYPE_LDPV2)
-    {
-        // MobilVLM_V2 projection
-        vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "weight"));
-        vision_model.mm_model_mlp_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "bias"));
-        vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "weight"));
-        vision_model.mm_model_mlp_2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "bias"));
-        vision_model.mm_model_peg_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "weight"));
-        vision_model.mm_model_peg_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "bias"));
-    }
-    else {
+    } else {
         std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type];
         throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str()));
     }
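
Note: the branch removed here loaded six LDPv2 projector tensors whose GGUF names come from the TN_MVLM_PROJ_MLP and TN_MVLM_PROJ_PEG format strings deleted in the first hunk of this file. A small, purely illustrative expansion of those names:

```python
# Format strings from the reverted #defines in clip.cpp
TN_MVLM_PROJ_MLP = "mm.model.mlp.%d.%s"
TN_MVLM_PROJ_PEG = "mm.model.peg.%d.%s"

# Tensor names the deleted PROJECTOR_TYPE_LDPV2 branch tried to load
ldpv2_tensors = [TN_MVLM_PROJ_MLP % (i, s) for i in (0, 2) for s in ("weight", "bias")]
ldpv2_tensors += [TN_MVLM_PROJ_PEG % (0, s) for s in ("weight", "bias")]
print(ldpv2_tensors)
# ['mm.model.mlp.0.weight', 'mm.model.mlp.0.bias',
#  'mm.model.mlp.2.weight', 'mm.model.mlp.2.bias',
#  'mm.model.peg.0.weight', 'mm.model.peg.0.bias']
```
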
@@ -2003,9 +1966,6 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
     if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
         return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
     }
-    if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
-        return ctx->vision_model.mm_model_peg_0_b->ne[0];
-    }
     if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
         return ctx->vision_model.mm_2_b->ne[0];
     }

examples/llava/convert-image-encoder-to-gguf.py

Lines changed: 3 additions & 6 deletions
@@ -1,7 +1,6 @@
 import argparse
 import os
 import json
-import re
 
 import torch
 import numpy as np
@@ -39,11 +38,9 @@ def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: b
 def get_tensor_name(name: str) -> str:
     if "projection" in name:
         return name
+
     if "mm_projector" in name:
-        name = name.replace("model.mm_projector", "mm")
-        name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
-        name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
-        return name
+        return name.replace("model.mm_projector", "mm")
 
     return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln")
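
Note: a small before/after sketch of what this change means for projector tensor names. The helper names and the sample checkpoint key below are hypothetical, patterned on the regular expressions in the removed lines:

```python
import re

def get_tensor_name_before_revert(name: str) -> str:
    # behaviour of the removed lines (MobileVLM_V2-aware renaming)
    if "mm_projector" in name:
        name = name.replace("model.mm_projector", "mm")
        name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
        name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
        return name
    return name

def get_tensor_name_after_revert(name: str) -> str:
    # behaviour kept by this revert (plain prefix replacement)
    if "mm_projector" in name:
        return name.replace("model.mm_projector", "mm")
    return name

key = "model.mm_projector.peg.peg.0.weight"   # hypothetical V2-style checkpoint key
print(get_tensor_name_before_revert(key))     # mm.model.peg.0.weight
print(get_tensor_name_after_revert(key))      # mm.peg.peg.0.weight
```
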

@@ -86,7 +83,7 @@ def bytes_to_unicode():
 ap.add_argument("--clip-model-is-openclip", action="store_true", required=False,
                 help="The clip model is from openclip (for ViT-SO400M type))")
 ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
-ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2", choices=["mlp", "ldp", "ldpv2"], default="mlp")
+ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp", choices=["mlp", "ldp"], default="mlp")
 ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
 # Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711
 # Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5
