
Commit 0e44ef1

danbev authored and NeoZhangJianyu committed
clip : suppress unused variable warnings (ggml-org#8105)
* clip : suppress unused variable warnings

  This commit suppresses unused variable warnings for the variable e in the
  catch blocks. The motivation for this change is to silence the warnings
  that MSVC generates on Windows. GCC does not emit them because it marks
  all catch parameters as used.

  Signed-off-by: Daniel Bevenius <[email protected]>

* squash! clip : suppress unused variable warnings

  Remove e (/*e*/) instead of using GGML_UNUSED.

---------

Signed-off-by: Daniel Bevenius <[email protected]>
1 parent 7dad15c commit 0e44ef1
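For readers unfamiliar with the trick, here is a minimal, self-contained sketch of the pattern the patch applies. The helper parse_or_default is hypothetical and not part of clip.cpp: naming a catch parameter that the handler never reads triggers MSVC's unreferenced-local-variable warning (C4101), while GCC treats catch parameters as used; commenting out the name keeps the type visible and silences MSVC.

#include <cstdio>
#include <stdexcept>
#include <string>

// Hypothetical helper illustrating the pattern from the patch: the catch
// parameter's name is commented out because the handler never reads it.
// With a named parameter, MSVC warns (C4101: unreferenced local variable);
// GCC stays quiet either way because it marks catch parameters as used.
static int parse_or_default(const std::string & s) {
    try {
        return std::stoi(s);                   // may throw std::invalid_argument
    } catch (const std::exception & /*e*/) {   // name elided -> no MSVC warning
        return -1;                             // fall back; the exception object is unused
    }
}

int main() {
    std::printf("%d\n", parse_or_default("42"));            // prints 42
    std::printf("%d\n", parse_or_default("not a number"));  // prints -1
}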

File tree

1 file changed: +13 −13 lines changed


examples/llava/clip.cpp

Lines changed: 13 additions & 13 deletions
@@ -1121,20 +1121,20 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         }
         if (n < 32)
             hparams.image_grid_pinpoints[n] = 0;
-    } catch (std::runtime_error & e) {
+    } catch (std::runtime_error & /*e*/) {
         hparams.image_grid_pinpoints[0]=0;
     }

     try {
         int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
         strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
-    } catch (std::runtime_error & e) {
+    } catch (std::runtime_error & /*e*/) {
         strcpy(hparams.mm_patch_merge_type, "flat");
     }

     try {
         hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
-    } catch(const std::exception& e) {
+    } catch(const std::exception& /*e*/) {
         hparams.image_crop_resolution = hparams.image_size;
     }

@@ -1173,37 +1173,37 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     try {
         vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
         new_clip->has_class_embedding = true;
-    } catch (const std::exception& e) {
+    } catch (const std::exception& /*e*/) {
         new_clip->has_class_embedding = false;
     }

     try {
         vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
         vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
         new_clip->has_pre_norm = true;
-    } catch (std::exception & e) {
+    } catch (std::exception & /*e*/) {
         new_clip->has_pre_norm = false;
     }

     try {
         vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
         vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
         new_clip->has_post_norm = true;
-    } catch (std::exception & e) {
+    } catch (std::exception & /*e*/) {
         new_clip->has_post_norm = false;
     }

     try {
         vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
         new_clip->has_patch_bias = true;
-    } catch (std::exception & e) {
+    } catch (std::exception & /*e*/) {
         new_clip->has_patch_bias = false;
     }

     try {
         vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
         vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
-    } catch(const std::exception& e) {
+    } catch(const std::exception& /*e*/) {
         LOG_TEE("%s: failed to load vision model tensors\n", __func__);
     }

@@ -1215,26 +1215,26 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             // Yi-type llava
             vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight"));
             vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "bias"));
-        } catch (std::runtime_error & e) { }
+        } catch (std::runtime_error & /*e*/) { }
         try {
             // missing in Yi-type llava
             vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
             vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
-        } catch (std::runtime_error & e) { }
+        } catch (std::runtime_error & /*e*/) { }
         try {
             // Yi-type llava
             vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight"));
             vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias"));
-        } catch (std::runtime_error & e) { }
+        } catch (std::runtime_error & /*e*/) { }
         try {
             // Yi-type llava
             vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight"));
             vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias"));
-        } catch (std::runtime_error & e) { }
+        } catch (std::runtime_error & /*e*/) { }
         try {
             vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
             // LOG_TEE("%s: image_newline tensor (llava-1.6) found\n", __func__);
-        } catch (std::runtime_error & e) { }
+        } catch (std::runtime_error & /*e*/) { }
     } else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
         // MobileVLM projection
         vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
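The squash commit elides the parameter name rather than keeping it and marking it used. For comparison, a sketch of the rejected alternative; it assumes GGML_UNUSED is ggml.h's usual cast-to-void macro and reuses the hypothetical parse_or_default helper from above:

#include <cstdio>
#include <stdexcept>
#include <string>

// Assumption: ggml.h defines GGML_UNUSED as a cast to void; it is
// redefined here so the sketch stands alone.
#define GGML_UNUSED(x) (void)(x)

static int parse_or_default(const std::string & s) {
    try {
        return std::stoi(s);
    } catch (const std::exception & e) {
        GGML_UNUSED(e); // explicit "use" silences MSVC but keeps the name
        return -1;
    }
}

int main() {
    std::printf("%d\n", parse_or_default("oops")); // prints -1
}

Eliding the name was presumably preferred because it needs no macro and behaves identically on every compiler.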
