@@ -1121,20 +1121,20 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             }
             if (n < 32)
                 hparams.image_grid_pinpoints[n] = 0;
-        } catch (std::runtime_error & e) {
+        } catch (std::runtime_error & /*e*/) {
             hparams.image_grid_pinpoints[0]=0;
         }
 
         try {
             int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
             strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
-        } catch (std::runtime_error & e) {
+        } catch (std::runtime_error & /*e*/) {
             strcpy(hparams.mm_patch_merge_type, "flat");
         }
 
         try {
             hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
-        } catch (const std::exception& e) {
+        } catch (const std::exception& /*e*/) {
             hparams.image_crop_resolution = hparams.image_size;
         }
 
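Every hunk in this diff makes the same mechanical change: the exception object bound in each `catch` clause is never read, so its name is commented out, presumably to silence unused-variable diagnostics on some compilers (for example Clang's `-Wunused-exception-parameter` or MSVC's unreferenced-local-variable warning). C++ also allows omitting the parameter name altogether. A minimal sketch of the equivalent spellings, independent of clip.cpp:

```cpp
#include <stdexcept>

// Sketch only: three ways to write a handler that never touches the exception object.
void parse_or_default(bool fail) {
    // 1) Named parameter: 'e' is unused, which some compilers flag.
    try {
        if (fail) throw std::runtime_error("missing key");
    } catch (const std::runtime_error & e) {
        (void)e; // explicit suppression if the name is kept
    }

    // 2) Name commented out, as in this diff: the declaration stays readable,
    //    but no unused variable is introduced.
    try {
        if (fail) throw std::runtime_error("missing key");
    } catch (const std::runtime_error & /*e*/) {
    }

    // 3) Name omitted entirely: also valid C++.
    try {
        if (fail) throw std::runtime_error("missing key");
    } catch (const std::runtime_error &) {
    }
}
```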
@@ -1173,37 +1173,37 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         try {
             vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
             new_clip->has_class_embedding = true;
-        } catch (const std::exception& e) {
+        } catch (const std::exception& /*e*/) {
             new_clip->has_class_embedding = false;
         }
 
         try {
             vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
             vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
             new_clip->has_pre_norm = true;
-        } catch (std::exception & e) {
+        } catch (std::exception & /*e*/) {
             new_clip->has_pre_norm = false;
         }
 
         try {
             vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
             vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
             new_clip->has_post_norm = true;
-        } catch (std::exception & e) {
+        } catch (std::exception & /*e*/) {
             new_clip->has_post_norm = false;
         }
 
         try {
             vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
             new_clip->has_patch_bias = true;
-        } catch (std::exception & e) {
+        } catch (std::exception & /*e*/) {
             new_clip->has_patch_bias = false;
         }
 
         try {
             vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
             vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
-        } catch (const std::exception& e) {
+        } catch (const std::exception& /*e*/) {
             LOG_TEE("%s: failed to load vision model tensors\n", __func__);
         }
 
@@ -1215,26 +1215,26 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
                 // Yi-type llava
                 vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight"));
                 vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "bias"));
-            } catch (std::runtime_error & e) { }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 // missing in Yi-type llava
                 vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
                 vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
-            } catch (std::runtime_error & e) { }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 // Yi-type llava
                 vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight"));
                 vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias"));
-            } catch (std::runtime_error & e) { }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 // Yi-type llava
                 vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight"));
                 vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias"));
-            } catch (std::runtime_error & e) { }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
                 // LOG_TEE("%s: image_newline tensor (llava-1.6) found\n", __func__);
-            } catch (std::runtime_error & e) { }
+            } catch (std::runtime_error & /*e*/) { }
         } else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
             // MobileVLM projection
             vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));