We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 969e6c9 commit e1d2d8f — Copy full SHA for e1d2d8f
examples/llava/clip.cpp
@@ -2988,7 +2988,7 @@ size_t get_max_image_grid_pinpoints() {
2988
}
2989
2990
// Determine the number of encoder layers to iterate over
2991
-CLIP_API int get_deepest_feature_layer(const struct clip_ctx * ctx) {
+int get_deepest_feature_layer(const struct clip_ctx * ctx) {
2992
// Get the index of the second to last layer; this is the
2993
// default for models that have a llava projector
2994
int n_layer = ctx->vision_model.hparams.n_layer - 1;
0 commit comments