
Commit 156dbcb

ggerganov and Neo Zhang, authored and committed
llama : fix compile warning (ggml-org#8304)
1 parent 70c494d commit 156dbcb

File tree: 1 file changed, +1 -1 lines changed

src/llama.cpp (1 addition, 1 deletion)

@@ -7262,7 +7262,7 @@ static bool llm_load_tensors(

             layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});

-            if (i < hparams.n_layer_dense_lead) {
+            if (i < (int) hparams.n_layer_dense_lead) {
                 layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                 layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
                 layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
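The change casts hparams.n_layer_dense_lead to int before comparing it against the signed loop index i, which silences a signed/unsigned comparison warning. Below is a minimal standalone sketch of that kind of warning and the cast fix; it assumes the field is an unsigned integer (e.g. uint32_t), and the hparams_t struct here is a simplified stand-in, not the actual llama.cpp definition.

    // Sketch only: reproduces the kind of -Wsign-compare warning the cast removes.
    // Assumption: n_layer_dense_lead is an unsigned field (uint32_t); the real
    // struct in llama.cpp is more elaborate.
    #include <cstdint>
    #include <cstdio>

    struct hparams_t {
        uint32_t n_layer_dense_lead = 1; // assumed unsigned type
    };

    int main() {
        hparams_t hparams;
        for (int i = 0; i < 4; ++i) {
            // Before the fix: `i < hparams.n_layer_dense_lead` compares a signed int
            // against an unsigned value, which GCC/Clang flag with -Wsign-compare.
            if (i < (int) hparams.n_layer_dense_lead) { // cast keeps both sides signed
                printf("dense FFN layer %d\n", i);
            }
        }
        return 0;
    }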
