
Commit 29b9b2f

fix indent
1 parent 8079eb1 commit 29b9b2f

File tree

1 file changed: +2 −2 lines changed


src/llama.cpp

Lines changed: 2 additions & 2 deletions
@@ -7700,7 +7700,7 @@ static bool llm_load_tensors(
         layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
         layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
         layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
-        }
+        }

         // optional bias tensors
         layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);

@@ -10816,7 +10816,7 @@ struct llm_build_context {
                 model.layers[il].attn_norm, NULL,
                 LLM_NORM_RMS, cb, il);
         cb(cur, "attn_norm", il);
-        }
+        }

         if (n_head > 0 && n_head_kv == 0) { // "linear attention" of Llama-3_1-Nemotron-51B
             cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
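Both hunks are whitespace-only fixes to closing braces in the code paths for Llama-3_1-Nemotron-51B. The second hunk's context shows the per-layer dispatch on head counts: layers with attention heads but no KV heads take the "linear attention" branch, which applies the layer's output projection (wo) directly via llm_build_lora_mm instead of the usual attention. The sketch below is a minimal, self-contained illustration of that condition only, with made-up head counts and printf in place of the real graph-building calls; it is not code from llama.cpp.

#include <cstdio>

int main() {
    // Hypothetical per-layer head counts, chosen only to exercise each branch;
    // in llama.cpp the real values come from the model's hparams.
    struct Layer { int n_head; int n_head_kv; };
    const Layer layers[] = {
        {32, 8},  // regular grouped-query attention layer
        {32, 0},  // attention heads but no KV heads -> "linear attention" layer
        { 0, 0},  // layer without attention
    };

    for (const Layer & l : layers) {
        if (l.n_head > 0 && l.n_head_kv == 0) {
            // same condition as in the second hunk's context
            std::printf("n_head=%2d n_head_kv=%d -> linear attention path\n", l.n_head, l.n_head_kv);
        } else if (l.n_head > 0) {
            std::printf("n_head=%2d n_head_kv=%d -> regular attention path\n", l.n_head, l.n_head_kv);
        } else {
            std::printf("n_head=%2d n_head_kv=%d -> no attention for this layer\n", l.n_head, l.n_head_kv);
        }
    }
    return 0;
}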
