
Commit 259469c

Move GLM4 f32 attention fix to the correct function (#13750)
1 parent: 4c32832

File tree: 1 file changed (+4, −4 lines)


src/llama-graph.cpp

Lines changed: 4 additions & 4 deletions
@@ -1287,6 +1287,10 @@ ggml_tensor * llm_graph_context::build_attn(

     if (wo) {
         cur = build_lora_mm(wo, cur);
+        if (arch == LLM_ARCH_GLM4) {
+            // GLM4 seems to have numerical issues with half-precision accumulators
+            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
+        }
     }

     if (wo_b) {
@@ -1367,10 +1371,6 @@ ggml_tensor * llm_graph_context::build_attn(

     if (wo) {
         cur = build_lora_mm(wo, cur);
-        if (arch == LLM_ARCH_GLM4) {
-            // GLM4 seems to have numerical issues with half-precision accumulators
-            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
-        }
     }

     if (wo_b) {
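For readers unfamiliar with the call being moved: ggml_mul_mat_set_prec() marks a single mul_mat node so that back-ends accumulate its result in full f32 instead of the default half precision. The following is a minimal sketch of the same pattern outside llama.cpp; the tensor shapes, types, and buffer size are illustrative assumptions and not taken from this commit.

#include "ggml.h"

int main() {
    // small scratch context; no_alloc = false so tensor data is allocated in this buffer
    ggml_init_params params = { /*mem_size*/ 16 * 1024 * 1024, /*mem_buffer*/ nullptr, /*no_alloc*/ false };
    ggml_context * ctx = ggml_init(params);

    // illustrative shapes: a 64x64 f16 weight and a 64-element f32 activation
    ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 64, 64);
    ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 64);

    // build the matmul node, then request f32 accumulation for that node --
    // the same pattern the commit applies to the GLM4 attention output projection
    ggml_tensor * y = ggml_mul_mat(ctx, w, x);
    ggml_mul_mat_set_prec(y, GGML_PREC_F32);

    ggml_free(ctx);
    return 0;
}

Because the precision flag is attached per node, only the marked matmul pays the cost of f32 accumulation; every other matmul in the graph keeps the faster default.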
