
Commit 5f37be3

feat(llama.cpp logging): Add some helpful logs when tensors are missing
NOTE: This also adds the LLAMA_LOG_DEBUG macro for use in llama.cpp. This may have been omitted for a good reason! If so, this change is certainly optional.

Branch: GraniteMoE

Signed-off-by: Gabe Goodhart <[email protected]>
1 parent: 3219f58

2 files changed: +6 −0 lines

src/llama-impl.h
Lines changed: 1 addition & 0 deletions

@@ -24,6 +24,7 @@ LLAMA_ATTRIBUTE_FORMAT(2, 3)
 void llama_log_internal        (ggml_log_level level, const char * format, ...);
 void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);
 
+#define LLAMA_LOG_DEBUG(...) llama_log_internal(GGML_LOG_LEVEL_DEBUG , __VA_ARGS__)
 #define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
 #define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
 #define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
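Like the existing macros, LLAMA_LOG_DEBUG routes through llama_log_internal, which hands the formatted text to whatever callback was registered with the public llama_log_set API (or to llama_log_callback_default). A minimal sketch of a program that surfaces the new DEBUG-level messages; log_cb and the model-loading comment are illustrative, not part of this commit:

    #include "llama.h"
    #include <cstdio>

    // Forward all library log output to stderr, tagging DEBUG lines.
    // Change the DEBUG branch to a bare `return;` to silence the new logs instead.
    static void log_cb(ggml_log_level level, const char * text, void * /*user_data*/) {
        if (level == GGML_LOG_LEVEL_DEBUG) {
            fprintf(stderr, "[debug] %s", text); // the new missing-tensor lines land here
            return;
        }
        fputs(text, stderr);
    }

    int main() {
        llama_log_set(log_cb, nullptr);
        // ... load a model here; any missing-tensor lookup in LLM_TN
        // is now reported through log_cb instead of passing silently ...
        return 0;
    }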

src/llama.cpp
Lines changed: 5 additions & 0 deletions

@@ -1484,34 +1484,39 @@ struct LLM_TN {
 
     std::string operator()(llm_tensor tensor) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
+            LLAMA_LOG_DEBUG("%s: Missing tensor %d for arch %s\n", __func__, tensor, LLM_ARCH_NAMES.at(arch));
             return "__missing__";
         }
         return LLM_TENSOR_NAMES.at(arch).at(tensor);
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
+            LLAMA_LOG_DEBUG("%s(%s): Missing tensor %d for arch %s\n", __func__, suffix.c_str(), tensor, LLM_ARCH_NAMES.at(arch));
             return "__missing__";
         }
         return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, int bid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
+            LLAMA_LOG_DEBUG("%s(%d): Missing tensor %d for arch %s\n", __func__, bid, tensor, LLM_ARCH_NAMES.at(arch));
             return "__missing__";
         }
         return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
+            LLAMA_LOG_DEBUG("%s(%s, %d): Missing tensor %d for arch %s\n", __func__, suffix.c_str(), bid, tensor, LLM_ARCH_NAMES.at(arch));
             return "__missing__";
         }
         return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
         if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
+            LLAMA_LOG_DEBUG("%s(%s, %d, %d): Missing tensor %d for arch %s\n", __func__, suffix.c_str(), bid, xid, tensor, LLM_ARCH_NAMES.at(arch));
             return "__missing__";
         }
         return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
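The pattern in all five overloads is the same: a tensor that an architecture's name table does not define maps to the sentinel "__missing__" instead of throwing, and the new log line makes that previously silent fallback observable. A self-contained sketch of the idea (all names here are invented for illustration; only the structure mirrors LLM_TN):

    #include <cstdio>
    #include <map>
    #include <string>

    enum my_tensor { TENSOR_ATTN, TENSOR_FFN, TENSOR_ROUTER };

    // Per-architecture name table; TENSOR_ROUTER is deliberately absent.
    static const std::map<my_tensor, std::string> TENSOR_NAMES = {
        { TENSOR_ATTN, "blk.%d.attn" },
        { TENSOR_FFN,  "blk.%d.ffn"  },
    };

    static std::string tensor_name(my_tensor tensor, int bid) {
        if (TENSOR_NAMES.find(tensor) == TENSOR_NAMES.end()) {
            // Stand-in for LLAMA_LOG_DEBUG: record the miss, but stay non-fatal.
            fprintf(stderr, "%s(%d): Missing tensor %d\n", __func__, bid, tensor);
            return "__missing__";
        }
        char buf[256];
        snprintf(buf, sizeof(buf), TENSOR_NAMES.at(tensor).c_str(), bid);
        return buf;
    }

    int main() {
        printf("%s\n", tensor_name(TENSOR_ATTN,   2).c_str()); // "blk.2.attn"
        printf("%s\n", tensor_name(TENSOR_ROUTER, 2).c_str()); // "__missing__", plus a debug line on stderr
        return 0;
    }

Returning a sentinel keeps optional tensors non-fatal, which matters for architectures such as GraniteMoE that omit weights other models have, while the debug log means a typo in a tensor table no longer fails without a trace.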
