 
 #include "llama.h"
 
+#include <cinttypes>
 #include <climits>
 #include <cstdarg>
 #include <vector>
+#include <sstream>
 
 struct llama_logger_state {
     ggml_log_callback log_callback = llama_log_callback_default;
@@ -89,3 +91,75 @@ std::string format(const char * fmt, ...) {
     va_end(ap);
     return std::string(buf.data(), size);
 }
+
+std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
+    char buf[256];
+    snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
+    for (size_t i = 1; i < ne.size(); i++) {
+        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
+    }
+    return buf;
+}
+
+std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
+    char buf[256];
+    snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
+    for (int i = 1; i < GGML_MAX_DIMS; i++) {
+        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
+    }
+    return buf;
+}
+
+static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
+    switch (type) {
+        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
+        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
+        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
+        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
+        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
+        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
+        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
+        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
+        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
+        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
+        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
+        default:                return format("unknown type %d", type);
+    }
+}
+
+std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
+    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
+
+    switch (type) {
+        case GGUF_TYPE_STRING:
+            return gguf_get_val_str(ctx_gguf, i);
+        case GGUF_TYPE_ARRAY:
+            {
+                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
+                int arr_n = gguf_get_arr_n(ctx_gguf, i);
+                const void * data = gguf_get_arr_data(ctx_gguf, i);
+                std::stringstream ss;
+                ss << "[";
+                for (int j = 0; j < arr_n; j++) {
+                    if (arr_type == GGUF_TYPE_STRING) {
+                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
+                        // escape quotes
+                        replace_all(val, "\\", "\\\\");
+                        replace_all(val, "\"", "\\\"");
+                        ss << '"' << val << '"';
+                    } else if (arr_type == GGUF_TYPE_ARRAY) {
+                        ss << "???";
+                    } else {
+                        ss << gguf_data_to_str(arr_type, data, j);
+                    }
+                    if (j < arr_n - 1) {
+                        ss << ", ";
+                    }
+                }
+                ss << "]";
+                return ss.str();
+            }
+        default:
+            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
+    }
+}
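The helpers added in this hunk are meant for printing model metadata in readable form. Below is a minimal usage sketch, not part of this commit, showing how `gguf_kv_to_str` might be driven from a `gguf_context` opened with `gguf_init_from_file`; the header names and declaration location of `gguf_kv_to_str` are assumptions for illustration.

```cpp
// Hypothetical sketch: dump all GGUF key/value pairs of a model file
// using the gguf_kv_to_str helper added above.
#include "llama-impl.h" // assumed to declare gguf_kv_to_str
#include "ggml.h"       // newer ggml trees declare the gguf_* API in "gguf.h" instead

#include <cstdio>
#include <string>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    // load only the metadata; tensor data is not needed to print the keys
    struct gguf_init_params params = {
        /*.no_alloc =*/ true,
        /*.ctx      =*/ nullptr,
    };
    struct gguf_context * ctx = gguf_init_from_file(argv[1], params);
    if (!ctx) {
        fprintf(stderr, "failed to load %s\n", argv[1]);
        return 1;
    }

    const int n_kv = gguf_get_n_kv(ctx);
    for (int i = 0; i < n_kv; i++) {
        const char *      key = gguf_get_key(ctx, i);
        const std::string val = gguf_kv_to_str(ctx, i);
        printf("%-40s = %s\n", key, val.c_str());
    }

    gguf_free(ctx);
    return 0;
}
```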