/src/llama.cpp/src/llama-impl.cpp
Line | Count | Source |
1 | | #include "llama-impl.h" |
2 | | |
3 | | #include "gguf.h" |
4 | | #include "llama.h" |
5 | | |
6 | | #include <cinttypes> |
7 | | #include <climits> |
8 | | #include <cstdarg> |
9 | | #include <cstring> |
10 | | #include <vector> |
11 | | #include <sstream> |
12 | | |
// Process-wide logging configuration: the active log callback and the opaque
// user-data pointer handed back to it on every invocation. Defaults to the
// stderr sink (llama_log_callback_default).
struct llama_logger_state {
    ggml_log_callback log_callback = llama_log_callback_default;
    void * log_callback_user_data = nullptr;
};

// Single file-local logger instance used by all llama_log_* functions below.
// NOTE(review): access is unsynchronized — callers are presumably expected to
// install their callback before concurrent logging begins; confirm.
static llama_logger_state g_logger_state;
19 | | |
20 | 347 | time_meas::time_meas(int64_t & t_acc, bool disable) : t_start_us(disable ? -1 : ggml_time_us()), t_acc(t_acc) {} |
21 | | |
22 | 338 | time_meas::~time_meas() { |
23 | 338 | if (t_start_us >= 0) { |
24 | 338 | t_acc += ggml_time_us() - t_start_us; |
25 | 338 | } |
26 | 338 | } |
27 | | |
// Retrieve the currently installed log callback and its user-data pointer.
// Delegates to ggml, which holds the authoritative logging configuration.
void llama_log_get(ggml_log_callback * log_callback, void ** user_data) {
    ggml_log_get(log_callback, user_data);
}
31 | | |
// Install a log callback for both ggml and llama logging. A null callback
// restores the default stderr sink; `user_data` is forwarded verbatim to
// every subsequent callback invocation.
void llama_log_set(ggml_log_callback log_callback, void * user_data) {
    ggml_log_set(log_callback, user_data);
    // keep the local mirror in sync, substituting the default sink for null
    g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_logger_state.log_callback_user_data = user_data;
}
37 | | |
38 | 2.00k | static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) { |
39 | 2.00k | va_list args_copy; |
40 | 2.00k | va_copy(args_copy, args); |
41 | 2.00k | char buffer[128]; |
42 | 2.00k | int len = vsnprintf(buffer, 128, format, args); |
43 | 2.00k | if (len < 128) { |
44 | 1.97k | g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data); |
45 | 1.97k | } else { |
46 | 32 | char * buffer2 = new char[len + 1]; |
47 | 32 | vsnprintf(buffer2, len + 1, format, args_copy); |
48 | 32 | buffer2[len] = 0; |
49 | 32 | g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data); |
50 | 32 | delete[] buffer2; |
51 | 32 | } |
52 | 2.00k | va_end(args_copy); |
53 | 2.00k | } |
54 | | |
// Variadic entry point for internal llama logging: packages the varargs into
// a va_list and forwards to llama_log_internal_v.
void llama_log_internal(ggml_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}
61 | | |
62 | 2.00k | void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) { |
63 | 2.00k | (void) level; |
64 | 2.00k | (void) user_data; |
65 | 2.00k | fputs(text, stderr); |
66 | 2.00k | fflush(stderr); |
67 | 2.00k | } |
68 | | |
// Replace every non-overlapping occurrence of `search` in `s` with `replace`,
// scanning left to right. An empty `search` string is a no-op (it would match
// everywhere and never advance).
void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    if (search.empty()) {
        return;
    }
    std::string result;
    result.reserve(s.length());
    size_t from = 0;
    for (size_t hit = s.find(search); hit != std::string::npos; hit = s.find(search, from)) {
        result.append(s, from, hit - from); // copy the unmatched prefix
        result += replace;
        from = hit + search.length();       // resume after the match
    }
    result.append(s, from, std::string::npos); // copy the tail
    s = std::move(result);
}
85 | | |
86 | 802 | std::string format(const char * fmt, ...) { |
87 | 802 | va_list ap; |
88 | 802 | va_list ap2; |
89 | 802 | va_start(ap, fmt); |
90 | 802 | va_copy(ap2, ap); |
91 | 802 | int size = vsnprintf(NULL, 0, fmt, ap); |
92 | 802 | GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT |
93 | 802 | std::vector<char> buf(size + 1); |
94 | 802 | int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); |
95 | 802 | GGML_ASSERT(size2 == size); |
96 | 802 | va_end(ap2); |
97 | 802 | va_end(ap); |
98 | 802 | return std::string(buf.data(), size); |
99 | 802 | } |
100 | | |
// Format a tensor shape as a comma-separated, right-aligned list of
// dimensions, e.g. " 4096,    32". Returns an empty string for an empty
// shape (the previous ne.at(0) would have thrown std::out_of_range).
std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
    if (ne.empty()) {
        return "";
    }
    char buf[256];
    // track the written length instead of re-running strlen every iteration;
    // the `len < sizeof(buf)` guard stops appending if the buffer ever fills
    int len = snprintf(buf, sizeof(buf), "%5" PRId64, ne[0]);
    for (size_t i = 1; i < ne.size() && len >= 0 && len < (int) sizeof(buf); i++) {
        len += snprintf(buf + len, sizeof(buf) - len, ", %5" PRId64, ne[i]);
    }
    return buf;
}
109 | | |
// Format a ggml tensor's shape as a comma-separated, right-aligned list.
// Always prints all GGML_MAX_DIMS entries of t->ne (trailing unused
// dimensions appear as 1 in ggml tensors).
std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
    for (int i = 1; i < GGML_MAX_DIMS; i++) {
        // append ", <dim>" after whatever has been written so far
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
    }
    return buf;
}
118 | | |
119 | 2.79k | static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) { |
120 | 2.79k | switch (type) { |
121 | 70 | case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]); |
122 | 54 | case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]); |
123 | 54 | case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]); |
124 | 276 | case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]); |
125 | 58 | case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]); |
126 | 58 | case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]); |
127 | 440 | case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]); |
128 | 733 | case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]); |
129 | 699 | case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]); |
130 | 294 | case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]); |
131 | 56 | case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false"; |
132 | 0 | default: return format("unknown type %d", type); |
133 | 2.79k | } |
134 | 2.79k | } |
135 | | |
136 | 866 | std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) { |
137 | 866 | const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); |
138 | | |
139 | 866 | switch (type) { |
140 | 58 | case GGUF_TYPE_STRING: |
141 | 58 | return gguf_get_val_str(ctx_gguf, i); |
142 | 178 | case GGUF_TYPE_ARRAY: |
143 | 178 | { |
144 | 178 | const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i); |
145 | 178 | int arr_n = gguf_get_arr_n(ctx_gguf, i); |
146 | 178 | const void * data = arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx_gguf, i); |
147 | 178 | std::stringstream ss; |
148 | 178 | ss << "["; |
149 | 2.50k | for (int j = 0; j < arr_n; j++) { |
150 | 2.33k | if (arr_type == GGUF_TYPE_STRING) { |
151 | 168 | std::string val = gguf_get_arr_str(ctx_gguf, i, j); |
152 | | // escape quotes |
153 | 168 | replace_all(val, "\\", "\\\\"); |
154 | 168 | replace_all(val, "\"", "\\\""); |
155 | 168 | ss << '"' << val << '"'; |
156 | 2.16k | } else if (arr_type == GGUF_TYPE_ARRAY) { |
157 | 0 | ss << "???"; |
158 | 2.16k | } else { |
159 | 2.16k | ss << gguf_data_to_str(arr_type, data, j); |
160 | 2.16k | } |
161 | 2.33k | if (j < arr_n - 1) { |
162 | 2.15k | ss << ", "; |
163 | 2.15k | } |
164 | 2.33k | } |
165 | 178 | ss << "]"; |
166 | 178 | return ss.str(); |
167 | 0 | } |
168 | 630 | default: |
169 | 630 | return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0); |
170 | 866 | } |
171 | 866 | } |