/src/llama.cpp/common/common.cpp
Line | Count | Source |
1 | | #if defined(_MSC_VER) |
2 | | #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING |
3 | | #endif |
4 | | |
5 | | #include "ggml.h" |
6 | | #include "gguf.h" |
7 | | |
8 | | #include "common.h" |
9 | | #include "log.h" |
10 | | #include "llama.h" |
11 | | |
12 | | #include <algorithm> |
13 | | #include <cinttypes> |
14 | | #include <climits> |
15 | | #include <cmath> |
16 | | #include <codecvt> |
17 | | #include <chrono> |
18 | | #include <cstdarg> |
19 | | #include <cstring> |
20 | | #include <ctime> |
21 | | #include <filesystem> |
22 | | #include <fstream> |
23 | | #include <iostream> |
24 | | #include <iterator> |
25 | | #include <regex> |
26 | | #include <sstream> |
27 | | #include <string> |
28 | | #include <thread> |
29 | | #include <unordered_set> |
30 | | #include <vector> |
31 | | |
32 | | #if defined(__APPLE__) && defined(__MACH__) |
33 | | #include <sys/types.h> |
34 | | #include <sys/sysctl.h> |
35 | | #endif |
36 | | |
37 | | #if defined(_WIN32) |
38 | | #define WIN32_LEAN_AND_MEAN |
39 | | #ifndef NOMINMAX |
40 | | # define NOMINMAX |
41 | | #endif |
42 | | #include <locale> |
43 | | #include <windows.h> |
44 | | #include <string.h> |
45 | | #include <fcntl.h> |
46 | | #include <io.h> |
47 | | #else |
48 | | #include <sys/ioctl.h> |
49 | | #include <sys/stat.h> |
50 | | #include <unistd.h> |
51 | | #endif |
52 | | |
53 | | #if defined(__linux__) |
54 | | #include <sys/types.h> |
55 | | #include <pwd.h> |
56 | | #endif |
57 | | |
58 | | #if defined(_MSC_VER) |
59 | | #pragma warning(disable: 4244 4267) // possible loss of data |
60 | | #endif |
61 | | |
62 | 0 | common_time_meas::common_time_meas(int64_t & t_acc, bool disable) : t_start_us(disable ? -1 : ggml_time_us()), t_acc(t_acc) {} |
63 | | |
64 | 0 | common_time_meas::~common_time_meas() { |
65 | 0 | if (t_start_us >= 0) { |
66 | 0 | t_acc += ggml_time_us() - t_start_us; |
67 | 0 | } |
68 | 0 | } |
69 | | |
70 | | // |
71 | | // CPU utils |
72 | | // |
73 | | |
74 | 0 | int32_t cpu_get_num_physical_cores() { |
75 | 0 | #ifdef __linux__ |
76 | | // enumerate the set of thread siblings, num entries is num cores |
77 | 0 | std::unordered_set<std::string> siblings; |
78 | 0 | for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) { |
79 | 0 | std::ifstream thread_siblings("/sys/devices/system/cpu/cpu" |
80 | 0 | + std::to_string(cpu) + "/topology/thread_siblings"); |
81 | 0 | if (!thread_siblings.is_open()) { |
82 | 0 | break; // no more cpus |
83 | 0 | } |
84 | 0 | std::string line; |
85 | 0 | if (std::getline(thread_siblings, line)) { |
86 | 0 | siblings.insert(line); |
87 | 0 | } |
88 | 0 | } |
89 | 0 | if (!siblings.empty()) { |
90 | 0 | return static_cast<int32_t>(siblings.size()); |
91 | 0 | } |
92 | | #elif defined(__APPLE__) && defined(__MACH__) |
93 | | int32_t num_physical_cores; |
94 | | size_t len = sizeof(num_physical_cores); |
95 | | int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0); |
96 | | if (result == 0) { |
97 | | return num_physical_cores; |
98 | | } |
99 | | result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0); |
100 | | if (result == 0) { |
101 | | return num_physical_cores; |
102 | | } |
103 | | #elif defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later |
104 | | // TODO: windows + arm64 + mingw64 |
105 | | unsigned int n_threads_win = std::thread::hardware_concurrency(); |
106 | | unsigned int default_threads = n_threads_win > 0 ? (n_threads_win <= 4 ? n_threads_win : n_threads_win / 2) : 4; |
107 | | |
108 | | DWORD buffer_size = 0; |
109 | | if (!GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &buffer_size)) { |
110 | | if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) { |
111 | | return default_threads; |
112 | | } |
113 | | } |
114 | | |
115 | | std::vector<char> buffer(buffer_size); |
116 | | if (!GetLogicalProcessorInformationEx(RelationProcessorCore, reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data()), &buffer_size)) { |
117 | | return default_threads; |
118 | | } |
119 | | |
120 | | int32_t num_physical_cores = 0; |
121 | | PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data()); |
122 | | while (buffer_size > 0) { |
123 | | if (info->Relationship == RelationProcessorCore) { |
124 | | num_physical_cores += info->Processor.GroupCount; |
125 | | } |
126 | | buffer_size -= info->Size; |
127 | | info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(reinterpret_cast<char*>(info) + info->Size); |
128 | | } |
129 | | |
130 | | return num_physical_cores > 0 ? num_physical_cores : default_threads; |
131 | | #endif |
132 | 0 | unsigned int n_threads = std::thread::hardware_concurrency(); |
133 | 0 | return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4; |
134 | 0 | } |
135 | | |
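For context, a minimal usage sketch of the helper above (a hypothetical caller, assuming only that common.h is on the include path). On a Linux machine with 4 cores and 8 hardware threads, each pair of sibling threads reports the same thread_siblings line (e.g. "0,4"), so the unordered_set collapses to 4 unique entries:

    #include "common.h"
    #include <cstdio>

    int main() {
        // prints 4 on the hypothetical 4-core/8-thread machine described above
        std::printf("physical cores: %d\n", cpu_get_num_physical_cores());
        return 0;
    }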
136 | | #if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__) |
137 | | #include <pthread.h> |
138 | | |
139 | | static void cpuid(unsigned leaf, unsigned subleaf, |
140 | 0 | unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx) { |
141 | 0 | __asm__("movq\t%%rbx,%%rsi\n\t" |
142 | 0 | "cpuid\n\t" |
143 | 0 | "xchgq\t%%rbx,%%rsi" |
144 | 0 | : "=a"(*eax), "=S"(*ebx), "=c"(*ecx), "=d"(*edx) |
145 | 0 | : "0"(leaf), "2"(subleaf)); |
146 | 0 | } |
147 | | |
148 | 0 | static int pin_cpu(int cpu) { |
149 | 0 | cpu_set_t mask; |
150 | 0 | CPU_ZERO(&mask); |
151 | 0 | CPU_SET(cpu, &mask); |
152 | 0 | return pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask); |
153 | 0 | } |
154 | | |
155 | 0 | static bool is_hybrid_cpu(void) { |
156 | 0 | unsigned eax, ebx, ecx, edx; |
157 | 0 | cpuid(7, 0, &eax, &ebx, &ecx, &edx); |
158 | 0 | return !!(edx & (1u << 15)); |
159 | 0 | } |
160 | | |
161 | 0 | static bool is_running_on_efficiency_core(void) { |
162 | 0 | unsigned eax, ebx, ecx, edx; |
163 | 0 | cpuid(0x1a, 0, &eax, &ebx, &ecx, &edx); |
164 | 0 | int intel_atom = 0x20; |
165 | 0 | int core_type = (eax & 0xff000000u) >> 24; |
166 | 0 | return core_type == intel_atom; |
167 | 0 | } |
168 | | |
169 | 0 | static int cpu_count_math_cpus(int n_cpu) { |
170 | 0 | int result = 0; |
171 | 0 | for (int cpu = 0; cpu < n_cpu; ++cpu) { |
172 | 0 | if (pin_cpu(cpu)) { |
173 | 0 | return -1; |
174 | 0 | } |
175 | 0 | if (is_running_on_efficiency_core()) { |
176 | 0 | continue; // efficiency cores harm lockstep threading |
177 | 0 | } |
178 | 0 | ++cpu; // count only every other CPU: hyperthreading isn't useful for linear algebra |
179 | 0 | ++result; |
180 | 0 | } |
181 | 0 | return result; |
182 | 0 | } |
183 | | |
184 | | #endif // __x86_64__ && __linux__ |
185 | | |
186 | | /** |
187 | | * Returns number of CPUs on system that are useful for math. |
188 | | */ |
189 | 0 | int32_t cpu_get_num_math() { |
190 | 0 | #if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__) |
191 | 0 | int n_cpu = sysconf(_SC_NPROCESSORS_ONLN); |
192 | 0 | if (n_cpu < 1) { |
193 | 0 | return cpu_get_num_physical_cores(); |
194 | 0 | } |
195 | 0 | if (is_hybrid_cpu()) { |
196 | 0 | cpu_set_t affinity; |
197 | 0 | if (!pthread_getaffinity_np(pthread_self(), sizeof(affinity), &affinity)) { |
198 | 0 | int result = cpu_count_math_cpus(n_cpu); |
199 | 0 | pthread_setaffinity_np(pthread_self(), sizeof(affinity), &affinity); |
200 | 0 | if (result > 0) { |
201 | 0 | return result; |
202 | 0 | } |
203 | 0 | } |
204 | 0 | } |
205 | 0 | #endif |
206 | 0 | return cpu_get_num_physical_cores(); |
207 | 0 | } |
208 | | |
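As a side note on the CPUID probing above: leaf 7 EDX bit 15 advertises a hybrid part, and leaf 0x1A reports the core type of the CPU the calling thread currently runs on (0x20 = Intel Atom / E-core, 0x40 = Intel Core / P-core, per the Intel SDM). A standalone sketch reusing the same cpuid() helper; current_core_type is an illustrative name, not part of this file:

    #if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
    static const char * current_core_type(void) {
        unsigned eax, ebx, ecx, edx;
        cpuid(0x1a, 0, &eax, &ebx, &ecx, &edx);
        switch ((eax >> 24) & 0xff) {   // EAX[31:24] = core type
            case 0x20: return "E-core (Atom)";
            case 0x40: return "P-core (Core)";
            default:   return "unknown";
        }
    }
    #endif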
209 | | // Helper for setting process priority |
210 | | |
211 | | #if defined(_WIN32) |
212 | | |
213 | | bool set_process_priority(enum ggml_sched_priority prio) { |
214 | | if (prio == GGML_SCHED_PRIO_NORMAL) { |
215 | | return true; |
216 | | } |
217 | | |
218 | | DWORD p = NORMAL_PRIORITY_CLASS; |
219 | | switch (prio) { |
220 | | case GGML_SCHED_PRIO_LOW: p = BELOW_NORMAL_PRIORITY_CLASS; break; |
221 | | case GGML_SCHED_PRIO_NORMAL: p = NORMAL_PRIORITY_CLASS; break; |
222 | | case GGML_SCHED_PRIO_MEDIUM: p = ABOVE_NORMAL_PRIORITY_CLASS; break; |
223 | | case GGML_SCHED_PRIO_HIGH: p = HIGH_PRIORITY_CLASS; break; |
224 | | case GGML_SCHED_PRIO_REALTIME: p = REALTIME_PRIORITY_CLASS; break; |
225 | | } |
226 | | |
227 | | if (!SetPriorityClass(GetCurrentProcess(), p)) { |
228 | | LOG_WRN("failed to set process priority class %d : (%d)\n", prio, (int) GetLastError()); |
229 | | return false; |
230 | | } |
231 | | |
232 | | return true; |
233 | | } |
234 | | |
235 | | #else // macOS and POSIX |
236 | | #include <sys/types.h> |
237 | | #include <sys/resource.h> |
238 | | |
239 | 0 | bool set_process_priority(enum ggml_sched_priority prio) { |
240 | 0 | if (prio == GGML_SCHED_PRIO_NORMAL) { |
241 | 0 | return true; |
242 | 0 | } |
243 | | |
244 | 0 | int p = 0; |
245 | 0 | switch (prio) { |
246 | 0 | case GGML_SCHED_PRIO_LOW: p = 5; break; |
247 | 0 | case GGML_SCHED_PRIO_NORMAL: p = 0; break; |
248 | 0 | case GGML_SCHED_PRIO_MEDIUM: p = -5; break; |
249 | 0 | case GGML_SCHED_PRIO_HIGH: p = -10; break; |
250 | 0 | case GGML_SCHED_PRIO_REALTIME: p = -20; break; |
251 | 0 | } |
252 | | |
253 | 0 | if (setpriority(PRIO_PROCESS, 0, p) != 0) { // setpriority() returns 0 on success, -1 on error |
254 | 0 | LOG_WRN("failed to set process priority %d : %s (%d)\n", prio, strerror(errno), errno); |
255 | 0 | return false; |
256 | 0 | } |
257 | 0 | return true; |
258 | 0 | } |
259 | | |
260 | | #endif |
261 | | |
262 | | // |
263 | | // CLI argument parsing |
264 | | // |
265 | | |
266 | | |
267 | 0 | void postprocess_cpu_params(cpu_params& cpuparams, const cpu_params* role_model) { |
268 | 0 | int32_t n_set = 0; |
269 | |
270 | 0 | if (cpuparams.n_threads < 0) { |
271 | | // Assuming everything about cpuparams is invalid |
272 | 0 | if (role_model != nullptr) { |
273 | 0 | cpuparams = *role_model; |
274 | 0 | } else { |
275 | 0 | cpuparams.n_threads = cpu_get_num_math(); |
276 | 0 | } |
277 | 0 | } |
278 | |
279 | 0 | for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) { |
280 | 0 | if (cpuparams.cpumask[i]) { |
281 | 0 | n_set++; |
282 | 0 | } |
283 | 0 | } |
284 | |
285 | 0 | if (n_set && n_set < cpuparams.n_threads) { |
286 | | // Not enough set bits, may experience performance issues. |
287 | 0 | LOG_WRN("Not enough set bits in CPU mask (%d) to satisfy requested thread count: %d\n", n_set, cpuparams.n_threads); |
288 | 0 | } |
289 | 0 | } |
290 | | |
291 | 0 | bool parse_cpu_range(const std::string & range, bool (&boolmask)[GGML_MAX_N_THREADS]) { |
292 | 0 | size_t dash_loc = range.find('-'); |
293 | 0 | if (dash_loc == std::string::npos) { |
294 | 0 | LOG_ERR("Format of CPU range is invalid! Expected [<start>]-[<end>].\n"); |
295 | 0 | return false; |
296 | 0 | } |
297 | | |
298 | 0 | size_t start_i; |
299 | 0 | size_t end_i; |
300 | |
301 | 0 | if (dash_loc == 0) { |
302 | 0 | start_i = 0; |
303 | 0 | } else { |
304 | 0 | start_i = std::stoull(range.substr(0, dash_loc)); |
305 | 0 | if (start_i >= GGML_MAX_N_THREADS) { |
306 | 0 | LOG_ERR("Start index out of bounds!\n"); |
307 | 0 | return false; |
308 | 0 | } |
309 | 0 | } |
310 | | |
311 | 0 | if (dash_loc == range.length() - 1) { |
312 | 0 | end_i = GGML_MAX_N_THREADS - 1; |
313 | 0 | } else { |
314 | 0 | end_i = std::stoull(range.substr(dash_loc + 1)); |
315 | 0 | if (end_i >= GGML_MAX_N_THREADS) { |
316 | 0 | LOG_ERR("End index out of bounds!\n"); |
317 | 0 | return false; |
318 | 0 | } |
319 | 0 | } |
320 | | |
321 | 0 | for (size_t i = start_i; i <= end_i; i++) { |
322 | 0 | boolmask[i] = true; |
323 | 0 | } |
324 | |
325 | 0 | return true; |
326 | 0 | } |
327 | | |
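Illustrative calls showing the three accepted range forms and the mask bits they set (a sketch; the mask array belongs to the caller):

    bool mask[GGML_MAX_N_THREADS] = { false };
    parse_cpu_range("0-7", mask); // explicit bounds: mask[0..7] = true
    parse_cpu_range("-3",  mask); // open start:      mask[0..3] = true
    parse_cpu_range("4-",  mask); // open end:        mask[4..GGML_MAX_N_THREADS-1] = true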
328 | 0 | bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREADS]) { |
329 | | // Discard potential 0x prefix |
330 | 0 | size_t start_i = 0; |
331 | 0 | if (mask.length() >= 2 && mask.substr(0, 2) == "0x") { |
332 | 0 | start_i = 2; |
333 | 0 | } |
334 | |
335 | 0 | size_t num_digits = mask.length() - start_i; |
336 | 0 | if (num_digits > 128) num_digits = 128; |
337 | |
338 | 0 | size_t end_i = num_digits + start_i; |
339 | |
340 | 0 | for (size_t i = start_i, n = (num_digits*4 - 1); i < end_i; i++, n-=4) { |
341 | 0 | char c = mask.at(i); |
342 | 0 | int8_t id = c; |
343 | |
344 | 0 | if ((c >= '0' && c <= '9')) { |
345 | 0 | id -= '0'; |
346 | 0 | } else if (c >= 'a' && c <= 'f') { |
347 | 0 | id -= 'a' - 10; |
348 | 0 | } else if (c >= 'A' && c <= 'F') { |
349 | 0 | id -= 'A' - 10; |
350 | 0 | } else { |
351 | 0 | LOG_ERR("Invalid hex character '%c' at position %d\n", c, int32_t(i)); |
352 | 0 | return false; |
353 | 0 | } |
354 | | |
355 | 0 | boolmask[ n ] = boolmask[ n ] || ((id & 8) != 0); |
356 | 0 | boolmask[n - 1] = boolmask[n - 1] || ((id & 4) != 0); |
357 | 0 | boolmask[n - 2] = boolmask[n - 2] || ((id & 2) != 0); |
358 | 0 | boolmask[n - 3] = boolmask[n - 3] || ((id & 1) != 0); |
359 | 0 | } |
360 | | |
361 | 0 | return true; |
362 | 0 | } |
363 | | |
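A worked example of the decoding above: each hex digit covers four CPUs, and the most significant digit maps to the highest CPU indices (values are illustrative):

    bool mask[GGML_MAX_N_THREADS] = { false };
    parse_cpu_mask("0x5",  mask); // one digit, n starts at 3: 0b0101 sets mask[2] and mask[0]
    parse_cpu_mask("0xA0", mask); // two digits, n starts at 7: 'A' = 0b1010 sets mask[7] and mask[5]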
364 | 0 | void common_init() { |
365 | 0 | llama_log_set(common_log_default_callback, NULL); |
366 | |
367 | 0 | #ifdef NDEBUG |
368 | 0 | const char * build_type = ""; |
369 | | #else |
370 | | const char * build_type = " (debug)"; |
371 | | #endif |
372 | |
373 | 0 | LOG_INF("build: %d (%s) with %s for %s%s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET, build_type); |
374 | 0 | } |
375 | | |
376 | 0 | std::string common_params_get_system_info(const common_params & params) { |
377 | 0 | std::ostringstream os; |
378 | |
379 | 0 | os << "system_info: n_threads = " << params.cpuparams.n_threads; |
380 | 0 | if (params.cpuparams_batch.n_threads != -1) { |
381 | 0 | os << " (n_threads_batch = " << params.cpuparams_batch.n_threads << ")"; |
382 | 0 | } |
383 | | #if defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later |
384 | | // TODO: windows + arm64 + mingw64 |
385 | | DWORD logicalProcessorCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS); |
386 | | os << " / " << logicalProcessorCount << " | " << llama_print_system_info(); |
387 | | #else |
388 | 0 | os << " / " << std::thread::hardware_concurrency() << " | " << llama_print_system_info(); |
389 | 0 | #endif |
390 | |
391 | 0 | return os.str(); |
392 | 0 | } |
393 | | |
394 | | // |
395 | | // String utils |
396 | | // |
397 | | |
398 | 0 | std::string string_format(const char * fmt, ...) { |
399 | 0 | va_list ap; |
400 | 0 | va_list ap2; |
401 | 0 | va_start(ap, fmt); |
402 | 0 | va_copy(ap2, ap); |
403 | 0 | int size = vsnprintf(NULL, 0, fmt, ap); |
404 | 0 | GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT |
405 | 0 | std::vector<char> buf(size + 1); |
406 | 0 | int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); |
407 | 0 | GGML_ASSERT(size2 == size); |
408 | 0 | va_end(ap2); |
409 | 0 | va_end(ap); |
410 | 0 | return std::string(buf.data(), size); |
411 | 0 | } |
412 | | |
413 | 0 | std::string string_strip(const std::string & str) { |
414 | 0 | size_t start = 0; |
415 | 0 | size_t end = str.size(); |
416 | 0 | while (start < end && std::isspace(static_cast<unsigned char>(str[start]))) { |
417 | 0 | start++; |
418 | 0 | } |
419 | 0 | while (end > start && std::isspace(static_cast<unsigned char>(str[end - 1]))) { |
420 | 0 | end--; |
421 | 0 | } |
422 | 0 | return str.substr(start, end - start); |
423 | 0 | } |
424 | | |
425 | 0 | std::string string_get_sortable_timestamp() { |
426 | 0 | using clock = std::chrono::system_clock; |
427 | |
428 | 0 | const clock::time_point current_time = clock::now(); |
429 | 0 | const time_t as_time_t = clock::to_time_t(current_time); |
430 | 0 | char timestamp_no_ns[100]; |
431 | 0 | std::strftime(timestamp_no_ns, 100, "%Y_%m_%d-%H_%M_%S", std::localtime(&as_time_t)); |
432 | |
433 | 0 | const int64_t ns = std::chrono::duration_cast<std::chrono::nanoseconds>( |
434 | 0 | current_time.time_since_epoch() % 1000000000).count(); |
435 | 0 | char timestamp_ns[11]; |
436 | 0 | snprintf(timestamp_ns, 11, "%09" PRId64, ns); |
437 | |
438 | 0 | return std::string(timestamp_no_ns) + "." + std::string(timestamp_ns); |
439 | 0 | } |
440 | | |
441 | 0 | void string_replace_all(std::string & s, const std::string & search, const std::string & replace) { |
442 | 0 | if (search.empty()) { |
443 | 0 | return; |
444 | 0 | } |
445 | 0 | std::string builder; |
446 | 0 | builder.reserve(s.length()); |
447 | 0 | size_t pos = 0; |
448 | 0 | size_t last_pos = 0; |
449 | 0 | while ((pos = s.find(search, last_pos)) != std::string::npos) { |
450 | 0 | builder.append(s, last_pos, pos - last_pos); |
451 | 0 | builder.append(replace); |
452 | 0 | last_pos = pos + search.length(); |
453 | 0 | } |
454 | 0 | builder.append(s, last_pos, std::string::npos); |
455 | 0 | s = std::move(builder); |
456 | 0 | } |
457 | | |
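Usage sketch for the non-overlapping, left-to-right replacement above:

    std::string s = "foo/bar/baz";
    string_replace_all(s, "/", "::"); // s == "foo::bar::baz"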
458 | 0 | bool string_ends_with(const std::string_view & str, const std::string_view & suffix) { |
459 | 0 | return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0; |
460 | 0 | } |
461 | | |
462 | 0 | bool string_remove_suffix(std::string & str, const std::string_view & suffix) { |
463 | 0 | bool has_suffix = string_ends_with(str, suffix); |
464 | 0 | if (has_suffix) { |
465 | 0 | str = str.substr(0, str.size() - suffix.size()); |
466 | 0 | } |
467 | 0 | return has_suffix; |
468 | 0 | } |
469 | | |
470 | 0 | size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop) { |
471 | 0 | if (!str.empty() && !stop.empty()) { |
472 | 0 | const char text_last_char = str.back(); |
473 | 0 | for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) { |
474 | 0 | if (stop[char_index] == text_last_char) { |
475 | 0 | const auto current_partial = stop.substr(0, char_index + 1); |
476 | 0 | if (string_ends_with(str, current_partial)) { |
477 | 0 | return str.size() - char_index - 1; |
478 | 0 | } |
479 | 0 | } |
480 | 0 | } |
481 | 0 | } |
482 | | |
483 | 0 | return std::string::npos; |
484 | 0 | } |
485 | | |
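This helper backs streaming stop-string handling: if the tail of the generated text could be the beginning of a stop string, the caller withholds it until more tokens resolve the match. A sketch with illustrative strings:

    const size_t pos = string_find_partial_stop("Hello, wor", "world"); // == 7: trailing "wor" is a prefix of "world"
    if (pos != std::string::npos) {
        // hold back everything from `pos` onward until the match is confirmed or broken
    }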
486 | 0 | std::string regex_escape(const std::string & s) { |
487 | 0 | static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]"); |
488 | 0 | return std::regex_replace(s, special_chars, "\\$&"); |
489 | 0 | } |
490 | | |
491 | 94.0k | std::string string_join(const std::vector<std::string> & values, const std::string & separator) { |
492 | 94.0k | std::ostringstream result; |
493 | 1.16M | for (size_t i = 0; i < values.size(); ++i) { |
494 | 1.07M | if (i > 0) { |
495 | 980k | result << separator; |
496 | 980k | } |
497 | 1.07M | result << values[i]; |
498 | 1.07M | } |
499 | 94.0k | return result.str(); |
500 | 94.0k | } |
501 | | |
502 | 87.7k | std::vector<std::string> string_split(const std::string & str, const std::string & delimiter) { |
503 | 87.7k | std::vector<std::string> parts; |
504 | 87.7k | size_t start = 0; |
505 | 87.7k | size_t end = str.find(delimiter); |
506 | | |
507 | 797k | while (end != std::string::npos) { |
508 | 710k | parts.push_back(str.substr(start, end - start)); |
509 | 710k | start = end + delimiter.length(); |
510 | 710k | end = str.find(delimiter, start); |
511 | 710k | } |
512 | | |
513 | 87.7k | parts.push_back(str.substr(start)); |
514 | | |
515 | 87.7k | return parts; |
516 | 87.7k | } |
517 | | |
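Round-trip sketch for the two helpers above (note the split keeps empty parts, so the result always has at least one element):

    const std::vector<std::string> parts = string_split("a::b::c", "::"); // {"a", "b", "c"}
    const std::string joined = string_join(parts, ", ");                  // "a, b, c"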
518 | 0 | std::string string_repeat(const std::string & str, size_t n) { |
519 | 0 | if (n == 0) { |
520 | 0 | return ""; |
521 | 0 | } |
522 | | |
523 | 0 | std::string result; |
524 | 0 | result.reserve(str.length() * n); |
525 | |
526 | 0 | for (size_t i = 0; i < n; ++i) { |
527 | 0 | result += str; |
528 | 0 | } |
529 | |
530 | 0 | return result; |
531 | 0 | } |
532 | | |
533 | 0 | std::string string_from(bool value) { |
534 | 0 | return value ? "true" : "false"; |
535 | 0 | } |
536 | | |
537 | 0 | std::string string_from(const std::vector<int> & values) { |
538 | 0 | std::stringstream buf; |
539 | |
540 | 0 | buf << "[ "; |
541 | 0 | bool first = true; |
542 | 0 | for (auto e : values) { |
543 | 0 | if (first) { |
544 | 0 | first = false; |
545 | 0 | } else { |
546 | 0 | buf << ", "; |
547 | 0 | } |
548 | 0 | buf << std::to_string(e); |
549 | 0 | } |
550 | 0 | buf << " ]"; |
551 | |
552 | 0 | return buf.str(); |
553 | 0 | } |
554 | | |
555 | 0 | std::string string_from(const struct llama_context * ctx, const std::vector<llama_token> & tokens) { |
556 | 0 | std::stringstream buf; |
557 | |
558 | 0 | buf << "[ "; |
559 | |
560 | 0 | bool first = true; |
561 | 0 | for (const auto & token : tokens) { |
562 | 0 | if (!first) { |
563 | 0 | buf << ", "; |
564 | 0 | } else { |
565 | 0 | first = false; |
566 | 0 | } |
567 | |
568 | 0 | auto detokenized = common_token_to_piece(ctx, token); |
569 | |
570 | 0 | buf << "'" << detokenized << "'" |
571 | 0 | << ":" << std::to_string(token); |
572 | 0 | } |
573 | |
574 | 0 | buf << " ]"; |
575 | |
576 | 0 | return buf.str(); |
577 | 0 | } |
578 | | |
579 | 0 | std::string string_from(const struct llama_context * ctx, const struct llama_batch & batch) { |
580 | 0 | std::stringstream buf; |
581 | |
582 | 0 | buf << "[ "; |
583 | |
584 | 0 | bool first = true; |
585 | 0 | for (int i = 0; i < batch.n_tokens; ++i) { |
586 | 0 | if (!first) { |
587 | 0 | buf << ", "; |
588 | 0 | } else { |
589 | 0 | first = false; |
590 | 0 | } |
591 | |
592 | 0 | auto detokenized = common_token_to_piece(ctx, batch.token[i]); |
593 | |
594 | 0 | buf << "\n" << std::to_string(i) |
595 | 0 | << ", token '" << detokenized << "'" |
596 | 0 | << ", pos " << std::to_string(batch.pos[i]) |
597 | 0 | << ", n_seq_id " << std::to_string(batch.n_seq_id[i]) |
598 | 0 | << ", seq_id " << std::to_string(batch.seq_id[i][0]) |
599 | 0 | << ", logits " << std::to_string(batch.logits[i]); |
600 | 0 | } |
601 | |
602 | 0 | buf << " ]"; |
603 | |
604 | 0 | return buf.str(); |
605 | 0 | } |
606 | | |
607 | 0 | void string_process_escapes(std::string & input) { |
608 | 0 | std::size_t input_len = input.length(); |
609 | 0 | std::size_t output_idx = 0; |
610 | |
611 | 0 | for (std::size_t input_idx = 0; input_idx < input_len; ++input_idx) { |
612 | 0 | if (input[input_idx] == '\\' && input_idx + 1 < input_len) { |
613 | 0 | switch (input[++input_idx]) { |
614 | 0 | case 'n': input[output_idx++] = '\n'; break; |
615 | 0 | case 'r': input[output_idx++] = '\r'; break; |
616 | 0 | case 't': input[output_idx++] = '\t'; break; |
617 | 0 | case '\'': input[output_idx++] = '\''; break; |
618 | 0 | case '\"': input[output_idx++] = '\"'; break; |
619 | 0 | case '\\': input[output_idx++] = '\\'; break; |
620 | 0 | case 'x': |
621 | | // Handle \x12, etc |
622 | 0 | if (input_idx + 2 < input_len) { |
623 | 0 | const char x[3] = { input[input_idx + 1], input[input_idx + 2], 0 }; |
624 | 0 | char *err_p = nullptr; |
625 | 0 | const long val = std::strtol(x, &err_p, 16); |
626 | 0 | if (err_p == x + 2) { |
627 | 0 | input_idx += 2; |
628 | 0 | input[output_idx++] = char(val); |
629 | 0 | break; |
630 | 0 | } |
631 | 0 | } |
632 | | // fall through |
633 | 0 | default: input[output_idx++] = '\\'; |
634 | 0 | input[output_idx++] = input[input_idx]; break; |
635 | 0 | } |
636 | 0 | } else { |
637 | 0 | input[output_idx++] = input[input_idx]; |
638 | 0 | } |
639 | 0 | } |
640 | | |
641 | 0 | input.resize(output_idx); |
642 | 0 | } |
643 | | |
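Example of the in-place unescaping above; the raw backslash sequences are what a shell typically passes through from a CLI argument:

    std::string s = "line1\\nline2\\x21"; // raw: line1\nline2\x21
    string_process_escapes(s);            // s == "line1\nline2!" (0x21 == '!')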
644 | 0 | bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) { |
645 | 0 | const char * sep = strchr(data, '='); |
646 | 0 | if (sep == nullptr || sep - data >= 128) { |
647 | 0 | LOG_ERR("%s: malformed KV override '%s'\n", __func__, data); |
648 | 0 | return false; |
649 | 0 | } |
650 | 0 | llama_model_kv_override kvo; |
651 | 0 | std::strncpy(kvo.key, data, sep - data); |
652 | 0 | kvo.key[sep - data] = 0; |
653 | 0 | sep++; |
654 | 0 | if (strncmp(sep, "int:", 4) == 0) { |
655 | 0 | sep += 4; |
656 | 0 | kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT; |
657 | 0 | kvo.val_i64 = std::atol(sep); |
658 | 0 | } else if (strncmp(sep, "float:", 6) == 0) { |
659 | 0 | sep += 6; |
660 | 0 | kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT; |
661 | 0 | kvo.val_f64 = std::atof(sep); |
662 | 0 | } else if (strncmp(sep, "bool:", 5) == 0) { |
663 | 0 | sep += 5; |
664 | 0 | kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL; |
665 | 0 | if (std::strcmp(sep, "true") == 0) { |
666 | 0 | kvo.val_bool = true; |
667 | 0 | } else if (std::strcmp(sep, "false") == 0) { |
668 | 0 | kvo.val_bool = false; |
669 | 0 | } else { |
670 | 0 | LOG_ERR("%s: invalid boolean value for KV override '%s'\n", __func__, data); |
671 | 0 | return false; |
672 | 0 | } |
673 | 0 | } else if (strncmp(sep, "str:", 4) == 0) { |
674 | 0 | sep += 4; |
675 | 0 | kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR; |
676 | 0 | if (strlen(sep) > 127) { |
677 | 0 | LOG_ERR("%s: malformed KV override '%s', value cannot exceed 127 chars\n", __func__, data); |
678 | 0 | return false; |
679 | 0 | } |
680 | 0 | strncpy(kvo.val_str, sep, 127); |
681 | 0 | kvo.val_str[127] = '\0'; |
682 | 0 | } else { |
683 | 0 | LOG_ERR("%s: invalid type for KV override '%s'\n", __func__, data); |
684 | 0 | return false; |
685 | 0 | } |
686 | 0 | overrides.emplace_back(std::move(kvo)); |
687 | 0 | return true; |
688 | 0 | } |
689 | | |
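Illustrative override strings accepted by the parser above, in key=type:value form (the keys shown are placeholders, not known GGUF keys):

    std::vector<llama_model_kv_override> overrides;
    string_parse_kv_override("some.int.key=int:42",       overrides);
    string_parse_kv_override("some.float.key=float:1.5",  overrides);
    string_parse_kv_override("some.bool.key=bool:true",   overrides);
    string_parse_kv_override("some.str.key=str:my-model", overrides); // value limited to 127 chars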
690 | | // |
691 | | // Filesystem utils |
692 | | // |
693 | | |
694 | | // Validate if a filename is safe to use |
695 | | // To validate a full path, split the path by the OS-specific path separator, and validate each part with this function |
696 | 0 | bool fs_validate_filename(const std::string & filename) { |
697 | 0 | if (!filename.length()) { |
698 | | // Empty filename invalid |
699 | 0 | return false; |
700 | 0 | } |
701 | 0 | if (filename.length() > 255) { |
702 | | // Limit at common largest possible filename on Linux filesystems |
703 | | // to avoid unnecessary further validation |
704 | | // (On systems with smaller limits it will be caught by the OS) |
705 | 0 | return false; |
706 | 0 | } |
707 | | |
708 | 0 | std::u32string filename_utf32; |
709 | 0 | try { |
710 | 0 | #if defined(__clang__) |
711 | | // disable C++17 deprecation warning for std::codecvt_utf8 |
712 | 0 | # pragma clang diagnostic push |
713 | 0 | # pragma clang diagnostic ignored "-Wdeprecated-declarations" |
714 | | #elif defined(__GNUC__) |
715 | | # pragma GCC diagnostic push |
716 | | # pragma GCC diagnostic ignored "-Wdeprecated-declarations" |
717 | | #endif |
718 | |
719 | 0 | std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter; |
720 | |
721 | 0 | #if defined(__clang__) |
722 | 0 | # pragma clang diagnostic pop |
723 | | #elif defined(__GNUC__) |
724 | | # pragma GCC diagnostic pop |
725 | | #endif |
726 | |
727 | 0 | filename_utf32 = converter.from_bytes(filename); |
728 | | |
729 | | // If the reverse conversion mismatches, it means overlong UTF-8 sequences were used, |
730 | | // or invalid encodings were encountered. Reject such attempts |
731 | 0 | std::string filename_reencoded = converter.to_bytes(filename_utf32); |
732 | 0 | if (filename_reencoded != filename) { |
733 | 0 | return false; |
734 | 0 | } |
735 | 0 | } catch (const std::exception &) { |
736 | 0 | return false; |
737 | 0 | } |
738 | | |
739 | | // Check for forbidden codepoints: |
740 | | // - Control characters |
741 | | // - Unicode equivalents of illegal characters |
742 | | // - UTF-16 surrogate pairs |
743 | | // - UTF-8 replacement character |
744 | | // - Byte order mark (BOM) |
745 | | // - Illegal characters: / \ : * ? " < > | |
746 | 0 | for (char32_t c : filename_utf32) { |
747 | 0 | if (c <= 0x1F // Control characters (C0) |
748 | 0 | || c == 0x7F // Control characters (DEL) |
749 | 0 | || (c >= 0x80 && c <= 0x9F) // Control characters (C1) |
750 | 0 | || c == 0xFF0E // Fullwidth Full Stop (period equivalent) |
751 | 0 | || c == 0x2215 // Division Slash (forward slash equivalent) |
752 | 0 | || c == 0x2216 // Set Minus (backslash equivalent) |
753 | 0 | || (c >= 0xD800 && c <= 0xDFFF) // UTF-16 surrogate pairs |
754 | 0 | || c == 0xFFFD // Replacement Character (UTF-8) |
755 | 0 | || c == 0xFEFF // Byte Order Mark (BOM) |
756 | 0 | || c == '/' || c == '\\' || c == ':' || c == '*' // Illegal characters |
757 | 0 | || c == '?' || c == '"' || c == '<' || c == '>' || c == '|') { |
758 | 0 | return false; |
759 | 0 | } |
760 | 0 | } |
761 | | |
762 | | // Reject any leading or trailing ' ', or any trailing '.', these are stripped on Windows and will cause a different filename |
763 | | // Unicode and other whitespace is not affected, only 0x20 space |
764 | 0 | if (filename.front() == ' ' || filename.back() == ' ' || filename.back() == '.') { |
765 | 0 | return false; |
766 | 0 | } |
767 | | |
768 | | // Reject any ".." (currently stricter than necessary, it should be fine to just check for == ".." instead) |
769 | 0 | if (filename.find("..") != std::string::npos) { |
770 | 0 | return false; |
771 | 0 | } |
772 | | |
773 | | // Reject "." |
774 | 0 | if (filename == ".") { |
775 | 0 | return false; |
776 | 0 | } |
777 | | |
778 | 0 | return true; |
779 | 0 | } |
780 | | |
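Illustrative inputs for the validator above:

    fs_validate_filename("model.gguf"); // true
    fs_validate_filename("..");         // false: path traversal
    fs_validate_filename("a:b");        // false: ':' is illegal on Windows
    fs_validate_filename("name.");      // false: trailing '.' is stripped on Windows
    fs_validate_filename(" name");      // false: leading space is stripped on Windows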
781 | | #include <iostream> |
782 | | |
783 | | |
784 | | // returns true if successful, false otherwise |
785 | 0 | bool fs_create_directory_with_parents(const std::string & path) { |
786 | | #ifdef _WIN32 |
787 | | std::wstring_convert<std::codecvt_utf8<wchar_t>> converter; |
788 | | std::wstring wpath = converter.from_bytes(path); |
789 | | |
790 | | // if the path already exists, check whether it's a directory |
791 | | const DWORD attributes = GetFileAttributesW(wpath.c_str()); |
792 | | if ((attributes != INVALID_FILE_ATTRIBUTES) && (attributes & FILE_ATTRIBUTE_DIRECTORY)) { |
793 | | return true; |
794 | | } |
795 | | |
796 | | size_t pos_slash = 0; |
797 | | |
798 | | // process path from front to back, procedurally creating directories |
799 | | while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) { |
800 | | const std::wstring subpath = wpath.substr(0, pos_slash); |
801 | | |
802 | | pos_slash += 1; |
803 | | |
804 | | // skip the drive letter, in some systems it can return an access denied error |
805 | | if (subpath.length() == 2 && subpath[1] == ':') { |
806 | | continue; |
807 | | } |
808 | | |
809 | | const bool success = CreateDirectoryW(subpath.c_str(), NULL); |
810 | | |
811 | | if (!success) { |
812 | | const DWORD error = GetLastError(); |
813 | | |
814 | | // if the path already exists, ensure that it's a directory |
815 | | if (error == ERROR_ALREADY_EXISTS) { |
816 | | const DWORD attributes = GetFileAttributesW(subpath.c_str()); |
817 | | if (attributes == INVALID_FILE_ATTRIBUTES || !(attributes & FILE_ATTRIBUTE_DIRECTORY)) { |
818 | | return false; |
819 | | } |
820 | | } else { |
821 | | return false; |
822 | | } |
823 | | } |
824 | | } |
825 | | |
826 | | return true; |
827 | | #else |
828 | | // if the path already exists, check whether it's a directory |
829 | 0 | struct stat info; |
830 | 0 | if (stat(path.c_str(), &info) == 0) { |
831 | 0 | return S_ISDIR(info.st_mode); |
832 | 0 | } |
833 | | |
834 | 0 | size_t pos_slash = 1; // skip leading slashes for directory creation |
835 | | |
836 | | // process path from front to back, procedurally creating directories |
837 | 0 | while ((pos_slash = path.find('/', pos_slash)) != std::string::npos) { |
838 | 0 | const std::string subpath = path.substr(0, pos_slash); |
839 | 0 | struct stat info; |
840 | | |
841 | | // if the path already exists, ensure that it's a directory |
842 | 0 | if (stat(subpath.c_str(), &info) == 0) { |
843 | 0 | if (!S_ISDIR(info.st_mode)) { |
844 | 0 | return false; |
845 | 0 | } |
846 | 0 | } else { |
847 | | // create parent directories |
848 | 0 | const int ret = mkdir(subpath.c_str(), 0755); |
849 | 0 | if (ret != 0) { |
850 | 0 | return false; |
851 | 0 | } |
852 | 0 | } |
853 | | |
854 | 0 | pos_slash += 1; |
855 | 0 | } |
856 | | |
857 | 0 | return true; |
858 | 0 | #endif // _WIN32 |
859 | 0 | } |
860 | | |
861 | 0 | std::string fs_get_cache_directory() { |
862 | 0 | std::string cache_directory = ""; |
863 | 0 | auto ensure_trailing_slash = [](std::string p) { |
864 | | // Make sure to add trailing slash |
865 | 0 | if (p.back() != DIRECTORY_SEPARATOR) { |
866 | 0 | p += DIRECTORY_SEPARATOR; |
867 | 0 | } |
868 | 0 | return p; |
869 | 0 | }; |
870 | 0 | if (getenv("LLAMA_CACHE")) { |
871 | 0 | cache_directory = std::getenv("LLAMA_CACHE"); |
872 | 0 | } else { |
873 | 0 | #if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__) |
874 | 0 | if (std::getenv("XDG_CACHE_HOME")) { |
875 | 0 | cache_directory = std::getenv("XDG_CACHE_HOME"); |
876 | 0 | } else if (std::getenv("HOME")) { |
877 | 0 | cache_directory = std::getenv("HOME") + std::string("/.cache/"); |
878 | 0 | } else { |
879 | 0 | #if defined(__linux__) |
880 | | /* no $HOME is defined, fallback to getpwuid */ |
881 | 0 | struct passwd *pw = getpwuid(getuid()); |
882 | 0 | if ((!pw) || (!pw->pw_dir)) { |
883 | 0 | throw std::runtime_error("Failed to find $HOME directory"); |
884 | 0 | } |
885 | | |
886 | 0 | cache_directory = std::string(pw->pw_dir) + std::string("/.cache/"); |
887 | | #else /* defined(__linux__) */ |
888 | | throw std::runtime_error("Failed to find $HOME directory"); |
889 | | #endif /* defined(__linux__) */ |
890 | 0 | } |
891 | | #elif defined(__APPLE__) |
892 | | cache_directory = std::getenv("HOME") + std::string("/Library/Caches/"); |
893 | | #elif defined(_WIN32) |
894 | | cache_directory = std::getenv("LOCALAPPDATA"); |
895 | | #else |
896 | | # error Unknown architecture |
897 | | #endif |
898 | 0 | cache_directory = ensure_trailing_slash(cache_directory); |
899 | 0 | cache_directory += "llama.cpp"; |
900 | 0 | } |
901 | 0 | return ensure_trailing_slash(cache_directory); |
902 | 0 | } |
903 | | |
904 | 0 | std::string fs_get_cache_file(const std::string & filename) { |
905 | 0 | GGML_ASSERT(filename.find(DIRECTORY_SEPARATOR) == std::string::npos); |
906 | 0 | std::string cache_directory = fs_get_cache_directory(); |
907 | 0 | const bool success = fs_create_directory_with_parents(cache_directory); |
908 | 0 | if (!success) { |
909 | 0 | throw std::runtime_error("failed to create cache directory: " + cache_directory); |
910 | 0 | } |
911 | 0 | return cache_directory + filename; |
912 | 0 | } |
913 | | |
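Resolution sketch for the two helpers above: LLAMA_CACHE wins if set, otherwise the per-OS default is used; the filename here is illustrative:

    // e.g. on Linux with neither LLAMA_CACHE nor XDG_CACHE_HOME set:
    //   "$HOME/.cache/llama.cpp/manifest.json"
    const std::string p = fs_get_cache_file("manifest.json"); // creates the cache directory if needed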
914 | 0 | std::vector<common_file_info> fs_list_files(const std::string & path) { |
915 | 0 | std::vector<common_file_info> files; |
916 | 0 | if (path.empty()) return files; |
917 | | |
918 | 0 | std::filesystem::path dir(path); |
919 | 0 | if (!std::filesystem::exists(dir) || !std::filesystem::is_directory(dir)) { |
920 | 0 | return files; |
921 | 0 | } |
922 | | |
923 | 0 | for (const auto & entry : std::filesystem::directory_iterator(dir)) { |
924 | 0 | try { |
925 | | // Only include regular files (skip directories) |
926 | 0 | const auto & p = entry.path(); |
927 | 0 | if (std::filesystem::is_regular_file(p)) { |
928 | 0 | common_file_info info; |
929 | 0 | info.path = p.string(); |
930 | 0 | info.name = p.filename().string(); |
931 | 0 | try { |
932 | 0 | info.size = static_cast<size_t>(std::filesystem::file_size(p)); |
933 | 0 | } catch (const std::filesystem::filesystem_error &) { |
934 | 0 | info.size = 0; |
935 | 0 | } |
936 | 0 | files.push_back(std::move(info)); |
937 | 0 | } |
938 | 0 | } catch (const std::filesystem::filesystem_error &) { |
939 | | // skip entries we cannot inspect |
940 | 0 | continue; |
941 | 0 | } |
942 | 0 | } |
943 | | |
944 | 0 | return files; |
945 | 0 | } |
946 | | |
947 | | |
948 | | // |
949 | | // Model utils |
950 | | // |
951 | | |
952 | 0 | struct common_init_result common_init_from_params(common_params & params) { |
953 | 0 | common_init_result iparams; |
954 | 0 | auto mparams = common_model_params_to_llama(params); |
955 | |
956 | 0 | llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); |
957 | 0 | if (model == NULL) { |
958 | 0 | LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", |
959 | 0 | __func__, params.model.path.c_str()); |
960 | 0 | return iparams; |
961 | 0 | } |
962 | | |
963 | 0 | const llama_vocab * vocab = llama_model_get_vocab(model); |
964 | |
965 | 0 | auto cparams = common_context_params_to_llama(params); |
966 | |
967 | 0 | llama_context * lctx = llama_init_from_model(model, cparams); |
968 | 0 | if (lctx == NULL) { |
969 | 0 | LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n", |
970 | 0 | __func__, params.model.path.c_str()); |
971 | 0 | llama_model_free(model); |
972 | 0 | return iparams; |
973 | 0 | } |
974 | | |
975 | 0 | if (params.ctx_shift && !llama_memory_can_shift(llama_get_memory(lctx))) { |
976 | 0 | LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__); |
977 | 0 | params.ctx_shift = false; |
978 | 0 | } |
979 | |
980 | 0 | if (!params.control_vectors.empty()) { |
981 | 0 | if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1; |
982 | 0 | if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_model_n_layer(model); |
983 | |
984 | 0 | const auto cvec = common_control_vector_load(params.control_vectors); |
985 | 0 | if (cvec.n_embd == -1) { |
986 | 0 | llama_free(lctx); |
987 | 0 | llama_model_free(model); |
988 | |
989 | 0 | return iparams; |
990 | 0 | } |
991 | | |
992 | 0 | int err = llama_apply_adapter_cvec( |
993 | 0 | lctx, |
994 | 0 | cvec.data.data(), |
995 | 0 | cvec.data.size(), |
996 | 0 | cvec.n_embd, |
997 | 0 | params.control_vector_layer_start, |
998 | 0 | params.control_vector_layer_end); |
999 | 0 | if (err) { |
1000 | 0 | llama_free(lctx); |
1001 | 0 | llama_model_free(model); |
1002 | |
1003 | 0 | return iparams; |
1004 | 0 | } |
1005 | 0 | } |
1006 | | |
1007 | 0 | if (llama_pooling_type(lctx) == LLAMA_POOLING_TYPE_RANK) { |
1008 | 0 | bool ok = true; |
1009 | |
1010 | 0 | if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) { |
1011 | 0 | LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__); |
1012 | 0 | ok = false; |
1013 | 0 | } |
1014 | |
1015 | 0 | bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL; |
1016 | 0 | bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL; |
1017 | 0 | bool has_rerank_prompt = llama_model_chat_template(model, "rerank") != NULL; |
1018 | |
1019 | 0 | if (!has_eos && !has_sep && !has_rerank_prompt) { |
1020 | 0 | LOG_WRN("%s: warning: vocab does not have an EOS token, SEP token, or rerank prompt. Reranking will not work\n", __func__); |
1021 | 0 | ok = false; |
1022 | 0 | } else if (!has_eos) { |
1023 | 0 | LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__); |
1024 | 0 | } |
1025 | |
1026 | 0 | if (!ok) { |
1027 | 0 | llama_free(lctx); |
1028 | 0 | llama_model_free(model); |
1029 | |
1030 | 0 | return iparams; |
1031 | 0 | } |
1032 | 0 | } |
1033 | | |
1034 | | // load and optionally apply lora adapters |
1035 | 0 | for (auto & la : params.lora_adapters) { |
1036 | 0 | llama_adapter_lora_ptr lora; |
1037 | 0 | lora.reset(llama_adapter_lora_init(model, la.path.c_str())); |
1038 | 0 | if (lora == nullptr) { |
1039 | 0 | LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str()); |
1040 | 0 | llama_free(lctx); |
1041 | 0 | llama_model_free(model); |
1042 | 0 | return iparams; |
1043 | 0 | } |
1044 | | |
1045 | 0 | char buf[1024]; |
1046 | 0 | la.ptr = lora.get(); |
1047 | 0 | llama_adapter_meta_val_str(la.ptr, "adapter.lora.task_name", buf, sizeof(buf)); |
1048 | 0 | la.task_name = buf; |
1049 | 0 | llama_adapter_meta_val_str(la.ptr, "adapter.lora.prompt_prefix", buf, sizeof(buf)); |
1050 | 0 | la.prompt_prefix = buf; |
1051 | 0 | iparams.lora.emplace_back(std::move(lora)); // move into the list of loaded adapters |
1052 | 0 | } |
1053 | | |
1054 | 0 | if (!params.lora_init_without_apply) { |
1055 | 0 | common_set_adapter_lora(lctx, params.lora_adapters); |
1056 | 0 | } |
1057 | |
1058 | 0 | if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) { |
1059 | 0 | LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__); |
1060 | 0 | params.sampling.ignore_eos = false; |
1061 | 0 | } |
1062 | | |
1063 | | // initialize once |
1064 | 0 | for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) { |
1065 | 0 | if (llama_vocab_is_eog(vocab, i)) { |
1066 | 0 | LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY); |
1067 | 0 | params.sampling.logit_bias_eog.push_back({i, -INFINITY}); |
1068 | 0 | } |
1069 | 0 | } |
1070 | |
1071 | 0 | if (params.sampling.ignore_eos) { |
1072 | | // add EOG biases to the active set of logit biases |
1073 | 0 | params.sampling.logit_bias.insert( |
1074 | 0 | params.sampling.logit_bias.end(), |
1075 | 0 | params.sampling.logit_bias_eog.begin(), params.sampling.logit_bias_eog.end()); |
1076 | 0 | } |
1077 | |
1078 | 0 | if (params.sampling.penalty_last_n == -1) { |
1079 | 0 | LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); |
1080 | 0 | params.sampling.penalty_last_n = llama_n_ctx(lctx); |
1081 | 0 | } |
1082 | |
1083 | 0 | if (params.sampling.dry_penalty_last_n == -1) { |
1084 | 0 | LOG_INF("%s: setting dry_penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); |
1085 | 0 | params.sampling.dry_penalty_last_n = llama_n_ctx(lctx); |
1086 | 0 | } |
1087 | |
1088 | 0 | if (params.warmup) { |
1089 | 0 | LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__); |
1090 | |
1091 | 0 | llama_set_warmup(lctx, true); |
1092 | |
1093 | 0 | std::vector<llama_token> tmp; |
1094 | 0 | llama_token bos = llama_vocab_bos(vocab); |
1095 | 0 | llama_token eos = llama_vocab_eos(vocab); |
1096 | | |
1097 | | // some models (e.g. T5) don't have a BOS token |
1098 | 0 | if (bos != LLAMA_TOKEN_NULL) { |
1099 | 0 | tmp.push_back(bos); |
1100 | 0 | } |
1101 | 0 | if (eos != LLAMA_TOKEN_NULL) { |
1102 | 0 | tmp.push_back(eos); |
1103 | 0 | } |
1104 | 0 | if (tmp.empty()) { |
1105 | 0 | tmp.push_back(0); |
1106 | 0 | } |
1107 | |
1108 | 0 | if (llama_model_has_encoder(model)) { |
1109 | 0 | llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size())); |
1110 | 0 | llama_token decoder_start_token_id = llama_model_decoder_start_token(model); |
1111 | 0 | if (decoder_start_token_id == LLAMA_TOKEN_NULL) { |
1112 | 0 | decoder_start_token_id = bos; |
1113 | 0 | } |
1114 | 0 | tmp.clear(); |
1115 | 0 | tmp.push_back(decoder_start_token_id); |
1116 | 0 | } |
1117 | 0 | if (llama_model_has_decoder(model)) { |
1118 | 0 | llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch))); |
1119 | 0 | } |
1120 | 0 | llama_memory_clear(llama_get_memory(lctx), true); |
1121 | 0 | llama_synchronize(lctx); |
1122 | 0 | llama_perf_context_reset(lctx); |
1123 | 0 | llama_set_warmup(lctx, false); |
1124 | 0 | } |
1125 | |
1126 | 0 | iparams.model.reset(model); |
1127 | 0 | iparams.context.reset(lctx); |
1128 | |
1129 | 0 | return iparams; |
1130 | 0 | } |
1131 | | |
1132 | 0 | std::string get_model_endpoint() { |
1133 | 0 | const char * model_endpoint_env = getenv("MODEL_ENDPOINT"); |
1134 | | // We still respect the "HF_ENDPOINT" environment variable for backward compatibility. |
1135 | 0 | const char * hf_endpoint_env = getenv("HF_ENDPOINT"); |
1136 | 0 | const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env; |
1137 | 0 | std::string model_endpoint = "https://huggingface.co/"; |
1138 | 0 | if (endpoint_env) { |
1139 | 0 | model_endpoint = endpoint_env; |
1140 | 0 | if (model_endpoint.back() != '/') model_endpoint += '/'; |
1141 | 0 | } |
1142 | 0 | return model_endpoint; |
1143 | 0 | } |
1144 | | |
1145 | 0 | void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) { |
1146 | 0 | llama_clear_adapter_lora(ctx); |
1147 | 0 | for (auto & la : lora) { |
1148 | 0 | if (la.scale != 0.0f) { |
1149 | 0 | llama_set_adapter_lora(ctx, la.ptr, la.scale); |
1150 | 0 | } |
1151 | 0 | } |
1152 | 0 | } |
1153 | | |
1154 | 0 | struct llama_model_params common_model_params_to_llama(common_params & params) { |
1155 | 0 | auto mparams = llama_model_default_params(); |
1156 | |
1157 | 0 | if (!params.devices.empty()) { |
1158 | 0 | mparams.devices = params.devices.data(); |
1159 | 0 | } |
1160 | |
1161 | 0 | if (params.n_gpu_layers != -1) { |
1162 | 0 | mparams.n_gpu_layers = params.n_gpu_layers; |
1163 | 0 | } |
1164 | |
1165 | 0 | mparams.main_gpu = params.main_gpu; |
1166 | 0 | mparams.split_mode = params.split_mode; |
1167 | 0 | mparams.tensor_split = params.tensor_split; |
1168 | 0 | mparams.use_mmap = params.use_mmap; |
1169 | 0 | mparams.use_mlock = params.use_mlock; |
1170 | 0 | mparams.check_tensors = params.check_tensors; |
1171 | 0 | mparams.use_extra_bufts = !params.no_extra_bufts; |
1172 | 0 | mparams.no_host = params.no_host; |
1173 | |
1174 | 0 | if (params.kv_overrides.empty()) { |
1175 | 0 | mparams.kv_overrides = NULL; |
1176 | 0 | } else { |
1177 | 0 | GGML_ASSERT(params.kv_overrides.back().key[0] == 0 && "KV overrides not terminated with empty key"); |
1178 | 0 | mparams.kv_overrides = params.kv_overrides.data(); |
1179 | 0 | } |
1180 | |
1181 | 0 | if (params.tensor_buft_overrides.empty()) { |
1182 | 0 | mparams.tensor_buft_overrides = NULL; |
1183 | 0 | } else { |
1184 | 0 | GGML_ASSERT(params.tensor_buft_overrides.back().pattern == nullptr && "Tensor buffer overrides not terminated with empty pattern"); |
1185 | 0 | mparams.tensor_buft_overrides = params.tensor_buft_overrides.data(); |
1186 | 0 | } |
1187 | |
1188 | 0 | mparams.progress_callback = params.load_progress_callback; |
1189 | 0 | mparams.progress_callback_user_data = params.load_progress_callback_user_data; |
1190 | |
1191 | 0 | return mparams; |
1192 | 0 | } |
1193 | | |
1194 | 0 | struct llama_context_params common_context_params_to_llama(const common_params & params) { |
1195 | 0 | auto cparams = llama_context_default_params(); |
1196 | |
1197 | 0 | cparams.n_ctx = params.n_ctx; |
1198 | 0 | cparams.n_seq_max = params.n_parallel; |
1199 | 0 | cparams.n_batch = params.n_batch; |
1200 | 0 | cparams.n_ubatch = params.n_ubatch; |
1201 | 0 | cparams.n_threads = params.cpuparams.n_threads; |
1202 | 0 | cparams.n_threads_batch = params.cpuparams_batch.n_threads == -1 ? |
1203 | 0 | params.cpuparams.n_threads : params.cpuparams_batch.n_threads; |
1204 | 0 | cparams.embeddings = params.embedding; |
1205 | 0 | cparams.rope_scaling_type = params.rope_scaling_type; |
1206 | 0 | cparams.rope_freq_base = params.rope_freq_base; |
1207 | 0 | cparams.rope_freq_scale = params.rope_freq_scale; |
1208 | 0 | cparams.yarn_ext_factor = params.yarn_ext_factor; |
1209 | 0 | cparams.yarn_attn_factor = params.yarn_attn_factor; |
1210 | 0 | cparams.yarn_beta_fast = params.yarn_beta_fast; |
1211 | 0 | cparams.yarn_beta_slow = params.yarn_beta_slow; |
1212 | 0 | cparams.yarn_orig_ctx = params.yarn_orig_ctx; |
1213 | 0 | cparams.pooling_type = params.pooling_type; |
1214 | 0 | cparams.attention_type = params.attention_type; |
1215 | 0 | cparams.flash_attn_type = params.flash_attn_type; |
1216 | 0 | cparams.cb_eval = params.cb_eval; |
1217 | 0 | cparams.cb_eval_user_data = params.cb_eval_user_data; |
1218 | 0 | cparams.offload_kqv = !params.no_kv_offload; |
1219 | 0 | cparams.no_perf = params.no_perf; |
1220 | 0 | cparams.op_offload = !params.no_op_offload; |
1221 | 0 | cparams.swa_full = params.swa_full; |
1222 | 0 | cparams.kv_unified = params.kv_unified; |
1223 | |
1224 | 0 | cparams.type_k = params.cache_type_k; |
1225 | 0 | cparams.type_v = params.cache_type_v; |
1226 | |
1227 | 0 | return cparams; |
1228 | 0 | } |
1229 | | |
1230 | 0 | struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params) { |
1231 | 0 | struct ggml_threadpool_params tpp; |
1232 | |
1233 | 0 | ggml_threadpool_params_init(&tpp, params.n_threads); // setup the defaults |
1234 | |
1235 | 0 | if (params.mask_valid) { |
1236 | 0 | std::memcpy(&tpp.cpumask, ¶ms.cpumask, GGML_MAX_N_THREADS); |
1237 | 0 | } |
1238 | |
1239 | 0 | tpp.prio = params.priority; |
1240 | 0 | tpp.poll = params.poll; |
1241 | 0 | tpp.strict_cpu = params.strict_cpu; |
1242 | |
1243 | 0 | return tpp; |
1244 | 0 | } |
1245 | | |
1246 | | // |
1247 | | // Batch utils |
1248 | | // |
1249 | | |
1250 | 0 | void common_batch_clear(struct llama_batch & batch) { |
1251 | 0 | batch.n_tokens = 0; |
1252 | 0 | } |
1253 | | |
1254 | | void common_batch_add( |
1255 | | struct llama_batch & batch, |
1256 | | llama_token id, |
1257 | | llama_pos pos, |
1258 | | const std::vector<llama_seq_id> & seq_ids, |
1259 | 0 | bool logits) { |
1260 | 0 | GGML_ASSERT(batch.seq_id[batch.n_tokens] && "llama_batch size exceeded"); |
1261 | |
1262 | 0 | batch.token [batch.n_tokens] = id; |
1263 | 0 | batch.pos [batch.n_tokens] = pos; |
1264 | 0 | batch.n_seq_id[batch.n_tokens] = seq_ids.size(); |
1265 | 0 | for (size_t i = 0; i < seq_ids.size(); ++i) { |
1266 | 0 | batch.seq_id[batch.n_tokens][i] = seq_ids[i]; |
1267 | 0 | } |
1268 | 0 | batch.logits [batch.n_tokens] = logits; |
1269 | |
1270 | 0 | batch.n_tokens++; |
1271 | 0 | } |
1272 | | |
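A sketch of the intended call pattern (llama_batch_init and the prompt token vector belong to the caller; only the last token requests logits):

    llama_batch batch = llama_batch_init(/*n_tokens=*/512, /*embd=*/0, /*n_seq_max=*/1);
    common_batch_clear(batch);
    for (size_t i = 0; i < prompt.size(); ++i) {
        const bool need_logits = (i == prompt.size() - 1); // logits only for the last token
        common_batch_add(batch, prompt[i], (llama_pos) i, { 0 }, need_logits);
    }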
1273 | | // |
1274 | | // Token utils |
1275 | | // |
1276 | | |
1277 | 0 | size_t common_lcp(const llama_tokens & a, const llama_tokens & b) { |
1278 | 0 | size_t i; |
1279 | 0 | for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {} |
1280 | |
1281 | 0 | return i; |
1282 | 0 | } |
1283 | | |
1284 | 0 | size_t common_lcs(const llama_tokens & a, const llama_tokens & b) { |
1285 | | // check for empty sequences |
1286 | 0 | if (a.empty() || b.empty()) { |
1287 | 0 | return 0; |
1288 | 0 | } |
1289 | | |
1290 | | // get the lengths of the input sequences |
1291 | 0 | size_t a_len = a.size(); |
1292 | 0 | size_t b_len = b.size(); |
1293 | | |
1294 | | // initialize the maximum match length (note: despite the LCS name, this DP finds the longest common contiguous substring) |
1295 | 0 | size_t max_length = 0; |
1296 | | |
1297 | | // use two rows instead of a 2D matrix to optimize space |
1298 | 0 | std::vector<size_t> prev_row(b_len + 1, 0); |
1299 | 0 | std::vector<size_t> curr_row(b_len + 1, 0); |
1300 | | |
1301 | | // iterate through the elements of a |
1302 | 0 | for (size_t i = 1; i <= a_len; i++) { |
1303 | | // iterate through the elements of b |
1304 | 0 | for (size_t j = 1; j <= b_len; j++) { |
1305 | | // if elements at the current positions match |
1306 | 0 | if (a[i - 1] == b[j - 1]) { |
1307 | | // if it's the first element of either sequence, set the match length to 1 |
1308 | 0 | if (i == 1 || j == 1) { |
1309 | 0 | curr_row[j] = 1; |
1310 | 0 | } else { |
1311 | | // increment LCS length by 1 compared to the previous element |
1312 | 0 | curr_row[j] = prev_row[j - 1] + 1; |
1313 | 0 | } |
1314 | | |
1315 | | // update max_length if necessary |
1316 | 0 | if (curr_row[j] > max_length) { |
1317 | 0 | max_length = curr_row[j]; |
1318 | 0 | } |
1319 | 0 | } else { |
1320 | | // reset LCS length if elements don't match |
1321 | 0 | curr_row[j] = 0; |
1322 | 0 | } |
1323 | 0 | } |
1324 | | |
1325 | | // update the previous row for the next iteration |
1326 | 0 | prev_row = curr_row; |
1327 | 0 | } |
1328 | | |
1329 | | // return the maximum length of the LCS |
1330 | 0 | return max_length; |
1331 | 0 | } |
1332 | | |
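A worked example with illustrative token ids; note that common_lcs measures a contiguous run:

    const llama_tokens a = {1, 2, 3, 4, 5};
    const llama_tokens b = {9, 2, 3, 4, 8};
    // common_lcp(a, b) == 0 : the first elements differ, so there is no common prefix
    // common_lcs(a, b) == 3 : the contiguous run {2, 3, 4} is shared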
1333 | | // |
1334 | | // Vocab utils |
1335 | | // |
1336 | | |
1337 | | std::vector<llama_token> common_tokenize( |
1338 | | const struct llama_context * ctx, |
1339 | | const std::string & text, |
1340 | | bool add_special, |
1341 | 0 | bool parse_special) { |
1342 | 0 | const llama_model * model = llama_get_model(ctx); |
1343 | 0 | const llama_vocab * vocab = llama_model_get_vocab(model); |
1344 | 0 | return common_tokenize(vocab, text, add_special, parse_special); |
1345 | 0 | } |
1346 | | |
1347 | | std::vector<llama_token> common_tokenize( |
1348 | | const struct llama_vocab * vocab, |
1349 | | const std::string & text, |
1350 | | bool add_special, |
1351 | 0 | bool parse_special) { |
1352 | | // upper limit for the number of tokens |
1353 | 0 | int n_tokens = text.length() + 2 * add_special; |
1354 | 0 | std::vector<llama_token> result(n_tokens); |
1355 | 0 | n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); |
1356 | 0 | if (n_tokens == std::numeric_limits<int32_t>::min()) { |
1357 | 0 | throw std::runtime_error("Tokenization failed: input text too large, tokenization result exceeds int32_t limit"); |
1358 | 0 | } |
1359 | 0 | if (n_tokens < 0) { |
1360 | 0 | result.resize(-n_tokens); |
1361 | 0 | int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); |
1362 | 0 | GGML_ASSERT(check == -n_tokens); |
1363 | 0 | } else { |
1364 | 0 | result.resize(n_tokens); |
1365 | 0 | } |
1366 | 0 | return result; |
1367 | 0 | } |
1368 | | |
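The negative-return/resize dance above is the usual two-pass convention in the llama.cpp C API: when the initial buffer guess is too small, the call reports the required size as a negative count, and the second call fills the resized buffer. A minimal round trip, assuming ctx is an initialized llama_context (common_detokenize is defined further below):

    const std::vector<llama_token> toks = common_tokenize(ctx, "Hello world", /*add_special=*/true);
    const std::string back = common_detokenize(ctx, toks, /*special=*/false);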
1369 | 0 | std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) { |
1370 | 0 | const llama_model * model = llama_get_model(ctx); |
1371 | 0 | const llama_vocab * vocab = llama_model_get_vocab(model); |
1372 | 0 | return common_token_to_piece(vocab, token, special); |
1373 | 0 | } |
1374 | | |
1375 | 0 | std::string common_token_to_piece(const struct llama_vocab * vocab, llama_token token, bool special) { |
1376 | 0 | std::string piece; |
1377 | 0 | piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\0' |
1378 | 0 | const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special); |
1379 | 0 | if (n_chars < 0) { |
1380 | 0 | piece.resize(-n_chars); |
1381 | 0 | int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special); |
1382 | 0 | GGML_ASSERT(check == -n_chars); |
1383 | 0 | } |
1384 | 0 | else { |
1385 | 0 | piece.resize(n_chars); |
1386 | 0 | } |
1387 | |
1388 | 0 | return piece; |
1389 | 0 | } |
1390 | | |
1391 | 0 | std::string common_detokenize(const struct llama_context * ctx, const std::vector<llama_token> & tokens, bool special) { |
1392 | 0 | const llama_model * model = llama_get_model(ctx); |
1393 | 0 | const llama_vocab * vocab = llama_model_get_vocab(model); |
1394 | 0 | return common_detokenize(vocab, tokens, special); |
1395 | 0 | } |
1396 | | |
1397 | 0 | std::string common_detokenize(const struct llama_vocab * vocab, const std::vector<llama_token> & tokens, bool special) { |
1398 | 0 | std::string text; |
1399 | 0 | text.resize(std::max(text.capacity(), tokens.size())); |
1400 | 0 | int32_t n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); |
1401 | 0 | if (n_chars < 0) { |
1402 | 0 | text.resize(-n_chars); |
1403 | 0 | n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); |
1404 | 0 | GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization |
1405 | 0 | } |
1406 | |
1407 | 0 | text.resize(n_chars); |
1408 | | |
1409 | | // NOTE: the original tokenizer decodes bytes after collecting the pieces. |
1410 | 0 | return text; |
1411 | 0 | } |
1412 | | |
1413 | | // |
1414 | | // Embedding utils |
1415 | | // |
1416 | | |
1417 | 0 | void common_embd_normalize(const float * inp, float * out, int n, int embd_norm) { |
1418 | 0 | double sum = 0.0; |
1419 | |
1420 | 0 | switch (embd_norm) { |
1421 | 0 | case -1: // no normalisation |
1422 | 0 | sum = 1.0; |
1423 | 0 | break; |
1424 | 0 | case 0: // max absolute |
1425 | 0 | for (int i = 0; i < n; i++) { |
1426 | 0 | if (sum < std::abs(inp[i])) { |
1427 | 0 | sum = std::abs(inp[i]); |
1428 | 0 | } |
1429 | 0 | } |
1430 | 0 | sum /= 32760.0; // make an int16 range |
1431 | 0 | break; |
1432 | 0 | case 2: // euclidean |
1433 | 0 | for (int i = 0; i < n; i++) { |
1434 | 0 | sum += inp[i] * inp[i]; |
1435 | 0 | } |
1436 | 0 | sum = std::sqrt(sum); |
1437 | 0 | break; |
1438 | 0 | default: // p-norm (euclidean is p-norm p=2) |
1439 | 0 | for (int i = 0; i < n; i++) { |
1440 | 0 | sum += std::pow(std::abs(inp[i]), embd_norm); |
1441 | 0 | } |
1442 | 0 | sum = std::pow(sum, 1.0 / embd_norm); |
1443 | 0 | break; |
1444 | 0 | } |
1445 | | |
1446 | 0 | const float norm = sum > 0.0 ? 1.0 / sum : 0.0f; |
1447 | |
1448 | 0 | for (int i = 0; i < n; i++) { |
1449 | 0 | out[i] = inp[i] * norm; |
1450 | 0 | } |
1451 | 0 | } |
1452 | | |
1453 | 0 | float common_embd_similarity_cos(const float * embd1, const float * embd2, int n){ |
1454 | 0 | double sum = 0.0; |
1455 | 0 | double sum1 = 0.0; |
1456 | 0 | double sum2 = 0.0; |
1457 | |
1458 | 0 | for (int i = 0; i < n; i++) { |
1459 | 0 | sum += embd1[i] * embd2[i]; |
1460 | 0 | sum1 += embd1[i] * embd1[i]; |
1461 | 0 | sum2 += embd2[i] * embd2[i]; |
1462 | 0 | } |
1463 | | |
1464 | | // Handle the case where one or both vectors are zero vectors |
1465 | 0 | if (sum1 == 0.0 || sum2 == 0.0) { |
1466 | 0 | if (sum1 == 0.0 && sum2 == 0.0) { |
1467 | 0 | return 1.0f; // two zero vectors are similar |
1468 | 0 | } |
1469 | 0 | return 0.0f; |
1470 | 0 | } |
1471 | | |
1472 | 0 | return sum / (sqrt(sum1) * sqrt(sum2)); |
1473 | 0 | } |
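This is the standard cosine similarity, dot(a, b) / (|a| * |b|), accumulated in double precision, with zero vectors handled explicitly above. For example:

const float a[3] = { 1.0f, 0.0f, 0.0f };
const float b[3] = { 0.0f, 1.0f, 0.0f };
const float c[3] = { 2.0f, 0.0f, 0.0f };
float s_ab = common_embd_similarity_cos(a, b, 3); // orthogonal -> 0.0f
float s_ac = common_embd_similarity_cos(a, c, 3); // parallel   -> 1.0f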
1474 | | |
1475 | | // |
1476 | | // Control vector utils |
1477 | | // |
1478 | | |
1479 | 0 | static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) { |
1480 | 0 | common_control_vector_data result = { -1, {} }; |
1481 | |
1482 | 0 | ggml_context * ctx = nullptr; |
1483 | 0 | struct gguf_init_params meta_gguf_params = { |
1484 | 0 | /* .no_alloc = */ false, |
1485 | 0 | /* .ctx = */ &ctx, |
1486 | 0 | }; |
1487 | 0 | struct gguf_context * ctx_gguf = gguf_init_from_file(load_info.fname.c_str(), meta_gguf_params); |
1488 | 0 | if (!ctx_gguf) { |
1489 | 0 | LOG_ERR("%s: failed to load control vector file from %s\n", __func__, load_info.fname.c_str()); |
1490 | 0 | return result; |
1491 | 0 | } |
1492 | | |
1493 | 0 | int32_t n_tensors = gguf_get_n_tensors(ctx_gguf); |
1494 | 0 | if (n_tensors == 0) { |
1495 | 0 | LOG_WRN("%s: no direction tensors found in %s\n", __func__, load_info.fname.c_str()); |
1496 | 0 | } |
1497 | |
1498 | 0 | for (int i = 0; i < n_tensors; i++) { |
1499 | 0 | std::string name = gguf_get_tensor_name(ctx_gguf, i); |
1500 | |
1501 | 0 | int layer_idx = -1; |
1502 | | |
1503 | | // split on '.' |
1504 | 0 | size_t dotpos = name.find('.'); |
1505 | 0 | if (dotpos != std::string::npos && name.substr(0, dotpos) == "direction") { |
1506 | 0 | try { |
1507 | 0 | layer_idx = std::stoi(name.substr(dotpos + 1)); |
1508 | 0 | } catch (...) { |
1509 | 0 | layer_idx = -1; |
1510 | 0 | } |
1511 | 0 | } |
1512 | 0 | if (layer_idx < 0) { |
1513 | 0 | LOG_ERR("%s: invalid/unparsable direction tensor layer index in %s\n", __func__, load_info.fname.c_str()); |
1514 | 0 | result.n_embd = -1; |
1515 | 0 | break; |
1516 | 0 | } else if (layer_idx == 0) { |
1517 | 0 | LOG_ERR("%s: invalid (zero) direction tensor layer index in %s\n", __func__, load_info.fname.c_str()); |
1518 | 0 | result.n_embd = -1; |
1519 | 0 | break; |
1520 | 0 | } |
1521 | | |
1522 | 0 | struct ggml_tensor * tensor = ggml_get_tensor(ctx, name.c_str()); |
1523 | 0 | if (tensor->type != GGML_TYPE_F32) { |
1524 | 0 | LOG_ERR("%s: invalid (non-F32) direction tensor type in %s\n", __func__, load_info.fname.c_str()); |
1525 | 0 | result.n_embd = -1; |
1526 | 0 | break; |
1527 | 0 | } |
1528 | 0 | if (ggml_n_dims(tensor) != 1) { |
1529 | 0 | LOG_ERR("%s: invalid (non-1D) direction tensor shape in %s\n", __func__, load_info.fname.c_str()); |
1530 | 0 | result.n_embd = -1; |
1531 | 0 | break; |
1532 | 0 | } |
1533 | | |
1534 | 0 | if (result.n_embd == -1) { |
1535 | 0 | result.n_embd = ggml_nelements(tensor); |
1536 | 0 | } else if (ggml_nelements(tensor) != result.n_embd) { |
1537 | 0 | LOG_ERR("%s: direction tensor in %s does not match previous dimensions\n", __func__, load_info.fname.c_str()); |
1538 | 0 | result.n_embd = -1; |
1539 | 0 | break; |
1540 | 0 | } |
1541 | | |
1542 | | // extend if necessary - do not store data for layer 0 (it's not used) |
1543 | 0 | result.data.resize(std::max(result.data.size(), static_cast<size_t>(result.n_embd * layer_idx)), 0.0f); |
1544 | |
1545 | 0 | const float * src = (const float *) tensor->data; |
1546 | 0 | float * dst = result.data.data() + result.n_embd * (layer_idx - 1); // layer 1 at [0] |
1547 | 0 | for (int j = 0; j < result.n_embd; j++) { |
1548 | 0 | dst[j] += src[j] * load_info.strength; // allows multiple directions for same layer in same file |
1549 | 0 | } |
1550 | |
1551 | 0 | } |
1552 | | |
1553 | 0 | if (result.n_embd == -1) { |
1554 | 0 | LOG_WRN("%s: skipping %s due to invalid direction tensors\n", __func__, load_info.fname.c_str()); |
1555 | 0 | result.data.clear(); |
1556 | 0 | } |
1557 | |
1558 | 0 | gguf_free(ctx_gguf); |
1559 | 0 | ggml_free(ctx); |
1560 | |
1561 | 0 | return result; |
1562 | 0 | } |
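As the checks above imply, a control vector GGUF contains 1-D F32 tensors named direction.<layer> with 1-based layer indices and a common embedding width, and the loader flattens them with layer 1 at offset 0. A hypothetical accessor for that layout (not part of common.h):

// pointer to one layer's direction; valid for 1 <= layer <= cv.data.size() / cv.n_embd
static const float * control_vector_layer(const common_control_vector_data & cv, int layer) {
    return cv.data.data() + (size_t) cv.n_embd * (layer - 1);
}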
1563 | | |
1564 | 0 | common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) { |
1565 | 0 | common_control_vector_data result = { -1, {} }; |
1566 | |
1567 | 0 | for (const auto & info : load_infos) { |
1568 | 0 | auto cur = common_control_vector_load_one(info); |
1569 | |
1570 | 0 | if (cur.n_embd == -1) { |
1571 | 0 | result.n_embd = -1; |
1572 | 0 | break; |
1573 | 0 | } |
1574 | 0 | if (result.n_embd != -1 && result.n_embd != cur.n_embd) { |
1575 | 0 | LOG_ERR("%s: control vectors in %s do not match previous dimensions\n", __func__, info.fname.c_str());
1576 | 0 | result.n_embd = -1; |
1577 | 0 | break; |
1578 | 0 | } |
1579 | | |
1580 | 0 | if (result.n_embd == -1) { |
1581 | 0 | result = std::move(cur); |
1582 | 0 | } else { |
1583 | 0 | result.data.resize(std::max(result.data.size(), cur.data.size()), 0.0f); // extend if necessary |
1584 | 0 | for (size_t i = 0; i < cur.data.size(); i++) { |
1585 | 0 | result.data[i] += cur.data[i]; |
1586 | 0 | } |
1587 | 0 | } |
1588 | 0 | } |
1589 | |
1590 | 0 | if (result.n_embd == -1) { |
1591 | 0 | LOG_ERR("%s: no valid control vector files passed\n", __func__); |
1592 | 0 | result.data.clear(); |
1593 | 0 | } |
1594 | |
1595 | 0 | return result; |
1596 | 0 | } |
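Files are blended element-wise after each is scaled by its strength, so directions can be added or subtracted. A usage sketch with illustrative file names:

common_control_vector_load_info a, b;
a.fname = "happy.gguf";  a.strength =  1.0f; // add this direction
b.fname = "formal.gguf"; b.strength = -0.5f; // subtract half of this one
common_control_vector_data cv = common_control_vector_load({ a, b });
if (cv.n_embd == -1) {
    // at least one file was missing or malformed
}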
1597 | | |
1598 | 0 | ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride) { |
1599 | 0 | const int64_t ne_datapoint = llama_n_ctx(ctx); |
1600 | 0 | const int64_t ndata = (tokens.size() - ne_datapoint - 1) / stride; |
1601 | 0 | ggml_opt_dataset_t result = ggml_opt_dataset_init( |
1602 | 0 | GGML_TYPE_I32, GGML_TYPE_I32, ne_datapoint, ne_datapoint, ndata, /*ndata_shard =*/ 1); |
1603 | |
1604 | 0 | llama_token * data = (llama_token *) ggml_opt_dataset_data(result)->data; |
1605 | 0 | llama_token * labels = (llama_token *) ggml_opt_dataset_labels(result)->data; |
1606 | |
1607 | 0 | for (int64_t idata = 0; idata < ndata; ++idata) { |
1608 | 0 | memcpy(data + idata*ne_datapoint, tokens.data() + idata*stride + 0, ne_datapoint*sizeof(llama_token)); |
1609 | 0 | memcpy(labels + idata*ne_datapoint, tokens.data() + idata*stride + 1, ne_datapoint*sizeof(llama_token)); |
1610 | 0 | } |
1611 | |
1612 | 0 | return result; |
1613 | 0 | } |
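Each datapoint is therefore a context-length window into tokens, and its labels are the same window shifted one token to the right (next-token targets). Worked through with small hypothetical numbers:

// n_ctx = 4, stride = 2, tokens.size() = 11  =>  ndata = (11 - 4 - 1) / 2 = 3
// datapoint 0: data = tokens[0..3], labels = tokens[1..4]
// datapoint 1: data = tokens[2..5], labels = tokens[3..6]
// datapoint 2: data = tokens[4..7], labels = tokens[5..8]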
1614 | | |
1615 | 0 | ggml_opt_optimizer_params common_opt_lr_pars(void * userdata) { |
1616 | 0 | ggml_opt_optimizer_params result = ggml_opt_get_default_optimizer_params(nullptr); |
1617 | 0 | const lr_opt & d = *(lr_opt *) userdata; |
1618 | 0 | result.adamw.alpha = result.sgd.alpha = d.get_lr(d.epoch); |
1619 | 0 | result.sgd.wd = result.adamw.wd = d.wd; |
1620 | 0 | return result; |
1621 | 0 | } |
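This has the shape of a ggml-opt parameter callback, with userdata carrying the schedule state. Invoked directly, with illustrative values (field types as declared in common.h):

lr_opt sched;
sched.lr0    = 1e-3f;
sched.lr_min = 1e-4f;
sched.epochs = 10;
sched.wd     = 0.01f;
sched.init();
sched.epoch  = 3;
ggml_opt_optimizer_params p = common_opt_lr_pars(&sched);
// p.adamw.alpha == p.sgd.alpha == sched.get_lr(3), and p.adamw.wd == p.sgd.wd == 0.01f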
1622 | | |
1623 | | // TODO make all command line args case-insensitive |
1624 | 0 | static inline bool eq_case_insensitive(char const* a, char const* b) { |
1625 | 0 | return ! |
1626 | | #if defined(_MSC_VER) |
1627 | | _stricmp |
1628 | | #else |
1629 | 0 | strcasecmp |
1630 | 0 | #endif // defined(_MSC_VER) |
1631 | 0 | (a, b); |
1632 | 0 | } |
1633 | | |
1634 | 0 | enum ggml_opt_optimizer_type common_opt_get_optimizer(const char * n) { |
1635 | 0 | if (eq_case_insensitive("adamw", n)) { |
1636 | 0 | return GGML_OPT_OPTIMIZER_TYPE_ADAMW; |
1637 | 0 | } |
1638 | 0 | if (eq_case_insensitive("sgd", n)) { |
1639 | 0 | return GGML_OPT_OPTIMIZER_TYPE_SGD; |
1640 | 0 | } |
1641 | 0 | return GGML_OPT_OPTIMIZER_TYPE_COUNT; |
1642 | 0 | } |
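The parse accepts either optimizer name in any letter case and uses GGML_OPT_OPTIMIZER_TYPE_COUNT as the "unrecognized" sentinel:

enum ggml_opt_optimizer_type t = common_opt_get_optimizer("AdamW"); // GGML_OPT_OPTIMIZER_TYPE_ADAMW
enum ggml_opt_optimizer_type u = common_opt_get_optimizer("lion");  // GGML_OPT_OPTIMIZER_TYPE_COUNT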
1643 | | |
1644 | | // TODO simplify to use just log and exp |
1645 | | static float const k_log_2 = std::log(2.f); |
1646 | | |
1647 | 0 | void lr_opt::init() { |
1648 | 0 | if (lr_min > 0 && lr_min < lr0) { |
1649 | 0 | float nhalf = std::log(lr0 / lr_min) / k_log_2; |
1650 | 0 | float e = epochs; |
1651 | 0 | if (decay_epochs > 0 && decay_epochs < e) { |
1652 | 0 | e = decay_epochs; |
1653 | 0 | } else { |
1654 | 0 | decay_epochs = e; |
1655 | 0 | } |
1656 | 0 | scale_epoch = nhalf / e; |
1657 | 0 | } |
1658 | 0 | } |
1659 | | |
1660 | 0 | float lr_opt::get_lr(float epoch) const { |
1661 | 0 | float r = lr_min <= 0 ? lr0 : |
1662 | 0 | epoch >= decay_epochs ? lr_min : |
1663 | 0 | lr0 * std::pow(0.5f, epoch * scale_epoch); |
1664 | 0 | LOG_INF("epoch %.2g lr=%.2g\n", epoch, r); |
1665 | 0 | return r; |
1666 | 0 | } |
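The schedule spreads log2(lr0 / lr_min) halvings evenly across decay_epochs, then holds at lr_min. A worked example:

// lr0 = 1e-3, lr_min = 1e-4, epochs = 10 (decay_epochs unset, so init() defaults it to 10)
//   nhalf       = log2(1e-3 / 1e-4) = log2(10) ~= 3.32 halvings
//   scale_epoch = 3.32 / 10 ~= 0.332
//   lr(0)       = 1e-3
//   lr(5)       = 1e-3 * 0.5^(5 * 0.332) ~= 3.16e-4  (the geometric midpoint of lr0 and lr_min)
//   lr(>= 10)   = lr_min = 1e-4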