Coverage Report

Created: 2025-11-28 06:57

/src/llama.cpp/common/common.cpp
Line
Count
Source
1
#if defined(_MSC_VER)
2
#define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
3
#endif
4
5
#include "ggml.h"
6
#include "gguf.h"
7
8
#include "common.h"
9
#include "log.h"
10
#include "llama.h"
11
#include "sampling.h"
12
13
#include <algorithm>
14
#include <cinttypes>
15
#include <climits>
16
#include <cmath>
17
#include <codecvt>
18
#include <chrono>
19
#include <cstdarg>
20
#include <cstring>
21
#include <ctime>
22
#include <filesystem>
23
#include <fstream>
24
#include <iostream>
25
#include <iterator>
26
#include <regex>
27
#include <sstream>
28
#include <string>
29
#include <thread>
30
#include <unordered_set>
31
#include <vector>
32
33
#if defined(__APPLE__) && defined(__MACH__)
34
#include <sys/types.h>
35
#include <sys/sysctl.h>
36
#endif
37
38
#if defined(_WIN32)
39
#define WIN32_LEAN_AND_MEAN
40
#ifndef NOMINMAX
41
#   define NOMINMAX
42
#endif
43
#include <locale>
44
#include <windows.h>
45
#include <string.h>
46
#include <fcntl.h>
47
#include <io.h>
48
#else
49
#include <sys/ioctl.h>
50
#include <sys/stat.h>
51
#include <unistd.h>
52
#endif
53
54
#if defined(__linux__)
55
#include <sys/types.h>
56
#include <pwd.h>
57
#endif
58
59
#if defined(_MSC_VER)
60
#pragma warning(disable: 4244 4267) // possible loss of data
61
#endif
62
63
0
common_time_meas::common_time_meas(int64_t & t_acc, bool disable) : t_start_us(disable ? -1 : ggml_time_us()), t_acc(t_acc) {}
64
65
0
common_time_meas::~common_time_meas() {
66
0
    if (t_start_us >= 0) {
67
0
        t_acc += ggml_time_us() - t_start_us;
68
0
    }
69
0
}
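Usage sketch for the RAII timer above (not part of common.cpp): the constructor records the start time unless disabled, and the destructor adds the elapsed microseconds to the accumulator when the scope ends.

    // sketch: accumulate time spent in a scope into t_total_us
    static int64_t t_total_us = 0;
    void timed_work() {
        common_time_meas tm(t_total_us, /*disable=*/false); // clock starts here
        // ... work to be measured ...
    } // destructor adds ggml_time_us() - t_start_us to t_total_us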
70
71
//
72
// CPU utils
73
//
74
75
0
int32_t cpu_get_num_physical_cores() {
76
0
#ifdef __linux__
77
    // enumerate the set of thread siblings, num entries is num cores
78
0
    std::unordered_set<std::string> siblings;
79
0
    for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
80
0
        std::ifstream thread_siblings("/sys/devices/system/cpu/cpu"
81
0
            + std::to_string(cpu) + "/topology/thread_siblings");
82
0
        if (!thread_siblings.is_open()) {
83
0
            break; // no more cpus
84
0
        }
85
0
        std::string line;
86
0
        if (std::getline(thread_siblings, line)) {
87
0
            siblings.insert(line);
88
0
        }
89
0
    }
90
0
    if (!siblings.empty()) {
91
0
        return static_cast<int32_t>(siblings.size());
92
0
    }
93
#elif defined(__APPLE__) && defined(__MACH__)
94
    int32_t num_physical_cores;
95
    size_t len = sizeof(num_physical_cores);
96
    int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0);
97
    if (result == 0) {
98
        return num_physical_cores;
99
    }
100
    result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0);
101
    if (result == 0) {
102
        return num_physical_cores;
103
    }
104
#elif defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
105
    // TODO: windows + arm64 + mingw64
106
    unsigned int n_threads_win = std::thread::hardware_concurrency();
107
    unsigned int default_threads = n_threads_win > 0 ? (n_threads_win <= 4 ? n_threads_win : n_threads_win / 2) : 4;
108
109
    DWORD buffer_size = 0;
110
    if (!GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &buffer_size)) {
111
        if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
112
            return default_threads;
113
        }
114
    }
115
116
    std::vector<char> buffer(buffer_size);
117
    if (!GetLogicalProcessorInformationEx(RelationProcessorCore, reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data()), &buffer_size)) {
118
        return default_threads;
119
    }
120
121
    int32_t num_physical_cores = 0;
122
    PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(buffer.data());
123
    while (buffer_size > 0) {
124
        if (info->Relationship == RelationProcessorCore) {
125
            num_physical_cores += info->Processor.GroupCount;
126
        }
127
        buffer_size -= info->Size;
128
        info = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX>(reinterpret_cast<char*>(info) + info->Size);
129
    }
130
131
    return num_physical_cores > 0 ? num_physical_cores : default_threads;
132
#endif
133
0
    unsigned int n_threads = std::thread::hardware_concurrency();
134
0
    return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
135
0
}
136
137
#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
138
#include <pthread.h>
139
140
static void cpuid(unsigned leaf, unsigned subleaf,
141
0
                  unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx) {
142
0
    __asm__("movq\t%%rbx,%%rsi\n\t"
143
0
            "cpuid\n\t"
144
0
            "xchgq\t%%rbx,%%rsi"
145
0
            : "=a"(*eax), "=S"(*ebx), "=c"(*ecx), "=d"(*edx)
146
0
            : "0"(leaf), "2"(subleaf));
147
0
}
148
149
0
static int pin_cpu(int cpu) {
150
0
    cpu_set_t mask;
151
0
    CPU_ZERO(&mask);
152
0
    CPU_SET(cpu, &mask);
153
0
    return pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
154
0
}
155
156
0
static bool is_hybrid_cpu(void) {
157
0
    unsigned eax, ebx, ecx, edx;
158
0
    cpuid(7, 0, &eax, &ebx, &ecx, &edx);
159
0
    return !!(edx & (1u << 15));
160
0
}
161
162
0
static bool is_running_on_efficiency_core(void) {
163
0
    unsigned eax, ebx, ecx, edx;
164
0
    cpuid(0x1a, 0, &eax, &ebx, &ecx, &edx);
165
0
    int intel_atom = 0x20;
166
0
    int core_type = (eax & 0xff000000u) >> 24;
167
0
    return core_type == intel_atom;
168
0
}
169
170
0
static int cpu_count_math_cpus(int n_cpu) {
171
0
    int result = 0;
172
0
    for (int cpu = 0; cpu < n_cpu; ++cpu) {
173
0
        if (pin_cpu(cpu)) {
174
0
            return -1;
175
0
        }
176
0
        if (is_running_on_efficiency_core()) {
177
0
            continue; // efficiency cores harm lockstep threading
178
0
        }
179
0
        ++cpu; // hyperthreading isn't useful for linear algebra
180
0
        ++result;
181
0
    }
182
0
    return result;
183
0
}
184
185
#endif // __x86_64__ && __linux__
186
187
/**
188
 * Returns number of CPUs on system that are useful for math.
189
 */
190
0
int32_t cpu_get_num_math() {
191
0
#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
192
0
    int n_cpu = sysconf(_SC_NPROCESSORS_ONLN);
193
0
    if (n_cpu < 1) {
194
0
        return cpu_get_num_physical_cores();
195
0
    }
196
0
    if (is_hybrid_cpu()) {
197
0
        cpu_set_t affinity;
198
0
        if (!pthread_getaffinity_np(pthread_self(), sizeof(affinity), &affinity)) {
199
0
            int result = cpu_count_math_cpus(n_cpu);
200
0
            pthread_setaffinity_np(pthread_self(), sizeof(affinity), &affinity);
201
0
            if (result > 0) {
202
0
                return result;
203
0
            }
204
0
        }
205
0
    }
206
0
#endif
207
0
    return cpu_get_num_physical_cores();
208
0
}
209
210
// Helper for setting process priority
211
212
#if defined(_WIN32)
213
214
bool set_process_priority(enum ggml_sched_priority prio) {
215
    if (prio == GGML_SCHED_PRIO_NORMAL) {
216
        return true;
217
    }
218
219
    DWORD p = NORMAL_PRIORITY_CLASS;
220
    switch (prio) {
221
        case GGML_SCHED_PRIO_LOW:      p = BELOW_NORMAL_PRIORITY_CLASS; break;
222
        case GGML_SCHED_PRIO_NORMAL:   p = NORMAL_PRIORITY_CLASS;       break;
223
        case GGML_SCHED_PRIO_MEDIUM:   p = ABOVE_NORMAL_PRIORITY_CLASS; break;
224
        case GGML_SCHED_PRIO_HIGH:     p = HIGH_PRIORITY_CLASS;         break;
225
        case GGML_SCHED_PRIO_REALTIME: p = REALTIME_PRIORITY_CLASS;     break;
226
    }
227
228
    if (!SetPriorityClass(GetCurrentProcess(), p)) {
229
        LOG_WRN("failed to set process priority class %d : (%d)\n", prio, (int) GetLastError());
230
        return false;
231
    }
232
233
    return true;
234
}
235
236
#else // MacOS and POSIX
237
#include <sys/types.h>
238
#include <sys/resource.h>
239
240
0
bool set_process_priority(enum ggml_sched_priority prio) {
241
0
    if (prio == GGML_SCHED_PRIO_NORMAL) {
242
0
        return true;
243
0
    }
244
245
0
    int p = 0;
246
0
    switch (prio) {
247
0
        case GGML_SCHED_PRIO_LOW:      p =  5;  break;
248
0
        case GGML_SCHED_PRIO_NORMAL:   p =  0;  break;
249
0
        case GGML_SCHED_PRIO_MEDIUM:   p = -5;  break;
250
0
        case GGML_SCHED_PRIO_HIGH:     p = -10; break;
251
0
        case GGML_SCHED_PRIO_REALTIME: p = -20; break;
252
0
    }
253
254
0
    if (setpriority(PRIO_PROCESS, 0, p) != 0) {
255
0
        LOG_WRN("failed to set process priority %d : %s (%d)\n", prio, strerror(errno), errno);
256
0
        return false;
257
0
    }
258
0
    return true;
259
0
}
260
261
#endif
262
263
//
264
// CLI argument parsing
265
//
266
267
268
0
void postprocess_cpu_params(cpu_params& cpuparams, const cpu_params* role_model) {
269
0
    int32_t n_set = 0;
270
271
0
    if (cpuparams.n_threads < 0) {
272
        // Assuming everything about cpuparams is invalid
273
0
        if (role_model != nullptr) {
274
0
            cpuparams = *role_model;
275
0
        } else {
276
0
            cpuparams.n_threads = cpu_get_num_math();
277
0
        }
278
0
    }
279
280
0
    for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) {
281
0
        if (cpuparams.cpumask[i]) {
282
0
            n_set++;
283
0
        }
284
0
    }
285
286
0
    if (n_set && n_set < cpuparams.n_threads) {
287
        // Not enough set bits, may experience performance issues.
288
0
        LOG_WRN("Not enough set bits in CPU mask (%d) to satisfy requested thread count: %d\n", n_set, cpuparams.n_threads);
289
0
    }
290
0
}
291
292
0
bool parse_cpu_range(const std::string & range, bool (&boolmask)[GGML_MAX_N_THREADS]) {
293
0
    size_t dash_loc = range.find('-');
294
0
    if (dash_loc == std::string::npos) {
295
0
        LOG_ERR("Format of CPU range is invalid! Expected [<start>]-[<end>].\n");
296
0
        return false;
297
0
    }
298
299
0
    size_t start_i;
300
0
    size_t end_i;
301
302
0
    if (dash_loc == 0) {
303
0
        start_i = 0;
304
0
    } else {
305
0
        start_i = std::stoull(range.substr(0, dash_loc));
306
0
        if (start_i >= GGML_MAX_N_THREADS) {
307
0
            LOG_ERR("Start index out of bounds!\n");
308
0
            return false;
309
0
        }
310
0
    }
311
312
0
    if (dash_loc == range.length() - 1) {
313
0
        end_i = GGML_MAX_N_THREADS - 1;
314
0
    } else {
315
0
        end_i = std::stoull(range.substr(dash_loc + 1));
316
0
        if (end_i >= GGML_MAX_N_THREADS) {
317
0
            LOG_ERR("End index out of bounds!\n");
318
0
            return false;
319
0
        }
320
0
    }
321
322
0
    for (size_t i = start_i; i <= end_i; i++) {
323
0
        boolmask[i] = true;
324
0
    }
325
326
0
    return true;
327
0
}
328
329
0
bool parse_cpu_mask(const std::string & mask, bool (&boolmask)[GGML_MAX_N_THREADS]) {
330
    // Discard potential 0x prefix
331
0
    size_t start_i = 0;
332
0
    if (mask.length() >= 2 && mask.substr(0, 2) == "0x") {
333
0
        start_i = 2;
334
0
    }
335
336
0
    size_t num_digits = mask.length() - start_i;
337
0
    if (num_digits > 128) num_digits = 128;
338
339
0
    size_t end_i = num_digits + start_i;
340
341
0
    for (size_t i = start_i, n = (num_digits*4 - 1); i < end_i; i++, n-=4) {
342
0
        char c = mask.at(i);
343
0
        int8_t id = c;
344
345
0
        if ((c >= '0' && c <= '9')) {
346
0
            id -= '0';
347
0
        } else if (c >= 'a' && c <= 'f') {
348
0
            id -= 'a' - 10;
349
0
        } else if (c >= 'A' && c <= 'F') {
350
0
            id -= 'A' - 10;
351
0
        } else {
352
0
            LOG_ERR("Invalid hex character '%c' at position %d\n", c, int32_t(i));
353
0
            return false;
354
0
        }
355
356
0
        boolmask[  n  ] = boolmask[  n  ] || ((id & 8) != 0);
357
0
        boolmask[n - 1] = boolmask[n - 1] || ((id & 4) != 0);
358
0
        boolmask[n - 2] = boolmask[n - 2] || ((id & 2) != 0);
359
0
        boolmask[n - 3] = boolmask[n - 3] || ((id & 1) != 0);
360
0
    }
361
362
0
    return true;
363
0
}
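Both parsers above fill the same boolean mask, so a hex mask such as "0xff" and a range such as "0-3" can share one code path. A usage sketch (not part of common.cpp):

    // sketch: parse either a cpu range or a hex cpumask into one bool mask
    bool mask[GGML_MAX_N_THREADS] = { false };
    const std::string arg = "0-3"; // e.g. from a --cpu-range style flag
    const bool ok = (arg.find('-') != std::string::npos)
        ? parse_cpu_range(arg, mask)
        : parse_cpu_mask(arg, mask);
    if (!ok) {
        // invalid input; both parsers log the reason via LOG_ERR
    }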
364
365
0
void common_init() {
366
0
    llama_log_set(common_log_default_callback, NULL);
367
368
0
#ifdef NDEBUG
369
0
    const char * build_type = "";
370
#else
371
    const char * build_type = " (debug)";
372
#endif
373
374
0
    LOG_INF("build: %d (%s) with %s for %s%s\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, LLAMA_BUILD_TARGET, build_type);
375
0
}
376
377
0
std::string common_params_get_system_info(const common_params & params) {
378
0
    std::ostringstream os;
379
380
0
    os << "system_info: n_threads = " << params.cpuparams.n_threads;
381
0
    if (params.cpuparams_batch.n_threads != -1) {
382
0
        os << " (n_threads_batch = " << params.cpuparams_batch.n_threads << ")";
383
0
    }
384
#if defined(_WIN32) && (_WIN32_WINNT >= 0x0601) && !defined(__MINGW64__) // windows 7 and later
385
    // TODO: windows + arm64 + mingw64
386
    DWORD logicalProcessorCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
387
    os << " / " << logicalProcessorCount << " | " << llama_print_system_info();
388
#else
389
0
    os << " / " << std::thread::hardware_concurrency() << " | " << llama_print_system_info();
390
0
#endif
391
392
0
    return os.str();
393
0
}
394
395
//
396
// String utils
397
//
398
399
0
std::string string_format(const char * fmt, ...) {
400
0
    va_list ap;
401
0
    va_list ap2;
402
0
    va_start(ap, fmt);
403
0
    va_copy(ap2, ap);
404
0
    int size = vsnprintf(NULL, 0, fmt, ap);
405
0
    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
406
0
    std::vector<char> buf(size + 1);
407
0
    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
408
0
    GGML_ASSERT(size2 == size);
409
0
    va_end(ap2);
410
0
    va_end(ap);
411
0
    return std::string(buf.data(), size);
412
0
}
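string_format is a printf-style helper that sizes the buffer with a first vsnprintf pass. Usage sketch (not part of common.cpp; the values are hypothetical):

    // sketch: printf-style formatting into a std::string
    const int   n_tensors = 291;   // hypothetical values
    const float t_load_s  = 1.25f;
    const std::string msg = string_format("loaded %d tensors in %.2f s", n_tensors, t_load_s);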
413
414
0
std::string string_strip(const std::string & str) {
415
0
    size_t start = 0;
416
0
    size_t end = str.size();
417
0
    while (start < end && std::isspace(static_cast<unsigned char>(str[start]))) {
418
0
        start++;
419
0
    }
420
0
    while (end > start && std::isspace(static_cast<unsigned char>(str[end - 1]))) {
421
0
        end--;
422
0
    }
423
0
    return str.substr(start, end - start);
424
0
}
425
426
0
std::string string_get_sortable_timestamp() {
427
0
    using clock = std::chrono::system_clock;
428
429
0
    const clock::time_point current_time = clock::now();
430
0
    const time_t as_time_t = clock::to_time_t(current_time);
431
0
    char timestamp_no_ns[100];
432
0
    std::strftime(timestamp_no_ns, 100, "%Y_%m_%d-%H_%M_%S", std::localtime(&as_time_t));
433
434
0
    const int64_t ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
435
0
        current_time.time_since_epoch() % 1000000000).count();
436
0
    char timestamp_ns[11];
437
0
    snprintf(timestamp_ns, 11, "%09" PRId64, ns);
438
439
0
    return std::string(timestamp_no_ns) + "." + std::string(timestamp_ns);
440
0
}
441
442
0
void string_replace_all(std::string & s, const std::string & search, const std::string & replace) {
443
0
    if (search.empty()) {
444
0
        return;
445
0
    }
446
0
    std::string builder;
447
0
    builder.reserve(s.length());
448
0
    size_t pos = 0;
449
0
    size_t last_pos = 0;
450
0
    while ((pos = s.find(search, last_pos)) != std::string::npos) {
451
0
        builder.append(s, last_pos, pos - last_pos);
452
0
        builder.append(replace);
453
0
        last_pos = pos + search.length();
454
0
    }
455
0
    builder.append(s, last_pos, std::string::npos);
456
0
    s = std::move(builder);
457
0
}
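Usage sketch for the in-place replacement above (not part of common.cpp):

    // sketch: replace every occurrence of a substring
    std::string s = "a,b,c";
    string_replace_all(s, ",", " | "); // s is now "a | b | c"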
458
459
0
bool string_ends_with(const std::string_view & str, const std::string_view & suffix) {
460
0
    return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
461
0
}
462
463
0
bool string_remove_suffix(std::string & str, const std::string_view & suffix) {
464
0
    bool has_suffix = string_ends_with(str, suffix);
465
0
    if (has_suffix) {
466
0
        str = str.substr(0, str.size() - suffix.size());
467
0
    }
468
0
    return has_suffix;
469
0
}
470
471
0
size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop) {
472
0
    if (!str.empty() && !stop.empty()) {
473
0
        const char text_last_char = str.back();
474
0
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
475
0
            if (stop[char_index] == text_last_char) {
476
0
                const auto current_partial = stop.substr(0, char_index + 1);
477
0
                if (string_ends_with(str, current_partial)) {
478
0
                    return str.size() - char_index - 1;
479
0
                }
480
0
            }
481
0
        }
482
0
    }
483
484
0
    return std::string::npos;
485
0
}
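string_find_partial_stop reports where a prefix of a stop word begins at the very end of the text (or npos), which lets streaming code hold back a possible partial stop word until more tokens arrive. A sketch of that pattern (not part of common.cpp):

    // sketch: hold back a possible partial stop word while streaming
    const std::string generated = "Hello wor"; // text produced so far
    const size_t pos = string_find_partial_stop(generated, "world");
    if (pos != std::string::npos) {
        // "wor" could be the start of "world": emit generated.substr(0, pos)
        // now and keep the tail buffered until the match is decided
    }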
486
487
0
std::string regex_escape(const std::string & s) {
488
0
    static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
489
0
    return std::regex_replace(s, special_chars, "\\$&");
490
0
}
491
492
99.3k
std::string string_join(const std::vector<std::string> & values, const std::string & separator) {
493
99.3k
    std::ostringstream result;
494
1.15M
    for (size_t i = 0; i < values.size(); ++i) {
495
1.05M
        if (i > 0) {
496
959k
            result << separator;
497
959k
        }
498
1.05M
        result << values[i];
499
1.05M
    }
500
99.3k
    return result.str();
501
99.3k
}
502
503
85.8k
std::vector<std::string> string_split(const std::string & str, const std::string & delimiter) {
504
85.8k
    std::vector<std::string> parts;
505
85.8k
    size_t start = 0;
506
85.8k
    size_t end = str.find(delimiter);
507
508
791k
    while (end != std::string::npos) {
509
705k
        parts.push_back(str.substr(start, end - start));
510
705k
        start = end + delimiter.length();
511
705k
        end = str.find(delimiter, start);
512
705k
    }
513
514
85.8k
    parts.push_back(str.substr(start));
515
516
85.8k
    return parts;
517
85.8k
}
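Round-trip sketch for the two helpers above (not part of common.cpp):

    // sketch: split on a delimiter, then join the parts back together
    const std::vector<std::string> parts = string_split("a=>b=>c", "=>"); // {"a", "b", "c"}
    const std::string joined = string_join(parts, ", ");                  // "a, b, c"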
518
519
0
std::string string_repeat(const std::string & str, size_t n) {
520
0
    if (n == 0) {
521
0
        return "";
522
0
    }
523
524
0
    std::string result;
525
0
    result.reserve(str.length() * n);
526
527
0
    for (size_t i = 0; i < n; ++i) {
528
0
        result += str;
529
0
    }
530
531
0
    return result;
532
0
}
533
534
0
std::string string_from(bool value) {
535
0
    return value ? "true" : "false";
536
0
}
537
538
0
std::string string_from(const std::vector<int> & values) {
539
0
    std::stringstream buf;
540
541
0
    buf << "[ ";
542
0
    bool first = true;
543
0
    for (auto e : values) {
544
0
        if (first) {
545
0
            first = false;
546
0
        } else {
547
0
            buf << ", ";
548
0
        }
549
0
        buf << std::to_string(e);
550
0
    }
551
0
    buf << " ]";
552
553
0
    return buf.str();
554
0
}
555
556
0
std::string string_from(const struct llama_context * ctx, const std::vector<llama_token> & tokens) {
557
0
    std::stringstream buf;
558
559
0
    buf << "[ ";
560
561
0
    bool first = true;
562
0
    for (const auto & token : tokens) {
563
0
        if (!first) {
564
0
            buf << ", ";
565
0
        } else {
566
0
            first = false;
567
0
        }
568
569
0
        auto detokenized = common_token_to_piece(ctx, token);
570
571
0
        buf << "'" << detokenized << "'"
572
0
            << ":" << std::to_string(token);
573
0
    }
574
575
0
    buf << " ]";
576
577
0
    return buf.str();
578
0
}
579
580
0
std::string string_from(const struct llama_context * ctx, const struct llama_batch & batch) {
581
0
    std::stringstream buf;
582
583
0
    buf << "[ ";
584
585
0
    bool first = true;
586
0
    for (int i = 0; i < batch.n_tokens; ++i) {
587
0
        if (!first) {
588
0
            buf << ", ";
589
0
        } else {
590
0
            first = false;
591
0
        }
592
593
0
        auto detokenized = common_token_to_piece(ctx, batch.token[i]);
594
595
0
        buf << "\n"          << std::to_string(i)
596
0
            << ", token '"   << detokenized << "'"
597
0
            << ", pos "      << std::to_string(batch.pos[i])
598
0
            << ", n_seq_id " << std::to_string(batch.n_seq_id[i])
599
0
            << ", seq_id "   << std::to_string(batch.seq_id[i][0])
600
0
            << ", logits "   << std::to_string(batch.logits[i]);
601
0
    }
602
603
0
    buf << " ]";
604
605
0
    return buf.str();
606
0
}
607
608
0
void string_process_escapes(std::string & input) {
609
0
    std::size_t input_len = input.length();
610
0
    std::size_t output_idx = 0;
611
612
0
    for (std::size_t input_idx = 0; input_idx < input_len; ++input_idx) {
613
0
        if (input[input_idx] == '\\' && input_idx + 1 < input_len) {
614
0
            switch (input[++input_idx]) {
615
0
                case 'n':  input[output_idx++] = '\n'; break;
616
0
                case 'r':  input[output_idx++] = '\r'; break;
617
0
                case 't':  input[output_idx++] = '\t'; break;
618
0
                case '\'': input[output_idx++] = '\''; break;
619
0
                case '\"': input[output_idx++] = '\"'; break;
620
0
                case '\\': input[output_idx++] = '\\'; break;
621
0
                case 'x':
622
                    // Handle \x12, etc
623
0
                    if (input_idx + 2 < input_len) {
624
0
                        const char x[3] = { input[input_idx + 1], input[input_idx + 2], 0 };
625
0
                        char *err_p = nullptr;
626
0
                        const long val = std::strtol(x, &err_p, 16);
627
0
                        if (err_p == x + 2) {
628
0
                            input_idx += 2;
629
0
                            input[output_idx++] = char(val);
630
0
                            break;
631
0
                        }
632
0
                    }
633
                    // fall through
634
0
                default:   input[output_idx++] = '\\';
635
0
                           input[output_idx++] = input[input_idx]; break;
636
0
            }
637
0
        } else {
638
0
            input[output_idx++] = input[input_idx];
639
0
        }
640
0
    }
641
642
0
    input.resize(output_idx);
643
0
}
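string_process_escapes decodes in place, so the result is never longer than the input. Usage sketch (not part of common.cpp):

    // sketch: decode C-style escapes from a CLI argument
    std::string prompt = "line1\\nline2\\x21"; // contains backslash-n, backslash-x21
    string_process_escapes(prompt);            // now "line1\nline2!"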
644
645
0
bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
646
0
    const char * sep = strchr(data, '=');
647
0
    if (sep == nullptr || sep - data >= 128) {
648
0
        LOG_ERR("%s: malformed KV override '%s'\n", __func__, data);
649
0
        return false;
650
0
    }
651
0
    llama_model_kv_override kvo;
652
0
    std::strncpy(kvo.key, data, sep - data);
653
0
    kvo.key[sep - data] = 0;
654
0
    sep++;
655
0
    if (strncmp(sep, "int:", 4) == 0) {
656
0
        sep += 4;
657
0
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
658
0
        kvo.val_i64 = std::atol(sep);
659
0
    } else if (strncmp(sep, "float:", 6) == 0) {
660
0
        sep += 6;
661
0
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
662
0
        kvo.val_f64 = std::atof(sep);
663
0
    } else if (strncmp(sep, "bool:", 5) == 0) {
664
0
        sep += 5;
665
0
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
666
0
        if (std::strcmp(sep, "true") == 0) {
667
0
            kvo.val_bool = true;
668
0
        } else if (std::strcmp(sep, "false") == 0) {
669
0
            kvo.val_bool = false;
670
0
        } else {
671
0
            LOG_ERR("%s: invalid boolean value for KV override '%s'\n", __func__, data);
672
0
            return false;
673
0
        }
674
0
    } else if (strncmp(sep, "str:", 4) == 0) {
675
0
        sep += 4;
676
0
        kvo.tag = LLAMA_KV_OVERRIDE_TYPE_STR;
677
0
        if (strlen(sep) > 127) {
678
0
            LOG_ERR("%s: malformed KV override '%s', value cannot exceed 127 chars\n", __func__, data);
679
0
            return false;
680
0
        }
681
0
        strncpy(kvo.val_str, sep, 127);
682
0
        kvo.val_str[127] = '\0';
683
0
    } else {
684
0
        LOG_ERR("%s: invalid type for KV override '%s'\n", __func__, data);
685
0
        return false;
686
0
    }
687
0
    overrides.emplace_back(std::move(kvo));
688
0
    return true;
689
0
}
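The accepted syntax is key=type:value with type one of int, float, bool, or str. Usage sketch (not part of common.cpp; the empty-key terminator matches the GGML_ASSERT in common_model_params_to_llama further below):

    // sketch: parse --override-kv style arguments
    std::vector<llama_model_kv_override> kv_overrides;
    string_parse_kv_override("tokenizer.ggml.add_bos_token=bool:false", kv_overrides);
    string_parse_kv_override("general.name=str:my-model", kv_overrides);
    kv_overrides.emplace_back();     // terminate the list with an empty key
    kv_overrides.back().key[0] = 0;  // before handing it to llama_model_params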
690
691
//
692
// Filesystem utils
693
//
694
695
// Validate if a filename is safe to use
696
// To validate a full path, split the path by the OS-specific path separator, and validate each part with this function
697
0
bool fs_validate_filename(const std::string & filename) {
698
0
    if (!filename.length()) {
699
        // Empty filename invalid
700
0
        return false;
701
0
    }
702
0
    if (filename.length() > 255) {
703
        // Limit to the largest filename length common on Linux filesystems
704
        // to avoid unnecessary further validation
705
        // (On systems with smaller limits it will be caught by the OS)
706
0
        return false;
707
0
    }
708
709
0
    std::u32string filename_utf32;
710
0
    try {
711
0
#if defined(__clang__)
712
        // disable C++17 deprecation warning for std::codecvt_utf8
713
0
#    pragma clang diagnostic push
714
0
#    pragma clang diagnostic ignored "-Wdeprecated-declarations"
715
#elif defined(__GNUC__)
716
#    pragma GCC diagnostic push
717
#    pragma GCC diagnostic ignored "-Wdeprecated-declarations"
718
#endif
719
720
0
        std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter;
721
722
0
#if defined(__clang__)
723
0
#    pragma clang diagnostic pop
724
#elif defined(__GNUC__)
725
#    pragma GCC diagnostic pop
726
#endif
727
728
0
        filename_utf32 = converter.from_bytes(filename);
729
730
        // If the reverse conversion mismatches, it means overlong UTF-8 sequences were used,
731
        // or invalid encodings were encountered. Reject such attempts
732
0
        std::string filename_reencoded = converter.to_bytes(filename_utf32);
733
0
        if (filename_reencoded != filename) {
734
0
            return false;
735
0
        }
736
0
    } catch (const std::exception &) {
737
0
        return false;
738
0
    }
739
740
    // Check for forbidden codepoints:
741
    // - Control characters
742
    // - Unicode equivalents of illegal characters
743
    // - UTF-16 surrogate pairs
744
    // - UTF-8 replacement character
745
    // - Byte order mark (BOM)
746
    // - Illegal characters: / \ : * ? " < > |
747
0
    for (char32_t c : filename_utf32) {
748
0
        if (c <= 0x1F // Control characters (C0)
749
0
            || c == 0x7F // Control characters (DEL)
750
0
            || (c >= 0x80 && c <= 0x9F) // Control characters (C1)
751
0
            || c == 0xFF0E // Fullwidth Full Stop (period equivalent)
752
0
            || c == 0x2215 // Division Slash (forward slash equivalent)
753
0
            || c == 0x2216 // Set Minus (backslash equivalent)
754
0
            || (c >= 0xD800 && c <= 0xDFFF) // UTF-16 surrogate pairs
755
0
            || c == 0xFFFD // Replacement Character (UTF-8)
756
0
            || c == 0xFEFF // Byte Order Mark (BOM)
757
0
            || c == '/' || c == '\\' || c == ':' || c == '*' // Illegal characters
758
0
            || c == '?' || c == '"' || c == '<' || c == '>' || c == '|') {
759
0
            return false;
760
0
        }
761
0
    }
762
763
    // Reject any leading or trailing ' ', or any trailing '.', these are stripped on Windows and will cause a different filename
764
    // Unicode and other whitespace is not affected, only 0x20 space
765
0
    if (filename.front() == ' ' || filename.back() == ' ' || filename.back() == '.') {
766
0
        return false;
767
0
    }
768
769
    // Reject any ".." (currently stricter than necessary, it should be fine to just check for == ".." instead)
770
0
    if (filename.find("..") != std::string::npos) {
771
0
        return false;
772
0
    }
773
774
    // Reject "."
775
0
    if (filename == ".") {
776
0
        return false;
777
0
    }
778
779
0
    return true;
780
0
}
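Per the comment above, a full path is validated one component at a time. A sketch (not part of common.cpp; the path is hypothetical):

    // sketch: validate each component of a relative path
    for (const std::string & part : string_split("models/llama/model.gguf", "/")) {
        if (!fs_validate_filename(part)) {
            // reject the whole path
        }
    }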
781
782
783
784
785
// returns true if successful, false otherwise
786
0
bool fs_create_directory_with_parents(const std::string & path) {
787
#ifdef _WIN32
788
    std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
789
    std::wstring wpath = converter.from_bytes(path);
790
791
    // if the path already exists, check whether it's a directory
792
    const DWORD attributes = GetFileAttributesW(wpath.c_str());
793
    if ((attributes != INVALID_FILE_ATTRIBUTES) && (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
794
        return true;
795
    }
796
797
    size_t pos_slash = 0;
798
799
    // process path from front to back, procedurally creating directories
800
    while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) {
801
        const std::wstring subpath = wpath.substr(0, pos_slash);
802
803
        pos_slash += 1;
804
805
        // skip the drive letter, in some systems it can return an access denied error
806
        if (subpath.length() == 2 && subpath[1] == ':') {
807
            continue;
808
        }
809
810
        const bool success = CreateDirectoryW(subpath.c_str(), NULL);
811
812
        if (!success) {
813
            const DWORD error = GetLastError();
814
815
            // if the path already exists, ensure that it's a directory
816
            if (error == ERROR_ALREADY_EXISTS) {
817
                const DWORD attributes = GetFileAttributesW(subpath.c_str());
818
                if (attributes == INVALID_FILE_ATTRIBUTES || !(attributes & FILE_ATTRIBUTE_DIRECTORY)) {
819
                    return false;
820
                }
821
            } else {
822
                return false;
823
            }
824
        }
825
    }
826
827
    return true;
828
#else
829
    // if the path already exists, check whether it's a directory
830
0
    struct stat info;
831
0
    if (stat(path.c_str(), &info) == 0) {
832
0
        return S_ISDIR(info.st_mode);
833
0
    }
834
835
0
    size_t pos_slash = 1; // skip leading slashes for directory creation
836
837
    // process path from front to back, procedurally creating directories
838
0
    while ((pos_slash = path.find('/', pos_slash)) != std::string::npos) {
839
0
        const std::string subpath = path.substr(0, pos_slash);
840
0
        struct stat info;
841
842
        // if the path already exists, ensure that it's a directory
843
0
        if (stat(subpath.c_str(), &info) == 0) {
844
0
            if (!S_ISDIR(info.st_mode)) {
845
0
                return false;
846
0
            }
847
0
        } else {
848
            // create parent directories
849
0
            const int ret = mkdir(subpath.c_str(), 0755);
850
0
            if (ret != 0) {
851
0
                return false;
852
0
            }
853
0
        }
854
855
0
        pos_slash += 1;
856
0
    }
857
858
0
    return true;
859
0
#endif // _WIN32
860
0
}
861
862
0
std::string fs_get_cache_directory() {
863
0
    std::string cache_directory = "";
864
0
    auto ensure_trailing_slash = [](std::string p) {
865
        // Make sure to add trailing slash
866
0
        if (p.back() != DIRECTORY_SEPARATOR) {
867
0
            p += DIRECTORY_SEPARATOR;
868
0
        }
869
0
        return p;
870
0
    };
871
0
    if (getenv("LLAMA_CACHE")) {
872
0
        cache_directory = std::getenv("LLAMA_CACHE");
873
0
    } else {
874
0
#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__)
875
0
        if (std::getenv("XDG_CACHE_HOME")) {
876
0
            cache_directory = std::getenv("XDG_CACHE_HOME");
877
0
        } else if (std::getenv("HOME")) {
878
0
            cache_directory = std::getenv("HOME") + std::string("/.cache/");
879
0
        } else {
880
0
#if defined(__linux__)
881
            /* no $HOME is defined, fallback to getpwuid */
882
0
            struct passwd *pw = getpwuid(getuid());
883
0
            if ((!pw) || (!pw->pw_dir)) {
884
0
                throw std::runtime_error("Failed to find $HOME directory");
885
0
            }
886
887
0
            cache_directory = std::string(pw->pw_dir) + std::string("/.cache/");
888
#else /* defined(__linux__) */
889
            throw std::runtime_error("Failed to find $HOME directory");
890
#endif /* defined(__linux__) */
891
0
        }
892
#elif defined(__APPLE__)
893
        cache_directory = std::getenv("HOME") + std::string("/Library/Caches/");
894
#elif defined(_WIN32)
895
        cache_directory = std::getenv("LOCALAPPDATA");
896
#else
897
#  error Unknown architecture
898
#endif
899
0
        cache_directory = ensure_trailing_slash(cache_directory);
900
0
        cache_directory += "llama.cpp";
901
0
    }
902
0
    return ensure_trailing_slash(cache_directory);
903
0
}
904
905
0
std::string fs_get_cache_file(const std::string & filename) {
906
0
    GGML_ASSERT(filename.find(DIRECTORY_SEPARATOR) == std::string::npos);
907
0
    std::string cache_directory = fs_get_cache_directory();
908
0
    const bool success = fs_create_directory_with_parents(cache_directory);
909
0
    if (!success) {
910
0
        throw std::runtime_error("failed to create cache directory: " + cache_directory);
911
0
    }
912
0
    return cache_directory + filename;
913
0
}
914
915
0
std::vector<common_file_info> fs_list_files(const std::string & path) {
916
0
    std::vector<common_file_info> files;
917
0
    if (path.empty()) return files;
918
919
0
    std::filesystem::path dir(path);
920
0
    if (!std::filesystem::exists(dir) || !std::filesystem::is_directory(dir)) {
921
0
        return files;
922
0
    }
923
924
0
    for (const auto & entry : std::filesystem::directory_iterator(dir)) {
925
0
        try {
926
            // Only include regular files (skip directories)
927
0
            const auto & p = entry.path();
928
0
            if (std::filesystem::is_regular_file(p)) {
929
0
                common_file_info info;
930
0
                info.path = p.string();
931
0
                info.name = p.filename().string();
932
0
                try {
933
0
                    info.size = static_cast<size_t>(std::filesystem::file_size(p));
934
0
                } catch (const std::filesystem::filesystem_error &) {
935
0
                    info.size = 0;
936
0
                }
937
0
                files.push_back(std::move(info));
938
0
            }
939
0
        } catch (const std::filesystem::filesystem_error &) {
940
            // skip entries we cannot inspect
941
0
            continue;
942
0
        }
943
0
    }
944
945
0
    return files;
946
0
}
947
948
949
//
950
// Model utils
951
//
952
953
static inline void common_init_sampler_from_model(
954
    const llama_model * model,
955
0
    common_params_sampling & sparams) {
956
957
0
    const uint64_t config = sparams.user_sampling_config;
958
959
0
    auto get_int32 = [&](const char * key, int32_t & dst, uint64_t user_config) {
960
0
        if (config & user_config) return;
961
962
0
        char buf[64] = {0};
963
0
        if (llama_model_meta_val_str(model, key, buf, sizeof(buf)) > 0) {
964
0
            char * end = nullptr;
965
0
            int32_t v = strtol(buf, &end, 10);
966
0
            if (end && end != buf) dst = v;
967
0
        }
968
0
    };
969
970
0
    auto get_float = [&](const char * key, float & dst, uint64_t user_config) {
971
0
        if (config & user_config) return;
972
973
0
        char buf[128] = {0};
974
0
        if (llama_model_meta_val_str(model, key, buf, sizeof(buf)) > 0) {
975
0
            char * end = nullptr;
976
0
            float v = strtof(buf, &end);
977
0
            if (end && end != buf) dst = v;
978
0
        }
979
0
    };
980
981
    // Sampling sequence
982
0
    if (!(config & common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_SAMPLERS)) {
983
0
        char buf[512] = {0};
984
0
        if (llama_model_meta_val_str(model, llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_SEQUENCE), buf, sizeof(buf)) > 0) {
985
0
            const std::vector<std::string> sampler_names = string_split<std::string>(std::string(buf), ';');
986
0
            if (!sampler_names.empty()) {
987
0
                sparams.samplers = common_sampler_types_from_names(sampler_names, true);
988
0
            }
989
0
        }
990
0
    }
991
992
0
    get_int32(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_TOP_K),           sparams.top_k,           common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TOP_K);
993
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_TOP_P),           sparams.top_p,           common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TOP_P);
994
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_MIN_P),           sparams.min_p,           common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIN_P);
995
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_XTC_PROBABILITY), sparams.xtc_probability, common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_XTC_PROBABILITY);
996
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_XTC_THRESHOLD),   sparams.xtc_threshold,   common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_XTC_THRESHOLD);
997
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_TEMP),            sparams.temp,            common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_TEMP);
998
0
    get_int32(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_LAST_N),  sparams.penalty_last_n,  common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_LAST_N);
999
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_REPEAT),  sparams.penalty_repeat,  common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_REPEAT);
1000
0
    get_int32(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT),        sparams.mirostat,        common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT);
1001
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_TAU),    sparams.mirostat_tau,    common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_TAU);
1002
0
    get_float(llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_ETA),    sparams.mirostat_eta,    common_params_sampling_config::COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_ETA);
1003
0
}
1004
1005
0
struct common_init_result common_init_from_params(common_params & params) {
1006
0
    common_init_result iparams;
1007
0
    auto mparams = common_model_params_to_llama(params);
1008
1009
0
    llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
1010
0
    if (model == NULL) {
1011
0
        LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
1012
0
            __func__, params.model.path.c_str());
1013
0
        return iparams;
1014
0
    }
1015
1016
0
    common_init_sampler_from_model(model, params.sampling);
1017
1018
0
    const llama_vocab * vocab = llama_model_get_vocab(model);
1019
1020
0
    auto cparams = common_context_params_to_llama(params);
1021
1022
0
    llama_context * lctx = llama_init_from_model(model, cparams);
1023
0
    if (lctx == NULL) {
1024
0
        LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
1025
0
            __func__, params.model.path.c_str());
1026
0
        llama_model_free(model);
1027
0
        return iparams;
1028
0
    }
1029
1030
0
    if (params.ctx_shift && !llama_memory_can_shift(llama_get_memory(lctx))) {
1031
0
        LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__);
1032
0
        params.ctx_shift = false;
1033
0
    }
1034
1035
0
    if (!params.control_vectors.empty()) {
1036
0
        if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
1037
0
        if (params.control_vector_layer_end   <= 0) params.control_vector_layer_end   = llama_model_n_layer(model);
1038
1039
0
        const auto cvec = common_control_vector_load(params.control_vectors);
1040
0
        if (cvec.n_embd == -1) {
1041
0
            llama_free(lctx);
1042
0
            llama_model_free(model);
1043
1044
0
            return iparams;
1045
0
        }
1046
1047
0
        int err = llama_apply_adapter_cvec(
1048
0
                lctx,
1049
0
                cvec.data.data(),
1050
0
                cvec.data.size(),
1051
0
                cvec.n_embd,
1052
0
                params.control_vector_layer_start,
1053
0
                params.control_vector_layer_end);
1054
0
        if (err) {
1055
0
            llama_free(lctx);
1056
0
            llama_model_free(model);
1057
1058
0
            return iparams;
1059
0
        }
1060
0
    }
1061
1062
0
    if (llama_pooling_type(lctx) == LLAMA_POOLING_TYPE_RANK) {
1063
0
        bool ok = true;
1064
1065
0
        if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) {
1066
0
            LOG_WRN("%s: warning: vocab does not have a  BOS token, reranking will not work\n", __func__);
1067
0
            ok = false;
1068
0
        }
1069
1070
0
        bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL;
1071
0
        bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL;
1072
0
        bool has_rerank_prompt = llama_model_chat_template(model, "rerank") != NULL;
1073
1074
0
        if (!has_eos && !has_sep && !has_rerank_prompt) {
1075
0
            LOG_WRN("%s: warning: vocab does not have an EOS token, SEP token, or rerank prompt. Reranking will not work\n", __func__);
1076
0
            ok = false;
1077
0
        } else if (!has_eos) {
1078
0
            LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__);
1079
0
        }
1080
1081
0
        if (!ok) {
1082
0
            llama_free(lctx);
1083
0
            llama_model_free(model);
1084
1085
0
            return iparams;
1086
0
        }
1087
0
    }
1088
1089
    // load and optionally apply lora adapters
1090
0
    for (auto & la : params.lora_adapters) {
1091
0
        llama_adapter_lora_ptr lora;
1092
0
        lora.reset(llama_adapter_lora_init(model, la.path.c_str()));
1093
0
        if (lora == nullptr) {
1094
0
            LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
1095
0
            llama_free(lctx);
1096
0
            llama_model_free(model);
1097
0
            return iparams;
1098
0
        }
1099
1100
0
        char buf[1024];
1101
0
        la.ptr = lora.get();
1102
0
        llama_adapter_meta_val_str(la.ptr, "adapter.lora.task_name", buf, sizeof(buf));
1103
0
        la.task_name = buf;
1104
0
        llama_adapter_meta_val_str(la.ptr, "adapter.lora.prompt_prefix", buf, sizeof(buf));
1105
0
        la.prompt_prefix = buf;
1106
0
        iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters
1107
0
    }
1108
1109
0
    if (!params.lora_init_without_apply) {
1110
0
        common_set_adapter_lora(lctx, params.lora_adapters);
1111
0
    }
1112
1113
0
    if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) {
1114
0
        LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__);
1115
0
        params.sampling.ignore_eos = false;
1116
0
    }
1117
1118
    // initialize once
1119
0
    for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) {
1120
0
        if (llama_vocab_is_eog(vocab, i)) {
1121
0
            LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY);
1122
0
            params.sampling.logit_bias_eog.push_back({i, -INFINITY});
1123
0
        }
1124
0
    }
1125
1126
0
    if (params.sampling.ignore_eos) {
1127
        // add EOG biases to the active set of logit biases
1128
0
        params.sampling.logit_bias.insert(
1129
0
                params.sampling.logit_bias.end(),
1130
0
                params.sampling.logit_bias_eog.begin(), params.sampling.logit_bias_eog.end());
1131
0
    }
1132
1133
0
    if (params.sampling.penalty_last_n == -1) {
1134
0
        LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
1135
0
        params.sampling.penalty_last_n = llama_n_ctx(lctx);
1136
0
    }
1137
1138
0
    if (params.sampling.dry_penalty_last_n == -1) {
1139
0
        LOG_INF("%s: setting dry_penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
1140
0
        params.sampling.dry_penalty_last_n = llama_n_ctx(lctx);
1141
0
    }
1142
1143
0
    if (params.warmup) {
1144
0
        LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__);
1145
1146
0
        llama_set_warmup(lctx, true);
1147
1148
0
        std::vector<llama_token> tmp;
1149
0
        llama_token bos = llama_vocab_bos(vocab);
1150
0
        llama_token eos = llama_vocab_eos(vocab);
1151
1152
        // some models (e.g. T5) don't have a BOS token
1153
0
        if (bos != LLAMA_TOKEN_NULL) {
1154
0
            tmp.push_back(bos);
1155
0
        }
1156
0
        if (eos != LLAMA_TOKEN_NULL) {
1157
0
            tmp.push_back(eos);
1158
0
        }
1159
0
        if (tmp.empty()) {
1160
0
            tmp.push_back(0);
1161
0
        }
1162
1163
0
        if (llama_model_has_encoder(model)) {
1164
0
            llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
1165
0
            llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
1166
0
            if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
1167
0
                decoder_start_token_id = bos;
1168
0
            }
1169
0
            tmp.clear();
1170
0
            tmp.push_back(decoder_start_token_id);
1171
0
        }
1172
0
        if (llama_model_has_decoder(model)) {
1173
0
            llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
1174
0
        }
1175
0
        llama_memory_clear(llama_get_memory(lctx), true);
1176
0
        llama_synchronize(lctx);
1177
0
        llama_perf_context_reset(lctx);
1178
0
        llama_set_warmup(lctx, false);
1179
0
    }
1180
1181
0
    iparams.model.reset(model);
1182
0
    iparams.context.reset(lctx);
1183
1184
0
    return iparams;
1185
0
}
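A minimal load sketch built on the function above (not part of common.cpp; the model path is hypothetical and error handling is elided):

    // sketch: load a model and context with the common helper
    common_params params;
    params.model.path = "model.gguf"; // hypothetical path
    common_init_result init = common_init_from_params(params);
    if (!init.model || !init.context) {
        // load failed; common_init_from_params already logged the reason
    }
    llama_context * lctx = init.context.get(); // smart pointers keep ownership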
1186
1187
0
std::string get_model_endpoint() {
1188
0
    const char * model_endpoint_env = getenv("MODEL_ENDPOINT");
1189
    // We still respect the "HF_ENDPOINT" environment variable for backward compatibility.
1190
0
    const char * hf_endpoint_env = getenv("HF_ENDPOINT");
1191
0
    const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env;
1192
0
    std::string model_endpoint = "https://huggingface.co/";
1193
0
    if (endpoint_env) {
1194
0
        model_endpoint = endpoint_env;
1195
0
        if (model_endpoint.back() != '/') model_endpoint += '/';
1196
0
    }
1197
0
    return model_endpoint;
1198
0
}
1199
1200
0
void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
1201
0
    llama_clear_adapter_lora(ctx);
1202
0
    for (auto & la : lora) {
1203
0
        if (la.scale != 0.0f) {
1204
0
            llama_set_adapter_lora(ctx, la.ptr, la.scale);
1205
0
        }
1206
0
    }
1207
0
}
1208
1209
1.54k
struct llama_model_params common_model_params_to_llama(common_params & params) {
1210
1.54k
    auto mparams = llama_model_default_params();
1211
1212
1.54k
    if (!params.devices.empty()) {
1213
0
        mparams.devices = params.devices.data();
1214
0
    }
1215
1216
1.54k
    if (params.n_gpu_layers != -1) {
1217
0
        mparams.n_gpu_layers = params.n_gpu_layers;
1218
0
    }
1219
1220
1.54k
    mparams.main_gpu        = params.main_gpu;
1221
1.54k
    mparams.split_mode      = params.split_mode;
1222
1.54k
    mparams.tensor_split    = params.tensor_split;
1223
1.54k
    mparams.use_mmap        = params.use_mmap;
1224
1.54k
    mparams.use_mlock       = params.use_mlock;
1225
1.54k
    mparams.check_tensors   = params.check_tensors;
1226
1.54k
    mparams.use_extra_bufts = !params.no_extra_bufts;
1227
1.54k
    mparams.no_host         = params.no_host;
1228
1229
1.54k
    if (params.kv_overrides.empty()) {
1230
1.54k
        mparams.kv_overrides = NULL;
1231
1.54k
    } else {
1232
0
        GGML_ASSERT(params.kv_overrides.back().key[0] == 0 && "KV overrides not terminated with empty key");
1233
0
        mparams.kv_overrides = params.kv_overrides.data();
1234
0
    }
1235
1236
1.54k
    if (params.tensor_buft_overrides.empty()) {
1237
1.54k
        mparams.tensor_buft_overrides = NULL;
1238
1.54k
    } else {
1239
0
        GGML_ASSERT(params.tensor_buft_overrides.back().pattern == nullptr && "Tensor buffer overrides not terminated with empty pattern");
1240
0
        mparams.tensor_buft_overrides = params.tensor_buft_overrides.data();
1241
0
    }
1242
1243
1.54k
    mparams.progress_callback           = params.load_progress_callback;
1244
1.54k
    mparams.progress_callback_user_data = params.load_progress_callback_user_data;
1245
1246
1.54k
    return mparams;
1247
1.54k
}
1248
1249
0
struct llama_context_params common_context_params_to_llama(const common_params & params) {
1250
0
    auto cparams = llama_context_default_params();
1251
1252
0
    cparams.n_ctx             = params.n_ctx;
1253
0
    cparams.n_seq_max         = params.n_parallel;
1254
0
    cparams.n_batch           = params.n_batch;
1255
0
    cparams.n_ubatch          = params.n_ubatch;
1256
0
    cparams.n_threads         = params.cpuparams.n_threads;
1257
0
    cparams.n_threads_batch   = params.cpuparams_batch.n_threads == -1 ?
1258
0
                                params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
1259
0
    cparams.embeddings        = params.embedding;
1260
0
    cparams.rope_scaling_type = params.rope_scaling_type;
1261
0
    cparams.rope_freq_base    = params.rope_freq_base;
1262
0
    cparams.rope_freq_scale   = params.rope_freq_scale;
1263
0
    cparams.yarn_ext_factor   = params.yarn_ext_factor;
1264
0
    cparams.yarn_attn_factor  = params.yarn_attn_factor;
1265
0
    cparams.yarn_beta_fast    = params.yarn_beta_fast;
1266
0
    cparams.yarn_beta_slow    = params.yarn_beta_slow;
1267
0
    cparams.yarn_orig_ctx     = params.yarn_orig_ctx;
1268
0
    cparams.pooling_type      = params.pooling_type;
1269
0
    cparams.attention_type    = params.attention_type;
1270
0
    cparams.flash_attn_type   = params.flash_attn_type;
1271
0
    cparams.cb_eval           = params.cb_eval;
1272
0
    cparams.cb_eval_user_data = params.cb_eval_user_data;
1273
0
    cparams.offload_kqv       = !params.no_kv_offload;
1274
0
    cparams.no_perf           = params.no_perf;
1275
0
    cparams.op_offload        = !params.no_op_offload;
1276
0
    cparams.swa_full          = params.swa_full;
1277
0
    cparams.kv_unified        = params.kv_unified;
1278
1279
0
    cparams.type_k = params.cache_type_k;
1280
0
    cparams.type_v = params.cache_type_v;
1281
1282
0
    return cparams;
1283
0
}
1284
1285
0
struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params) {
1286
0
    struct ggml_threadpool_params tpp;
1287
1288
0
    ggml_threadpool_params_init(&tpp, params.n_threads); // setup the defaults
1289
1290
0
    if (params.mask_valid) {
1291
0
        std::memcpy(&tpp.cpumask, &params.cpumask, GGML_MAX_N_THREADS);
1292
0
    }
1293
1294
0
    tpp.prio       = params.priority;
1295
0
    tpp.poll       = params.poll;
1296
0
    tpp.strict_cpu = params.strict_cpu;
1297
1298
0
    return tpp;
1299
0
}
1300
1301
//
1302
// Batch utils
1303
//
1304
1305
0
void common_batch_clear(struct llama_batch & batch) {
1306
0
    batch.n_tokens = 0;
1307
0
}
1308
1309
void common_batch_add(
1310
                 struct llama_batch & batch,
1311
                        llama_token   id,
1312
                          llama_pos   pos,
1313
    const std::vector<llama_seq_id> & seq_ids,
1314
0
                               bool   logits) {
1315
0
    GGML_ASSERT(batch.seq_id[batch.n_tokens] && "llama_batch size exceeded");
1316
1317
0
    batch.token   [batch.n_tokens] = id;
1318
0
    batch.pos     [batch.n_tokens] = pos;
1319
0
    batch.n_seq_id[batch.n_tokens] = seq_ids.size();
1320
0
    for (size_t i = 0; i < seq_ids.size(); ++i) {
1321
0
        batch.seq_id[batch.n_tokens][i] = seq_ids[i];
1322
0
    }
1323
0
    batch.logits  [batch.n_tokens] = logits;
1324
1325
0
    batch.n_tokens++;
1326
0
}
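Batch-filling sketch for the helpers above (not part of common.cpp; llama_batch_init/llama_batch_free are the llama.h allocation calls, and the token ids are hypothetical):

    // sketch: build a single-sequence batch of prompt tokens
    const std::vector<llama_token> prompt_tokens = { 1, 15043 }; // hypothetical ids
    llama_batch batch = llama_batch_init(512, /*embd=*/0, /*n_seq_max=*/1);
    common_batch_clear(batch);
    for (size_t i = 0; i < prompt_tokens.size(); ++i) {
        // request logits only for the last token
        common_batch_add(batch, prompt_tokens[i], (llama_pos) i, { 0 },
                         i == prompt_tokens.size() - 1);
    }
    // ... llama_decode(ctx, batch); ...
    llama_batch_free(batch);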
1327
1328
//
1329
// Token utils
1330
//
1331
1332
0
size_t common_lcp(const llama_tokens & a, const llama_tokens & b) {
1333
0
    size_t i;
1334
0
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}
1335
1336
0
    return i;
1337
0
}
1338
1339
0
size_t common_lcs(const llama_tokens & a, const llama_tokens & b) {
1340
    // check for empty sequences
1341
0
    if (a.empty() || b.empty()) {
1342
0
        return 0;
1343
0
    }
1344
1345
    // get the lengths of the input sequences
1346
0
    size_t a_len = a.size();
1347
0
    size_t b_len = b.size();
1348
1349
    // initialize the maximum length of the longest common substring (LCS)
1350
0
    size_t max_length = 0;
1351
1352
    // use two rows instead of a 2D matrix to optimize space
1353
0
    std::vector<size_t> prev_row(b_len + 1, 0);
1354
0
    std::vector<size_t> curr_row(b_len + 1, 0);
1355
1356
    // iterate through the elements of a
1357
0
    for (size_t i = 1; i <= a_len; i++) {
1358
        // iterate through the elements of b
1359
0
        for (size_t j = 1; j <= b_len; j++) {
1360
            // if elements at the current positions match
1361
0
            if (a[i - 1] == b[j - 1]) {
1362
                // if it's the first element of either sequence, set the LCS length to 1
1363
0
                if (i == 1 || j == 1) {
1364
0
                    curr_row[j] = 1;
1365
0
                } else {
1366
                    // increment LCS length by 1 compared to the previous element
1367
0
                    curr_row[j] = prev_row[j - 1] + 1;
1368
0
                }
1369
1370
                // update max_length if necessary
1371
0
                if (curr_row[j] > max_length) {
1372
0
                    max_length = curr_row[j];
1373
0
                }
1374
0
            } else {
1375
                // reset LCS length if elements don't match
1376
0
                curr_row[j] = 0;
1377
0
            }
1378
0
        }
1379
1380
        // update the previous row for the next iteration
1381
0
        prev_row = curr_row;
1382
0
    }
1383
1384
    // return the maximum length of the LCS
1385
0
    return max_length;
1386
0
}
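common_lcp returns the length of the shared prefix, while common_lcs above returns the length of the longest common contiguous run (the reset-to-zero on mismatch makes it a substring, not a subsequence). A sketch (not part of common.cpp):

    // sketch: longest common prefix vs. longest common contiguous run
    const llama_tokens a = { 1, 2, 3, 4, 5 };
    const llama_tokens b = { 9, 2, 3, 4, 8 };
    const size_t n_prefix = common_lcp(a, b); // 0: first tokens differ
    const size_t n_common = common_lcs(a, b); // 3: the run {2, 3, 4}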
1387
1388
//
1389
// Vocab utils
1390
//
1391
1392
std::vector<llama_token> common_tokenize(
1393
  const struct llama_context * ctx,
1394
           const std::string & text,
1395
                        bool   add_special,
1396
0
                        bool   parse_special) {
1397
0
    const llama_model * model = llama_get_model(ctx);
1398
0
    const llama_vocab * vocab = llama_model_get_vocab(model);
1399
0
    return common_tokenize(vocab, text, add_special, parse_special);
1400
0
}
1401
1402
std::vector<llama_token> common_tokenize(
1403
    const struct llama_vocab * vocab,
1404
           const std::string & text,
1405
                        bool   add_special,
1406
0
                        bool   parse_special) {
1407
    // upper limit for the number of tokens
1408
0
    int n_tokens = text.length() + 2 * add_special;
1409
0
    std::vector<llama_token> result(n_tokens);
1410
0
    n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
1411
0
    if (n_tokens == std::numeric_limits<int32_t>::min()) {
1412
0
        throw std::runtime_error("Tokenization failed: input text too large, tokenization result exceeds int32_t limit");
1413
0
    }
1414
0
    if (n_tokens < 0) {
1415
0
        result.resize(-n_tokens);
1416
0
        int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
1417
0
        GGML_ASSERT(check == -n_tokens);
1418
0
    } else {
1419
0
        result.resize(n_tokens);
1420
0
    }
1421
0
    return result;
1422
0
}
1423
1424
0
std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
1425
0
    const llama_model * model = llama_get_model(ctx);
1426
0
    const llama_vocab * vocab = llama_model_get_vocab(model);
1427
0
    return common_token_to_piece(vocab, token, special);
1428
0
}
1429
1430
0
std::string common_token_to_piece(const struct llama_vocab * vocab, llama_token token, bool special) {
1431
0
    std::string piece;
1432
0
    piece.resize(piece.capacity());  // using string internal cache, 15 bytes + '\0'
1433
0
    const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
1434
0
    if (n_chars < 0) {
1435
0
        piece.resize(-n_chars);
1436
0
        int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
1437
0
        GGML_ASSERT(check == -n_chars);
1438
0
    }
1439
0
    else {
1440
0
        piece.resize(n_chars);
1441
0
    }
1442
1443
0
    return piece;
1444
0
}
1445
1446
0
std::string common_detokenize(const struct llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
1447
0
    const llama_model * model = llama_get_model(ctx);
1448
0
    const llama_vocab * vocab = llama_model_get_vocab(model);
1449
0
    return common_detokenize(vocab, tokens, special);
1450
0
}
1451
1452
0
std::string common_detokenize(const struct llama_vocab * vocab, const std::vector<llama_token> & tokens, bool special) {
1453
0
    std::string text;
1454
0
    text.resize(std::max(text.capacity(), tokens.size()));
1455
0
    int32_t n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
1456
0
    if (n_chars < 0) {
1457
0
        text.resize(-n_chars);
1458
0
        n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
1459
0
        GGML_ASSERT(n_chars <= (int32_t)text.size());  // whitespace trimming is performed after per-token detokenization
1460
0
    }
1461
1462
0
    text.resize(n_chars);
1463
1464
    // NOTE: the original tokenizer decodes bytes after collecting the pieces.
1465
0
    return text;
1466
0
}
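Together these helpers give two routes from tokens back to text: concatenating per-token pieces, which suits streaming output, or a single detokenize call. A hedged usage sketch, assuming a valid llama_context; roundtrip is an illustrative name:

#include <string>
#include <vector>
#include "common.h"

std::string roundtrip(llama_context * ctx, const std::string & prompt) {
    std::vector<llama_token> toks = common_tokenize(ctx, prompt, /*add_special=*/false, /*parse_special=*/false);
    // per-token pieces, e.g. for streaming generated tokens as they arrive...
    std::string streamed;
    for (llama_token t : toks) {
        streamed += common_token_to_piece(ctx, t, /*special=*/false);
    }
    // ...or all at once via detokenize
    return common_detokenize(ctx, toks, /*special=*/false);
}

Per the NOTE above, byte decoding happens after the pieces are collected, so the two paths can differ slightly on inputs whose multi-byte sequences are split across tokens.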
1467
1468
//
1469
// Embedding utils
1470
//
1471
1472
0
void common_embd_normalize(const float * inp, float * out, int n, int embd_norm) {
1473
0
    double sum = 0.0;
1474
1475
0
    switch (embd_norm) {
1476
0
        case -1: // no normalisation
1477
0
            sum = 1.0;
1478
0
            break;
1479
0
        case 0: // max absolute
1480
0
            for (int i = 0; i < n; i++) {
1481
0
                if (sum < std::abs(inp[i])) {
1482
0
                    sum = std::abs(inp[i]);
1483
0
                }
1484
0
            }
1485
0
            sum /= 32760.0; // so the normalized values span the int16 range
1486
0
            break;
1487
0
        case 2: // euclidean
1488
0
            for (int i = 0; i < n; i++) {
1489
0
                sum += inp[i] * inp[i];
1490
0
            }
1491
0
            sum = std::sqrt(sum);
1492
0
            break;
1493
0
        default: // p-norm (euclidean is p-norm p=2)
1494
0
            for (int i = 0; i < n; i++) {
1495
0
                sum += std::pow(std::abs(inp[i]), embd_norm);
1496
0
            }
1497
0
            sum = std::pow(sum, 1.0 / embd_norm);
1498
0
            break;
1499
0
    }
1500
1501
0
    const float norm = sum > 0.0 ? 1.0 / sum : 0.0f;
1502
1503
0
    for (int i = 0; i < n; i++) {
1504
0
        out[i] = inp[i] * norm;
1505
0
    }
1506
0
}
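The embd_norm switch maps -1 to no scaling, 0 to max-absolute scaling into an int16-like range, 2 to the euclidean (L2) norm, and any other value p to the general p-norm. A minimal self-contained check of the euclidean case: a (3, 4, 0) vector has norm 5 and normalizes to (0.6, 0.8, 0.0).

#include <cstdio>
#include "common.h"

int main() {
    const float inp[3] = {3.0f, 4.0f, 0.0f};
    float out[3];
    common_embd_normalize(inp, out, 3, /*embd_norm=*/2); // euclidean (L2)
    std::printf("%.2f %.2f %.2f\n", out[0], out[1], out[2]); // 0.60 0.80 0.00
}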
1507
1508
0
float common_embd_similarity_cos(const float * embd1, const float * embd2, int n){
1509
0
    double sum  = 0.0;
1510
0
    double sum1 = 0.0;
1511
0
    double sum2 = 0.0;
1512
1513
0
    for (int i = 0; i < n; i++) {
1514
0
        sum  += embd1[i] * embd2[i];
1515
0
        sum1 += embd1[i] * embd1[i];
1516
0
        sum2 += embd2[i] * embd2[i];
1517
0
    }
1518
1519
    // Handle the case where one or both vectors are zero vectors
1520
0
    if (sum1 == 0.0 || sum2 == 0.0) {
1521
0
        if (sum1 == 0.0 && sum2 == 0.0) {
1522
0
            return 1.0f; // treat two zero vectors as maximally similar
1523
0
        }
1524
0
        return 0.0f;
1525
0
    }
1526
1527
0
    return sum / (sqrt(sum1) * sqrt(sum2));
1528
0
}
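Two spot checks of the cosine above, no model required: orthogonal vectors score 0 and an identical non-zero pair scores 1 (both exact here, since the sums involved are exact in float).

#include <cassert>
#include "common.h"

int main() {
    const float ex[2] = {1.0f, 0.0f};
    const float ey[2] = {0.0f, 1.0f};
    assert(common_embd_similarity_cos(ex, ey, 2) == 0.0f); // orthogonal
    assert(common_embd_similarity_cos(ex, ex, 2) == 1.0f); // identical
}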
1529
1530
//
1531
// Control vector utils
1532
//
1533
1534
0
static common_control_vector_data common_control_vector_load_one(const common_control_vector_load_info & load_info) {
1535
0
    common_control_vector_data result = { -1, {} };
1536
1537
0
    ggml_context * ctx = nullptr;
1538
0
    struct gguf_init_params meta_gguf_params = {
1539
0
        /* .no_alloc = */ false,
1540
0
        /* .ctx      = */ &ctx,
1541
0
    };
1542
0
    struct gguf_context * ctx_gguf = gguf_init_from_file(load_info.fname.c_str(), meta_gguf_params);
1543
0
    if (!ctx_gguf) {
1544
0
        LOG_ERR("%s: failed to load control vector file from %s\n", __func__, load_info.fname.c_str());
1545
0
        return result;
1546
0
    }
1547
1548
0
    int32_t n_tensors = gguf_get_n_tensors(ctx_gguf);
1549
0
    if (n_tensors == 0) {
1550
0
        LOG_WRN("%s: no direction tensors found in %s\n", __func__, load_info.fname.c_str());
1551
0
    }
1552
1553
0
    for (int i = 0; i < n_tensors; i++) {
1554
0
        std::string name = gguf_get_tensor_name(ctx_gguf, i);
1555
1556
0
        int layer_idx = -1;
1557
1558
        // split on '.'
1559
0
        size_t dotpos = name.find('.');
1560
0
        if (dotpos != std::string::npos && name.substr(0, dotpos) == "direction") {
1561
0
            try {
1562
0
                layer_idx = std::stoi(name.substr(dotpos + 1));
1563
0
            } catch (...) {
1564
0
                layer_idx = -1;
1565
0
            }
1566
0
        }
1567
0
        if (layer_idx < 0) {
1568
0
            LOG_ERR("%s: invalid/unparsable direction tensor layer index in %s\n", __func__, load_info.fname.c_str());
1569
0
            result.n_embd = -1;
1570
0
            break;
1571
0
        } else if (layer_idx == 0) {
1572
0
            LOG_ERR("%s: invalid (zero) direction tensor layer index in %s\n", __func__, load_info.fname.c_str());
1573
0
            result.n_embd = -1;
1574
0
            break;
1575
0
        }
1576
1577
0
        struct ggml_tensor * tensor = ggml_get_tensor(ctx, name.c_str());
1578
0
        if (tensor->type != GGML_TYPE_F32) {
1579
0
            LOG_ERR("%s: invalid (non-F32) direction tensor type in %s\n", __func__, load_info.fname.c_str());
1580
0
            result.n_embd = -1;
1581
0
            break;
1582
0
        }
1583
0
        if (ggml_n_dims(tensor) != 1) {
1584
0
            LOG_ERR("%s: invalid (non-1D) direction tensor shape in %s\n", __func__, load_info.fname.c_str());
1585
0
            result.n_embd = -1;
1586
0
            break;
1587
0
        }
1588
1589
0
        if (result.n_embd == -1) {
1590
0
            result.n_embd = ggml_nelements(tensor);
1591
0
        } else if (ggml_nelements(tensor) != result.n_embd) {
1592
0
            LOG_ERR("%s: direction tensor in %s does not match previous dimensions\n", __func__, load_info.fname.c_str());
1593
0
            result.n_embd = -1;
1594
0
            break;
1595
0
        }
1596
1597
        // extend if necessary - do not store data for layer 0 (it's not used)
1598
0
        result.data.resize(std::max(result.data.size(), static_cast<size_t>(result.n_embd * layer_idx)), 0.0f);
1599
1600
0
        const float * src = (const float *) tensor->data;
1601
0
        float * dst = result.data.data() + result.n_embd * (layer_idx - 1);  // layer 1 at [0]
1602
0
        for (int j = 0; j < result.n_embd; j++) {
1603
0
            dst[j] += src[j] * load_info.strength;  // allows multiple directions for same layer in same file
1604
0
        }
1605
1606
0
    }
1607
1608
0
    if (result.n_embd == -1) {
1609
0
        LOG_WRN("%s: skipping %s due to invalid direction tensors\n", __func__, load_info.fname.c_str());
1610
0
        result.data.clear();
1611
0
    }
1612
1613
0
    gguf_free(ctx_gguf);
1614
0
    ggml_free(ctx);
1615
1616
0
    return result;
1617
0
}
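The loader above expects each GGUF tensor to be named direction.<layer>, where <layer> is a 1-based layer index, and each tensor to be a 1-D F32 vector of the same length n_embd; layer L's accumulated direction lands at offset (L - 1) * n_embd in the flat result.data buffer. A hedged helper sketch for reading one layer back out; layer_direction is an illustrative name, not part of common.cpp:

#include <cstddef>
#include "common.h"

// returns nullptr for invalid input or layers beyond the stored range
static const float * layer_direction(const common_control_vector_data & cv, int layer_idx) {
    if (cv.n_embd <= 0 || layer_idx < 1) {
        return nullptr;
    }
    const size_t off = (size_t) cv.n_embd * (layer_idx - 1);
    return off + cv.n_embd <= cv.data.size() ? cv.data.data() + off : nullptr;
}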
1618
1619
0
common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos) {
1620
0
    common_control_vector_data result = { -1, {} };
1621
1622
0
    for (const auto & info : load_infos) {
1623
0
        auto cur = common_control_vector_load_one(info);
1624
1625
0
        if (cur.n_embd == -1) {
1626
0
            result.n_embd = -1;
1627
0
            break;
1628
0
        }
1629
0
        if (result.n_embd != -1 && result.n_embd != cur.n_embd) {
1630
0
            LOG_ERR("%s: control vectors in %s does not match previous dimensions\n", __func__, info.fname.c_str());
1631
0
            result.n_embd = -1;
1632
0
            break;
1633
0
        }
1634
1635
0
        if (result.n_embd == -1) {
1636
0
            result = std::move(cur);
1637
0
        } else {
1638
0
            result.data.resize(std::max(result.data.size(), cur.data.size()), 0.0f);  // extend if necessary
1639
0
            for (size_t i = 0; i < cur.data.size(); i++) {
1640
0
                result.data[i] += cur.data[i];
1641
0
            }
1642
0
        }
1643
0
    }
1644
1645
0
    if (result.n_embd == -1) {
1646
0
        LOG_ERR("%s: no valid control vector files passed\n", __func__);
1647
0
        result.data.clear();
1648
0
    }
1649
1650
0
    return result;
1651
0
}
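A hedged usage sketch: blending two control vector files, the second applied negatively via its strength. The file names are illustrative, and the field order (strength, then fname) is assumed to match the common_control_vector_load_info declaration in common.h.

#include <vector>
#include "common.h"

bool load_blended_control_vector(common_control_vector_data & cv) {
    std::vector<common_control_vector_load_info> infos = {
        { /* strength = */  0.8f, /* fname = */ "happy.gguf"  },
        { /* strength = */ -0.4f, /* fname = */ "formal.gguf" },
    };
    cv = common_control_vector_load(infos);
    return cv.n_embd != -1; // false: every file was missing or malformed
}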
1652
1653
0
ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride) {
1654
0
    const int64_t ne_datapoint = llama_n_ctx(ctx);
1655
0
    const int64_t ndata        = (tokens.size() - ne_datapoint - 1) / stride;
1656
0
    ggml_opt_dataset_t result = ggml_opt_dataset_init(
1657
0
        GGML_TYPE_I32, GGML_TYPE_I32, ne_datapoint, ne_datapoint, ndata, /*ndata_shard =*/ 1);
1658
1659
0
    llama_token * data   = (llama_token *) ggml_opt_dataset_data(result)->data;
1660
0
    llama_token * labels = (llama_token *) ggml_opt_dataset_labels(result)->data;
1661
1662
0
    for (int64_t idata = 0; idata < ndata; ++idata) {
1663
0
        memcpy(data   + idata*ne_datapoint, tokens.data() + idata*stride + 0, ne_datapoint*sizeof(llama_token));
1664
0
        memcpy(labels + idata*ne_datapoint, tokens.data() + idata*stride + 1, ne_datapoint*sizeof(llama_token));
1665
0
    }
1666
1667
0
    return result;
1668
0
}
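The sizing and the one-token shift implement standard next-token prediction: each datapoint is an n_ctx-long window of the corpus, and its label row is the same window advanced by one token. A compile-checkable worked example with illustrative numbers:

#include <cstdint>

// a 1000-token corpus, a 512-token context, stride 1:
constexpr int64_t n_tokens = 1000, n_ctx = 512, stride = 1;
constexpr int64_t ndata = (n_tokens - n_ctx - 1) / stride; // = 487 windows
static_assert(ndata == 487, "each window is n_ctx tokens; its labels are the window shifted right by one");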
1669
1670
0
ggml_opt_optimizer_params common_opt_lr_pars(void * userdata) {
1671
0
    ggml_opt_optimizer_params result = ggml_opt_get_default_optimizer_params(nullptr);
1672
0
    const lr_opt &            d      = *(lr_opt *) userdata;
1673
0
    result.adamw.alpha = result.sgd.alpha = d.get_lr(d.epoch);
1674
0
    result.sgd.wd = result.adamw.wd = d.wd;
1675
0
    return result;
1676
0
}
1677
1678
// TODO make all command line args case-insensitive
1679
0
static inline bool eq_case_insensitive(char const* a, char const* b) {
1680
0
    return !
1681
#if defined(_MSC_VER)
1682
        _stricmp
1683
#else
1684
0
        strcasecmp
1685
0
#endif // defined(_MSC_VER)
1686
0
        (a, b);
1687
0
}
1688
1689
0
enum ggml_opt_optimizer_type common_opt_get_optimizer(const char * n) {
1690
0
    if (eq_case_insensitive("adamw", n)) {
1691
0
        return GGML_OPT_OPTIMIZER_TYPE_ADAMW;
1692
0
    }
1693
0
    if (eq_case_insensitive("sgd", n)) {
1694
0
        return GGML_OPT_OPTIMIZER_TYPE_SGD;
1695
0
    }
1696
0
    return GGML_OPT_OPTIMIZER_TYPE_COUNT;
1697
0
}
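A hedged usage sketch for the parser above, assuming the declaration in common.h; GGML_OPT_OPTIMIZER_TYPE_COUNT doubles as the "unrecognized" result:

#include <cstdio>
#include "common.h"

int main() {
    const enum ggml_opt_optimizer_type opt = common_opt_get_optimizer("AdamW"); // matching is case-insensitive
    if (opt == GGML_OPT_OPTIMIZER_TYPE_COUNT) {
        std::fprintf(stderr, "unknown optimizer name\n");
        return 1;
    }
    return 0;
}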
1698
1699
// TODO simplify to use just log and exp
1700
static float const k_log_2 = std::log(2.f);
1701
1702
0
void lr_opt::init() {
1703
0
    if (lr_min > 0 && lr_min < lr0) {
1704
0
        float nhalf = std::log(lr0 / lr_min) / k_log_2;
1705
0
        float e     = epochs;
1706
0
        if (decay_epochs > 0 && decay_epochs < e) {
1707
0
            e = decay_epochs;
1708
0
        } else {
1709
0
            decay_epochs = e;
1710
0
        }
1711
0
        scale_epoch = nhalf / e;
1712
0
    }
1713
0
}
1714
1715
0
float lr_opt::get_lr(float epoch) const {
1716
0
    float r = lr_min <= 0 ? lr0 :
1717
0
        epoch >= decay_epochs ? lr_min :
1718
0
        lr0 * std::pow(0.5f, epoch * scale_epoch);
1719
0
    LOG_INF("epoch %.2g lr=%.2g\n", epoch, r);
1720
0
    return r;
1721
0
}
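Since scale_epoch = log2(lr0 / lr_min) / decay_epochs, the schedule lr(epoch) = lr0 * 0.5^(epoch * scale_epoch) reaches lr_min exactly at epoch = decay_epochs and is clamped there afterwards. A numeric check with illustrative values:

// lr0 = 1e-3, lr_min = 1e-5, decay_epochs = 10
// nhalf       = log2(1e-3 / 1e-5) = log2(100) ~= 6.644 halvings
// scale_epoch = 6.644 / 10        ~= 0.6644
// epoch  0 -> 1e-3 * 0.5^0.0      =  1e-3
// epoch  5 -> 1e-3 * 0.5^3.322    ~= 1e-4
// epoch 10 -> clamped to lr_min   =  1e-5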