Coverage Report

Created: 2026-01-09 06:16

/src/llama.cpp/src/llama-sampling.cpp
Line  Count  Source
1
#include "llama-sampling.h"
2
3
#include "llama-impl.h"
4
#include "llama-vocab.h"
5
#include "llama-grammar.h"
6
7
#include "ggml-cpp.h"
8
9
#include <array>
10
#include <algorithm>
11
#include <cassert>
12
#include <cfloat>
13
#include <chrono>
14
#include <cmath>
15
#include <cstdlib>
16
#include <cstring>
17
#include <ctime>
18
#include <numeric>
19
#include <random>
20
#include <unordered_map>
21
#include <stdexcept>
22
23
// the ring buffer works similarly to std::deque, but with a fixed capacity
24
template<typename T>
25
struct ring_buffer {
26
0
    ring_buffer(size_t cap) : capacity(cap), data(cap) {}
27
28
0
    T & front() {
29
0
        if (sz == 0) {
30
0
            throw std::runtime_error("ring buffer is empty");
31
0
        }
32
0
        return data[first];
33
0
    }
34
35
    const T & front() const {
36
        if (sz == 0) {
37
            throw std::runtime_error("ring buffer is empty");
38
        }
39
        return data[first];
40
    }
41
42
    T & back() {
43
        if (sz == 0) {
44
            throw std::runtime_error("ring buffer is empty");
45
        }
46
        return data[pos];
47
    }
48
49
    const T & back() const {
50
        if (sz == 0) {
51
            throw std::runtime_error("ring buffer is empty");
52
        }
53
        return data[pos];
54
    }
55
56
0
    void push_back(const T & value) {
57
0
        if (capacity == 0) {
58
0
            throw std::runtime_error("ring buffer: capacity is zero");
59
0
        }
60
61
0
        if (sz == capacity) {
62
            // advance the start when buffer is full
63
0
            first = (first + 1) % capacity;
64
0
        } else {
65
0
            sz++;
66
0
        }
67
0
        data[pos] = value;
68
0
        pos = (pos + 1) % capacity;
69
0
    }
70
71
    T pop_front() {
72
        if (sz == 0) {
73
            throw std::runtime_error("ring buffer is empty");
74
        }
75
        T value = data[first];
76
        first = (first + 1) % capacity;
77
        sz--;
78
        return value;
79
    }
80
81
    //T & operator[](size_t i) {
82
    //    if (i >= sz) {
83
    //        throw std::runtime_error("ring buffer: index out of bounds");
84
    //    }
85
    //    return data[(first + i) % capacity];
86
    //}
87
88
    //const T & at(size_t i) const {
89
    //    if (i >= sz) {
90
    //        throw std::runtime_error("ring buffer: index out of bounds");
91
    //    }
92
    //    return data[(first + i) % capacity];
93
    //}
94
95
0
    const T & rat(size_t i) const {
96
0
        if (i >= sz) {
97
0
            throw std::runtime_error("ring buffer: index out of bounds");
98
0
        }
99
0
        return data[(first + sz - i - 1) % capacity];
100
0
    }
101
102
    std::vector<T> to_vector() const {
103
        std::vector<T> result;
104
        result.reserve(sz);
105
        for (size_t i = 0; i < sz; i++) {
106
            result.push_back(data[(first + i) % capacity]);
107
        }
108
        return result;
109
    }
110
111
0
    void clear() {
112
        // only reset the bookkeeping state of the buffer; the underlying storage is kept
113
0
        sz = 0;
114
0
        first = 0;
115
0
        pos = 0;
116
0
    }
117
118
    bool empty() const {
119
        return sz == 0;
120
    }
121
122
0
    size_t size() const {
123
0
        return sz;
124
0
    }
125
126
    size_t capacity = 0;
127
    size_t sz = 0;
128
    size_t first = 0;
129
    size_t pos = 0;
130
131
    std::vector<T> data;
132
};
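
Illustrative sketch (not part of the covered file): a minimal usage example of the ring_buffer template above, showing the fixed-capacity overwrite behavior and the reverse-indexed rat() accessor; the helper name ring_buffer_example is hypothetical.

#include <cassert>

static void ring_buffer_example() {
    ring_buffer<int> rb(3);
    rb.push_back(1);
    rb.push_back(2);
    rb.push_back(3);
    rb.push_back(4);            // capacity reached: overwrites the oldest element (1)

    assert(rb.size()  == 3);
    assert(rb.front() == 2);    // oldest remaining element
    assert(rb.rat(0)  == 4);    // rat() indexes from the newest element backwards
    assert(rb.rat(2)  == 2);
    // rb.to_vector() == {2, 3, 4}
}
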
133
134
// writes result in res, does not mutate cur
135
0
static void llama_token_data_array_partial_sort(const llama_token_data_array & cur, int npartial, std::vector<llama_token_data> & res) {
136
0
    static const auto comp = [](const llama_token_data & a, const llama_token_data & b) {
137
0
        return a.logit > b.logit;
138
0
    };
139
140
0
    constexpr int   nbuckets     = 128;
141
0
    constexpr float bucket_low   = -10.0f;
142
0
    constexpr float bucket_high  =  10.0f;
143
0
    constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
144
0
    constexpr float bucket_inter = -bucket_low * bucket_scale;
145
146
0
    std::vector<int> bucket_idx;
147
0
    std::vector<int> histo(nbuckets, 0);
148
149
0
    std::vector<llama_token_data*> bucket_ptrs;
150
151
0
    bucket_idx.reserve(cur.size);
152
153
0
    for (int i = 0; i < (int)cur.size; ++i) {
154
0
        const float val = cur.data[i].logit;
155
0
        int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
156
0
        ib = std::max(0, std::min(nbuckets - 1, ib));
157
0
        bucket_idx.push_back(ib);
158
0
        ++histo[ib];
159
0
    }
160
0
    int nhave = 0;
161
0
    int ib = nbuckets - 1;
162
0
    for ( ; ib >= 0; --ib) {
163
0
        nhave += histo[ib];
164
0
        if (nhave >= npartial) {
165
0
            break;
166
0
        }
167
0
    }
168
0
    res.resize(nhave);
169
0
    auto * ptr = res.data();
170
0
    bucket_ptrs.reserve(nbuckets - ib);
171
0
    for (int j = nbuckets - 1; j >= ib; --j) {
172
0
        bucket_ptrs.push_back(ptr);
173
0
        ptr += histo[j];
174
0
    }
175
0
    for (int i = 0; i < (int)cur.size; ++i) {
176
0
        int j = bucket_idx[i];
177
0
        if (j >= ib) {
178
0
            *bucket_ptrs[nbuckets - 1 - j]++ = cur.data[i];
179
0
        }
180
0
    }
181
182
0
    ptr = res.data();
183
0
    int ndone = 0;
184
0
    for (int j = nbuckets - 1; j > ib; --j) {
185
0
        std::sort(ptr, ptr + histo[j], comp);
186
0
        ptr += histo[j];
187
0
        ndone += histo[j];
188
0
    }
189
0
    std::partial_sort(ptr, ptr + npartial - ndone, ptr + histo[ib], comp);
190
0
}
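
Illustrative sketch (not part of the covered file): the same bucketing idea reduced to plain floats — values are histogrammed into fixed-width buckets, only the highest buckets that together hold at least k elements are collected, and only those are sorted. The helper name partial_top_k_bucketed is hypothetical; it assumes 1 <= k.

#include <algorithm>
#include <functional>
#include <vector>

static std::vector<float> partial_top_k_bucketed(const std::vector<float> & vals, int k) {
    constexpr int   nbuckets = 8;
    constexpr float lo = -10.0f;
    constexpr float hi =  10.0f;
    constexpr float scale = nbuckets/(hi - lo);

    // histogram the values into fixed-width buckets
    std::vector<int> histo(nbuckets, 0);
    std::vector<int> bucket_of(vals.size());
    for (size_t i = 0; i < vals.size(); ++i) {
        const int ib = std::max(0, std::min(nbuckets - 1, int(scale*(vals[i] - lo))));
        bucket_of[i] = ib;
        ++histo[ib];
    }

    // walk buckets from highest to lowest until at least k elements are covered
    int nhave = 0;
    int ib = nbuckets - 1;
    for (; ib >= 0; --ib) {
        nhave += histo[ib];
        if (nhave >= k) {
            break;
        }
    }

    // collect only the elements from the selected buckets and sort those
    std::vector<float> res;
    res.reserve(nhave);
    for (size_t i = 0; i < vals.size(); ++i) {
        if (bucket_of[i] >= ib) {
            res.push_back(vals[i]);
        }
    }
    std::sort(res.begin(), res.end(), std::greater<float>());
    res.resize(std::min<size_t>(k, res.size()));

    return res; // the k largest values, in descending order
}
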
191
192
// reduces the size of cur_p to npartial, keeping only the top npartial elements
193
0
static void llama_token_data_array_partial_sort_inplace(llama_token_data_array * cur_p, int npartial) {
194
0
    static const auto comp = [](const llama_token_data & a, const llama_token_data & b) {
195
0
        return a.logit > b.logit;
196
0
    };
197
198
0
    if (npartial <= 128) {
199
0
        std::partial_sort(cur_p->data, cur_p->data + npartial, cur_p->data + cur_p->size, comp);
200
201
0
        cur_p->size = npartial;
202
0
        cur_p->sorted = true;
203
204
0
        return;
205
0
    }
206
207
0
    std::vector<llama_token_data> tmp;
208
209
0
    llama_token_data_array_partial_sort(*cur_p, npartial, tmp);
210
211
0
    std::copy(tmp.data(), tmp.data() + npartial, cur_p->data);
212
213
0
    cur_p->size = npartial;
214
0
    cur_p->sorted = true;
215
0
}
216
217
0
static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & rng) {
218
    // iterator for the probabilities
219
0
#ifdef __GNUC__
220
0
    #pragma GCC diagnostic push
221
0
    #pragma GCC diagnostic ignored "-Wunused-local-typedefs"
222
0
#endif
223
224
0
    struct probs_iterator {
225
0
        typedef std::input_iterator_tag iterator_category;
226
0
        typedef float value_type;
227
0
        typedef float * pointer;
228
0
        typedef float & reference;
229
0
        typedef ptrdiff_t difference_type;
230
231
0
        const llama_token_data * data;
232
233
0
        bool operator==(const probs_iterator & other) const { return data == other.data; }
234
0
        bool operator!=(const probs_iterator & other) const { return data != other.data; }
235
0
        const float & operator*() const { return data->p; }
236
0
        probs_iterator & operator++() { ++data; return *this; }
237
0
        probs_iterator operator++(int) { probs_iterator tmp = *this; ++data; return tmp; }
238
0
    };
239
240
0
#ifdef __GNUC__
241
0
    #pragma GCC diagnostic pop
242
0
#endif
243
244
0
    std::discrete_distribution<int> dist(probs_iterator{cur_p->data}, probs_iterator{cur_p->data + cur_p->size});
245
246
0
    return dist(rng);
247
0
}
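
Illustrative sketch (not part of the covered file): the custom probs_iterator above only exists to feed llama_token_data::p values into std::discrete_distribution without copying them out; on a plain float vector the same sampling step reduces to the following (the helper name sample_index is hypothetical).

#include <random>
#include <vector>

static int sample_index(const std::vector<float> & probs, std::mt19937 & rng) {
    // picks index i with probability proportional to probs[i]
    std::discrete_distribution<int> dist(probs.begin(), probs.end());
    return dist(rng);
}
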
248
249
/*
250
static void llama_log_softmax(float * array, size_t size) {
251
    float max_l = *std::max_element(array, array + size);
252
    float sum = 0.f;
253
    for (size_t i = 0; i < size; ++i) {
254
        float p = expf(array[i] - max_l);
255
        sum += p;
256
        array[i] = p;
257
    }
258
259
    for (size_t i = 0; i < size; ++i) {
260
        array[i] = logf(array[i] / sum);
261
    }
262
}
263
*/
264
265
0
static void llama_sampler_temp_impl(llama_token_data_array * cur_p, float temp) {
266
0
    if (temp <= 0.0f) {
267
        // find the token with the highest logit and set the rest to -inf
268
0
        size_t max_i = 0;
269
0
        float  max_l = cur_p->data[0].logit;
270
271
0
        for (size_t i = 1; i < cur_p->size; ++i) {
272
0
            if (cur_p->data[i    ].logit > max_l) {
273
0
                cur_p->data[max_i].logit = -INFINITY;
274
0
                max_i = i;
275
0
                max_l = cur_p->data[i].logit;
276
0
            } else {
277
0
                cur_p->data[i].logit = -INFINITY;
278
0
            }
279
0
        }
280
281
0
        return;
282
0
    }
283
284
0
    for (size_t i = 0; i < cur_p->size; ++i) {
285
0
        cur_p->data[i].logit /= temp;
286
0
    }
287
0
}
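
Illustrative sketch (not part of the covered file): dividing the logits by the temperature before softmax sharpens the distribution for temp < 1 and flattens it for temp > 1, which is why temp <= 0 is handled as a separate greedy case above. The helper name softmax_at_temp is hypothetical.

#include <algorithm>
#include <cmath>
#include <vector>

static std::vector<float> softmax_at_temp(std::vector<float> logits, float temp) {
    for (float & l : logits) {
        l /= temp; // assumes temp > 0; temp <= 0 is the greedy case handled above
    }

    // numerically stable softmax
    const float max_l = *std::max_element(logits.begin(), logits.end());

    std::vector<float> p(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) {
        p[i] = expf(logits[i] - max_l);
        sum += p[i];
    }
    for (float & x : p) {
        x /= sum;
    }
    return p;
}
// e.g. logits = {2, 1, 0}: temp = 1.0 -> {0.67, 0.24, 0.09}
//                          temp = 0.5 -> {0.87, 0.12, 0.02} (sharper)
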
288
289
0
static void llama_sampler_softmax_impl(llama_token_data_array * cur_p, bool do_sort) {
290
0
    GGML_ASSERT(cur_p->size > 0);
291
292
    // Sort the logits in descending order if requested
293
0
    if (do_sort && !cur_p->sorted) {
294
0
        llama_token_data_array_partial_sort_inplace(cur_p, cur_p->size);
295
0
    }
296
297
0
    float max_l = cur_p->data[0].logit;
298
0
    if (!cur_p->sorted) {
299
0
        for (size_t i = 1; i < cur_p->size; ++i) {
300
0
            max_l = std::max(max_l, cur_p->data[i].logit);
301
0
        }
302
0
    }
303
304
0
    float cum_sum = 0.0f;
305
306
0
    for (size_t i = 0; i < cur_p->size; ++i) {
307
0
        float p = expf(cur_p->data[i].logit - max_l);
308
0
        cur_p->data[i].p = p;
309
0
        cum_sum += p;
310
0
    }
311
312
0
    for (size_t i = 0; i < cur_p->size; ++i) {
313
0
        cur_p->data[i].p /= cum_sum;
314
0
    }
315
0
}
316
317
0
static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k) {
318
    // if (k >= (int32_t)cur_p->size) {
319
    //     return;
320
    // }
321
322
0
    if (k <= 0) {
323
0
        return;
324
0
    }
325
326
0
    k = std::min(k, (int) cur_p->size);
327
328
    // Sort scores in descending order
329
0
    if (!cur_p->sorted) {
330
0
        llama_token_data_array_partial_sort_inplace(cur_p, k);
331
0
    }
332
333
0
    cur_p->size = k;
334
0
}
335
336
0
static uint32_t get_rng_seed(uint32_t seed) {
337
0
    if (seed == LLAMA_DEFAULT_SEED) {
338
        // use system clock if std::random_device is not a true RNG
339
0
        static bool is_rd_prng = std::random_device().entropy() == 0;
340
0
        if (is_rd_prng) {
341
0
            return (uint32_t) std::chrono::system_clock::now().time_since_epoch().count();
342
0
        }
343
0
        std::random_device rd;
344
0
        return rd();
345
0
    }
346
0
    return seed;
347
0
}
348
349
// llama_sampler API
350
351
struct llama_sampler * llama_sampler_init(
352
        struct llama_sampler_i * iface,
353
0
        llama_sampler_context_t ctx) {
354
0
    return new llama_sampler {
355
0
        /* .iface = */ iface,
356
0
        /* .ctx   = */ ctx,
357
0
    };
358
0
}
359
360
0
const char * llama_sampler_name(const struct llama_sampler * smpl) {
361
0
    if (!smpl->iface) {
362
0
        return "(null)";
363
0
    }
364
365
0
    return smpl->iface->name(smpl);
366
0
}
367
368
0
void llama_sampler_accept(struct llama_sampler * smpl, llama_token token) {
369
0
    if (!smpl) {
370
0
        return;
371
0
    }
372
373
0
    if (smpl->iface->accept) {
374
0
        smpl->iface->accept(smpl, token);
375
0
    }
376
0
}
377
378
0
void llama_sampler_apply(struct llama_sampler * smpl, struct llama_token_data_array * cur_p) {
379
0
    if (!smpl) {
380
0
        return;
381
0
    }
382
383
0
    GGML_ASSERT(smpl->iface->apply);
384
0
    smpl->iface->apply(smpl, cur_p);
385
0
}
386
387
0
void llama_sampler_reset(struct llama_sampler * smpl) {
388
0
    if (!smpl) {
389
0
        return;
390
0
    }
391
392
0
    if (smpl->iface->reset) {
393
0
        smpl->iface->reset(smpl);
394
0
    }
395
0
}
396
397
0
struct llama_sampler * llama_sampler_clone(const struct llama_sampler * smpl) {
398
0
    if (!smpl) {
399
0
        return nullptr;
400
0
    }
401
402
0
    if (smpl->iface->clone) {
403
0
        return smpl->iface->clone(smpl);
404
0
    }
405
406
0
    if (smpl->ctx == nullptr) {
407
0
        return llama_sampler_init(
408
0
            /* .iface = */ smpl->iface,
409
0
            /* .ctx   = */ nullptr
410
0
        );
411
0
    }
412
413
0
    GGML_ABORT("the sampler does not support cloning");
414
0
}
415
416
0
void llama_sampler_free(struct llama_sampler * smpl) {
417
0
    if (smpl == nullptr) {
418
0
        return;
419
0
    }
420
421
0
    if (smpl->iface->free) {
422
0
        smpl->iface->free(smpl);
423
0
    }
424
425
0
    delete smpl;
426
0
}
427
428
// empty sampler
429
430
struct llama_sampler_empty {
431
    const char * name;
432
};
433
434
static struct llama_sampler * llama_sampler_init_empty(const char * name);
435
436
0
static const char * llama_sampler_empty_name(const struct llama_sampler * smpl) {
437
0
    auto * ctx = (llama_sampler_empty *) smpl->ctx;
438
0
    return ctx->name;
439
0
}
440
441
0
static void llama_sampler_empty_accept(struct llama_sampler * smpl, llama_token token) {
442
0
    GGML_UNUSED(smpl);
443
0
    GGML_UNUSED(token);
444
0
}
445
446
0
static void llama_sampler_empty_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
447
0
    GGML_UNUSED(smpl);
448
0
    GGML_UNUSED(cur_p);
449
0
}
450
451
0
static void llama_sampler_empty_reset(struct llama_sampler * smpl) {
452
0
    GGML_UNUSED(smpl);
453
0
}
454
455
0
static struct llama_sampler * llama_sampler_empty_clone(const struct llama_sampler * smpl) {
456
0
    auto * ctx = (llama_sampler_empty *) smpl->ctx;
457
0
    return llama_sampler_init_empty(ctx->name);
458
0
}
459
460
0
static void llama_sampler_empty_free(struct llama_sampler * smpl) {
461
0
    delete (llama_sampler_empty *) smpl->ctx;
462
0
}
463
464
static bool llama_sampler_empty_backend_init(
465
        struct llama_sampler       * smpl,
466
0
        ggml_backend_buffer_type_t   buft) {
467
0
    GGML_UNUSED(smpl);
468
0
    GGML_UNUSED(buft);
469
470
0
    return true;
471
0
}
472
473
static void llama_sampler_empty_backend_accept(
474
        struct llama_sampler * smpl,
475
        ggml_context * ctx,
476
        ggml_cgraph * gf,
477
0
        struct ggml_tensor * selected_token) {
478
0
    GGML_UNUSED(smpl);
479
0
    GGML_UNUSED(ctx);
480
0
    GGML_UNUSED(gf);
481
0
    GGML_UNUSED(selected_token);
482
0
}
483
484
static void llama_sampler_empty_backend_apply(
485
          struct llama_sampler      * smpl,
486
          struct ggml_context       * ctx,
487
          struct ggml_cgraph        * gf,
488
0
          struct llama_sampler_data * data) {
489
0
    GGML_UNUSED(smpl);
490
0
    GGML_UNUSED(ctx);
491
0
    GGML_UNUSED(gf);
492
0
    GGML_UNUSED(data);
493
0
}
494
495
0
static void llama_sampler_empty_backend_set_input(struct llama_sampler * smpl) {
496
0
    GGML_UNUSED(smpl);
497
0
}
498
499
static struct llama_sampler_i llama_sampler_empty_i = {
500
    /* .name              = */ llama_sampler_empty_name,
501
    /* .accept            = */ llama_sampler_empty_accept,
502
    /* .apply             = */ llama_sampler_empty_apply,
503
    /* .reset             = */ llama_sampler_empty_reset,
504
    /* .clone             = */ llama_sampler_empty_clone,
505
    /* .free              = */ llama_sampler_empty_free,
506
    /* .backend_init      = */ llama_sampler_empty_backend_init,
507
    /* .backend_accept    = */ llama_sampler_empty_backend_accept,
508
    /* .backend_apply     = */ llama_sampler_empty_backend_apply,
509
    /* .backend_set_input = */ llama_sampler_empty_backend_set_input,
510
};
511
512
0
struct llama_sampler * llama_sampler_init_empty(const char * name) {
513
0
    return llama_sampler_init(
514
0
        /* .iface = */ &llama_sampler_empty_i,
515
0
        /* .ctx   = */ new llama_sampler_empty {
516
0
            /* .name = */ name,
517
0
        }
518
0
    );
519
0
}
520
521
// common backend sampler functionality
522
//
523
// +name : means that the sampler is supported and will run on the backend
524
// -name : means that a ggml operator is not supported by the backend
525
//
526
struct llama_sampler_backend {
527
0
    llama_sampler_backend(const char * name) : name(name), name_ext(name), is_init(false), support(false) {}
528
529
0
    const char * get_name() {
530
0
        if (!is_init) {
531
0
            return name.c_str();
532
0
        }
533
534
0
        if (support) {
535
0
            name_ext = "+" + name;
536
0
        } else {
537
0
            name_ext = "-" + name;
538
0
        }
539
540
0
        return name_ext.c_str();
541
0
    }
542
543
0
    void init(bool support) {
544
0
        GGML_ASSERT(this->is_init == false);
545
546
0
        this->is_init = true;
547
0
        this->support = support;
548
0
    }
549
550
private:
551
    std::string name;
552
    std::string name_ext;
553
554
    bool is_init;
555
    bool support;
556
};
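
Illustrative sketch (not part of the covered file): how the +/- naming convention above plays out — the prefix is only attached once init() has been called. The helper name backend_name_example is hypothetical.

static void backend_name_example() {
    llama_sampler_backend sb("top-k");
    // before init():    sb.get_name() returns "top-k"
    sb.init(/*support =*/ true);
    // after init(true): sb.get_name() returns "+top-k"
    //                   (with support == false it would be "-top-k")
}
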
557
558
// check if all ggml ops used by the sampler are supported by the backend
559
static bool llama_sampler_backend_support(
560
        llama_sampler              * smpl,
561
0
        ggml_backend_buffer_type_t   buft) {
562
0
    auto * device = ggml_backend_buft_get_device(buft);
563
0
    if (!device) {
564
        // CPU backend always supported
565
0
        return true;
566
0
    }
567
568
0
    ggml_init_params params = {
569
0
        /*.mem_size   =*/ 128*ggml_tensor_overhead() + ggml_graph_overhead(),
570
0
        /*.mem_buffer =*/ NULL,
571
0
        /*.no_alloc   =*/ true,
572
0
    };
573
574
0
    ggml_context_ptr ctx_ptr { ggml_init(params) };
575
0
    if (!ctx_ptr) {
576
0
        throw std::runtime_error(format("failed to create ggml context"));
577
0
    }
578
579
0
    ggml_context * ctx = ctx_ptr.get();
580
581
0
    const int64_t n = 1024*1024;
582
583
0
    llama_sampler_data data = {
584
0
        /*.logits     = */ ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n),
585
0
        /*.probs      = */ nullptr,
586
0
        /*.sampled    = */ nullptr,
587
0
        /*.candidates = */ ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n),
588
0
    };
589
590
0
    ggml_cgraph * gf = ggml_new_graph(ctx);
591
592
0
    smpl->iface->backend_apply(smpl, ctx, gf, &data);
593
594
0
    if (data.logits) {
595
0
        ggml_build_forward_expand(gf, data.logits);
596
0
    }
597
598
0
    if (data.probs) {
599
0
        ggml_build_forward_expand(gf, data.probs);
600
0
    }
601
602
0
    if (data.sampled) {
603
0
        ggml_build_forward_expand(gf, data.sampled);
604
0
    }
605
606
0
    if (data.candidates) {
607
0
        ggml_build_forward_expand(gf, data.candidates);
608
0
    }
609
610
0
    for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
611
0
        struct ggml_tensor * op = ggml_graph_node(gf, i);
612
613
0
        if (!ggml_backend_dev_supports_op(device, op)) {
614
0
            LLAMA_LOG_WARN("%s: device '%s' does not have support for op %s needed for sampler '%s'\n",
615
0
                    __func__, ggml_backend_dev_name(device), ggml_op_name(op->op), smpl->iface->name(smpl));
616
617
0
            return false;
618
0
        }
619
0
    }
620
621
0
    return true;
622
0
}
623
624
// sampler chain
625
626
0
static const char * llama_sampler_chain_name(const struct llama_sampler * /*smpl*/) {
627
0
    return "chain";
628
0
}
629
630
0
static void llama_sampler_chain_accept(struct llama_sampler * smpl, llama_token token) {
631
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
632
633
0
    time_meas tm(chain->t_sample_us, chain->params.no_perf);
634
635
0
    for (auto & smpl : chain->samplers) {
636
0
        llama_sampler_accept(smpl.ptr, token);
637
0
    }
638
639
0
    chain->n_sample++;
640
0
}
641
642
0
static void llama_sampler_chain_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
643
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
644
645
0
    time_meas tm(chain->t_sample_us, chain->params.no_perf);
646
647
0
    bool is_backend = chain->is_init;
648
649
0
    for (auto & smpl : chain->samplers) {
650
0
        if (is_backend && smpl.is_backend) {
651
0
            continue;
652
0
        }
653
654
0
        is_backend = false;
655
656
0
        if (smpl.ptr->iface->apply == nullptr) {
657
0
            continue;
658
0
        }
659
660
0
        llama_sampler_apply(smpl.ptr, cur_p);
661
0
    }
662
0
}
663
664
0
static void llama_sampler_chain_reset(struct llama_sampler * smpl) {
665
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
666
667
0
    for (auto & smpl : chain->samplers) {
668
0
        llama_sampler_reset(smpl.ptr);
669
0
    }
670
0
}
671
672
0
static struct llama_sampler * llama_sampler_chain_clone(const struct llama_sampler * smpl) {
673
0
    const auto * chain_src = (const llama_sampler_chain *) smpl->ctx;
674
675
0
    auto * result = llama_sampler_chain_init(chain_src->params);
676
677
0
    for (const auto & smpl : chain_src->samplers) {
678
0
        llama_sampler_chain_add(result, llama_sampler_clone(smpl.ptr));
679
0
    }
680
681
0
    return result;
682
0
}
683
684
0
static void llama_sampler_chain_free(struct llama_sampler * smpl) {
685
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
686
687
0
    for (auto & smpl : chain->samplers) {
688
0
        llama_sampler_free(smpl.ptr);
689
0
    }
690
691
0
    delete chain;
692
0
}
693
694
static bool llama_sampler_chain_backend_init(
695
        struct llama_sampler       * smpl,
696
0
        ggml_backend_buffer_type_t   buft) {
697
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
698
699
0
    GGML_ASSERT(chain->is_init == false && "llama_sampler_chain_backend_init() called twice");
700
701
0
    chain->is_init = true;
702
703
0
    bool res = true;
704
705
0
    for (auto & smpl : chain->samplers) {
706
0
        bool res_cur = true;
707
708
        // to be able to run a sampler on the backend, it has to:
709
        // - have the .backend_init() API implemented
710
        // - return true during .backend_init()
711
0
        if (smpl.ptr->iface->backend_init) {
712
0
            if (!smpl.ptr->iface->backend_init(smpl.ptr, buft)) {
713
0
                res_cur = false;
714
0
            }
715
0
        } else {
716
0
            res_cur = false;
717
0
        }
718
719
0
        smpl.is_backend = res_cur;
720
721
0
        res = res && res_cur;
722
0
    }
723
724
0
    return res;
725
0
}
726
727
static void llama_sampler_chain_backend_accept(
728
        struct llama_sampler * smpl,
729
        ggml_context * ctx,
730
        ggml_cgraph * gf,
731
0
        struct ggml_tensor * selected_token) {
732
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
733
734
0
    for (auto & smpl : chain->samplers) {
735
0
        if (!smpl.is_backend) {
736
0
            break;
737
0
        }
738
739
0
        if (smpl.ptr->iface->backend_accept) {
740
0
            smpl.ptr->iface->backend_accept(smpl.ptr, ctx, gf, selected_token);
741
0
        }
742
0
    }
743
0
}
744
745
static void llama_sampler_chain_backend_apply(
746
          struct llama_sampler      * smpl,
747
          struct ggml_context       * ctx,
748
          struct ggml_cgraph        * gf,
749
0
          struct llama_sampler_data * data) {
750
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
751
752
0
    GGML_ASSERT(chain->is_init && "llama_sampler_chain_backend_init() not called");
753
754
0
    for (auto & smpl : chain->samplers) {
755
0
        if (!smpl.is_backend) {
756
0
            break;
757
0
        }
758
759
0
        if (smpl.ptr->iface->backend_apply) {
760
0
            smpl.ptr->iface->backend_apply(smpl.ptr, ctx, gf, data);
761
0
        }
762
0
    }
763
0
}
764
765
0
static void llama_sampler_chain_backend_set_input(struct llama_sampler * smpl) {
766
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
767
768
0
    for (auto & smpl : chain->samplers) {
769
0
        if (!smpl.is_backend) {
770
0
            break;
771
0
        }
772
773
0
        if (smpl.ptr->iface->backend_set_input) {
774
0
            smpl.ptr->iface->backend_set_input(smpl.ptr);
775
0
        }
776
0
    }
777
0
}
778
779
static struct llama_sampler_i llama_sampler_chain_i = {
780
    /* .name              = */ llama_sampler_chain_name,
781
    /* .accept            = */ llama_sampler_chain_accept,
782
    /* .apply             = */ llama_sampler_chain_apply,
783
    /* .reset             = */ llama_sampler_chain_reset,
784
    /* .clone             = */ llama_sampler_chain_clone,
785
    /* .free              = */ llama_sampler_chain_free,
786
    /* .backend_init      = */ llama_sampler_chain_backend_init,
787
    /* .backend_accept    = */ llama_sampler_chain_backend_accept,
788
    /* .backend_apply     = */ llama_sampler_chain_backend_apply,
789
    /* .backend_set_input = */ llama_sampler_chain_backend_set_input,
790
};
791
792
0
struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params) {
793
0
    return llama_sampler_init(
794
0
        /* .iface = */ &llama_sampler_chain_i,
795
0
        /* .ctx   = */ new llama_sampler_chain {
796
0
            /* .params      = */ params,
797
0
            /* .is_init     = */ false,
798
0
            /* .samplers    = */ {},
799
0
            /* .cur         = */ {},
800
0
            /* .t_sample_us = */ 0,
801
0
            /* .n_sample    = */ 0,
802
0
        }
803
0
    );
804
0
}
805
806
0
llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx) {
807
0
    const llama_token   sampled_token  = llama_get_sampled_token_ith     (ctx, idx);
808
0
    const float *       sampled_probs  = llama_get_sampled_probs_ith     (ctx, idx);
809
0
    const float *       sampled_logits = llama_get_sampled_logits_ith    (ctx, idx);
810
0
    const llama_token * sampled_ids    = llama_get_sampled_candidates_ith(ctx, idx);
811
812
    // If a backend sampler has already sampled a token, return it.
813
0
    if (sampled_token != LLAMA_TOKEN_NULL) {
814
0
        LLAMA_LOG_DEBUG("%s: Backend sampler selected token for idx %d. Skipping CPU samplers\n", __func__, idx);
815
0
        return sampled_token;
816
0
    }
817
818
0
    const llama_model * model = llama_get_model(ctx);
819
0
    const llama_vocab * vocab = llama_model_get_vocab(model);
820
821
0
    const int n_vocab = llama_vocab_n_tokens(vocab);
822
823
    // use pre-allocated buffer from chain if available, otherwise allocate locally
824
0
    std::vector<llama_token_data> * cur_ptr;
825
0
    std::vector<llama_token_data> cur_local;
826
827
0
    if (smpl->iface == &llama_sampler_chain_i) {
828
0
        auto * chain = (llama_sampler_chain *) smpl->ctx;
829
0
        cur_ptr = &chain->cur;
830
0
    } else {
831
0
        cur_ptr = &cur_local;
832
0
    }
833
834
0
    auto & cur = *cur_ptr;
835
836
0
    if (sampled_probs) {
837
0
        const uint32_t sampled_probs_count = llama_get_sampled_probs_count_ith(ctx, idx);
838
0
        cur.resize(sampled_probs_count);
839
0
        for (uint32_t i = 0; i < sampled_probs_count; ++i) {
840
0
            cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], sampled_probs[i]};
841
0
        }
842
0
    } else if (sampled_logits) {
843
0
        const uint32_t sampled_logits_count = llama_get_sampled_logits_count_ith(ctx, idx);
844
0
        cur.resize(sampled_logits_count);
845
0
        for (llama_token i = 0; i < (int)sampled_logits_count; i++) {
846
0
            cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], 0.0f};
847
0
        }
848
0
    } else {
849
0
        const auto * logits = llama_get_logits_ith(ctx, idx);
850
0
        GGML_ASSERT(logits != nullptr);
851
0
        cur.resize(n_vocab);
852
0
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
853
0
            cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
854
0
        }
855
0
    }
856
857
0
    llama_token_data_array cur_p = {
858
0
        /* .data       = */ cur.data(),
859
0
        /* .size       = */ cur.size(),
860
0
        /* .selected   = */ -1,
861
0
        /* .sorted     = */ false,
862
0
    };
863
864
0
    llama_sampler_apply(smpl, &cur_p);
865
866
0
    GGML_ASSERT(cur_p.selected >= 0 && cur_p.selected < (int32_t) cur_p.size);
867
868
0
    auto token = cur_p.data[cur_p.selected].id;
869
870
0
    llama_sampler_accept(smpl, token);
871
872
0
    return token;
873
0
}
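
Illustrative usage sketch (not part of the covered file): a typical way to combine the sampler chain with llama_sampler_sample(). llama_sampler_chain_default_params() is declared in llama.h rather than in this file, ctx stands for an already created llama_context, and the helper name sample_next_token is hypothetical.

static llama_token sample_next_token(llama_context * ctx) {
    llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

    llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
    llama_sampler_chain_add(chain, llama_sampler_init_top_p(0.95f, 1));
    llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

    // sample from the logits of the last decoded position
    const llama_token tok = llama_sampler_sample(chain, ctx, -1);

    llama_sampler_free(chain);
    return tok;
}
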
874
875
876
0
void llama_sampler_chain_add(struct llama_sampler * chain, struct llama_sampler * smpl) {
877
0
    auto * p = (llama_sampler_chain *) chain->ctx;
878
0
    p->samplers.push_back({
879
0
        /* .is_backend = */ false,
880
0
        /* .ptr        = */ smpl,
881
0
    });
882
0
}
883
884
0
struct llama_sampler * llama_sampler_chain_get(struct llama_sampler * chain, int32_t i) {
885
0
    if (chain == nullptr) {
886
0
        return nullptr;
887
0
    }
888
889
0
    if (chain->iface != &llama_sampler_chain_i) {
890
0
        return nullptr;
891
0
    }
892
893
0
    if (i == -1) {
894
0
        return chain;
895
0
    }
896
897
0
    const auto * p = (const llama_sampler_chain *) chain->ctx;
898
899
0
    if (i < 0 || (size_t) i >= p->samplers.size()) {
900
0
        return nullptr;
901
0
    }
902
903
0
    return p->samplers[i].ptr;
904
0
}
905
906
0
struct llama_sampler * llama_sampler_chain_remove(struct llama_sampler * chain, int32_t i) {
907
0
    auto * p = (llama_sampler_chain *) chain->ctx;
908
909
0
    if (i < 0 || (size_t) i >= p->samplers.size()) {
910
0
        return nullptr;
911
0
    }
912
913
0
    auto * result = p->samplers[i].ptr;
914
0
    p->samplers.erase(p->samplers.begin() + i);
915
916
0
    return result;
917
0
}
918
919
0
int llama_sampler_chain_n(const struct llama_sampler * chain) {
920
0
    const auto * p = (const llama_sampler_chain *) chain->ctx;
921
922
0
    return p->samplers.size();
923
0
}
924
925
//
926
// samplers
927
//
928
929
// greedy
930
931
struct llama_sampler_greedy : public llama_sampler_backend {
932
};
933
934
0
static const char * llama_sampler_greedy_name(const struct llama_sampler * smpl) {
935
0
    auto * sctx = (llama_sampler_greedy *) smpl->ctx;
936
0
    return sctx->get_name();
937
0
}
938
939
0
static void llama_sampler_greedy_reset(struct llama_sampler * smpl) {
940
0
    auto * ctx = (llama_sampler_greedy *) smpl->ctx;
941
0
    GGML_UNUSED(ctx);
942
0
}
943
944
0
static struct llama_sampler * llama_sampler_greedy_clone(const struct llama_sampler * smpl) {
945
0
    const auto * ctx = (const llama_sampler_greedy *) smpl->ctx;
946
0
    auto * result = llama_sampler_init_greedy();
947
948
    // copy the state
949
0
    {
950
0
        auto * result_ctx = (llama_sampler_greedy *) result->ctx;
951
952
0
        GGML_UNUSED(ctx);
953
0
        GGML_UNUSED(result_ctx);
954
0
    }
955
956
0
    return result;
957
0
}
958
959
0
static void llama_sampler_greedy_free(struct llama_sampler * smpl) {
960
0
    delete (llama_sampler_greedy *) smpl->ctx;
961
0
}
962
963
0
static void llama_sampler_greedy_apply(struct llama_sampler * /*smpl*/, llama_token_data_array * cur_p) {
964
0
    cur_p->selected = 0;
965
0
    for (size_t i = 1; i < cur_p->size; ++i) {
966
0
        if (cur_p->data[i].logit > cur_p->data[cur_p->selected].logit) {
967
0
            cur_p->selected = i;
968
0
        }
969
0
    }
970
0
}
971
972
static bool llama_sampler_greedy_backend_init(
973
        struct llama_sampler       * smpl,
974
0
        ggml_backend_buffer_type_t   buft) {
975
0
    auto * sctx = (llama_sampler_greedy *) smpl->ctx;
976
977
0
    const bool res = llama_sampler_backend_support(smpl, buft);
978
979
0
    sctx->init(res);
980
981
0
    return res;
982
0
}
983
984
static void llama_sampler_greedy_backend_apply(
985
        struct llama_sampler      * smpl,
986
        struct ggml_context       * ctx,
987
        struct ggml_cgraph        * gf,
988
0
        struct llama_sampler_data * data) {
989
0
    GGML_UNUSED(gf);
990
0
    GGML_UNUSED(smpl);
991
992
0
    struct ggml_tensor * curl = ggml_argmax(ctx, data->logits);
993
0
    ggml_set_name(curl, "greedy_argmax");
994
995
0
    data->sampled = curl;
996
0
}
997
998
static struct llama_sampler_i llama_sampler_greedy_i = {
999
    /* .name              = */ llama_sampler_greedy_name,
1000
    /* .accept            = */ nullptr,
1001
    /* .apply             = */ llama_sampler_greedy_apply,
1002
    /* .reset             = */ llama_sampler_greedy_reset,
1003
    /* .clone             = */ llama_sampler_greedy_clone,
1004
    /* .free              = */ llama_sampler_greedy_free,
1005
    /* .backend_init      = */ llama_sampler_greedy_backend_init,
1006
    /* .backend_accept    = */ nullptr,
1007
    /* .backend_apply     = */ llama_sampler_greedy_backend_apply,
1008
    /* .backend_set_input = */ nullptr,
1009
};
1010
1011
0
struct llama_sampler * llama_sampler_init_greedy() {
1012
0
    return llama_sampler_init(
1013
0
        /* .iface = */ &llama_sampler_greedy_i,
1014
0
        /* .ctx   = */ new llama_sampler_greedy {
1015
0
            ("greedy"),
1016
0
        }
1017
0
    );
1018
0
}
1019
1020
// dist
1021
1022
struct llama_sampler_dist : public llama_sampler_backend {
1023
    const uint32_t seed;
1024
          uint32_t seed_cur;
1025
1026
    std::mt19937 rng;
1027
1028
    // backend input
1029
    struct ggml_tensor * inp_uniform;
1030
1031
    ggml_context_ptr        inp_ctx;
1032
    ggml_backend_buffer_ptr inp_buf;
1033
};
1034
1035
0
static const char * llama_sampler_dist_name(const struct llama_sampler * smpl) {
1036
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1037
0
    return sctx->get_name();
1038
0
}
1039
1040
0
static void llama_sampler_dist_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1041
0
    auto * ctx = (llama_sampler_dist *) smpl->ctx;
1042
1043
    // edge cases
1044
0
    if (cur_p->size == 0) {
1045
0
        cur_p->selected = -1;
1046
0
        return;
1047
0
    }
1048
1049
0
    cur_p->selected = 0;
1050
1051
0
    if (cur_p->size == 1) {
1052
0
        cur_p->data[0].p = 1.0f;
1053
0
        return;
1054
0
    }
1055
1056
    // max logit for numerical stability
1057
0
    float max_l = cur_p->data[0].logit;
1058
0
    if (!cur_p->sorted) {
1059
0
        for (size_t i = 1; i < cur_p->size; ++i) {
1060
0
            max_l = std::max(max_l, cur_p->data[i].logit);
1061
0
        }
1062
0
    }
1063
1064
    // apply softmax to obtain the probabilities
1065
0
    double sum_cum = 0.0f;
1066
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1067
0
        float p = expf(cur_p->data[i].logit - max_l);
1068
0
        cur_p->data[i].p = p;
1069
0
        sum_cum += p;
1070
0
    }
1071
1072
0
#if 1
1073
    // sample from the obtained probabilities and normalize the probs in a single pass
1074
    // this is ~3x faster on Mac with full gpt-oss vocab than the version below
1075
    //
1076
0
    std::uniform_real_distribution<double> dist(0.0f, 1.0f);
1077
0
    const double rnd = dist(ctx->rng);
1078
1079
0
          double sum_run = 0.0f;
1080
0
    const double sum_tgt = sum_cum*rnd;
1081
1082
0
    bool found = false;
1083
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1084
0
        if (!found) {
1085
            // accumulate probs until we reach the target sum
1086
0
            sum_run += cur_p->data[i].p;
1087
0
            if (sum_run >= sum_tgt) {
1088
0
                cur_p->selected = i;
1089
0
                found = true;
1090
0
            }
1091
0
        }
1092
1093
        // normalize probs
1094
0
        cur_p->data[i].p /= sum_cum;
1095
0
    }
1096
1097
    // fallback to the last token (this should not happen in practice)
1098
0
    assert(found);
1099
0
    if (!found) {
1100
0
        cur_p->selected = cur_p->size - 1;
1101
0
    }
1102
#else
1103
    // for clarity, this is the same as above but does one pass for normalization and one extra pass for sampling
1104
    for (size_t i = 0; i < cur_p->size; ++i) {
1105
        cur_p->data[i].p /= sum_cum;
1106
    }
1107
1108
    cur_p->selected = llama_sample_dist(cur_p, ctx->rng);
1109
#endif
1110
0
}
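
Illustrative sketch (not part of the covered file): the single-pass trick above reduced to a standalone helper — draw u in [0, 1), scale it by the unnormalized total, and select the first index whose running sum crosses that target, which is inverse-CDF sampling without a separate normalization pass. The helper name sample_unnormalized is hypothetical.

#include <random>
#include <vector>

static int sample_unnormalized(const std::vector<float> & w, std::mt19937 & rng) {
    // total unnormalized mass
    double total = 0.0;
    for (float x : w) {
        total += x;
    }

    // target point in the cumulative sum, drawn uniformly in [0, total)
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    const double target = total*dist(rng);

    // first index whose running sum reaches the target
    double run = 0.0;
    for (size_t i = 0; i < w.size(); ++i) {
        run += w[i];
        if (run >= target) {
            return (int) i;
        }
    }
    return (int) w.size() - 1; // numerical safety fallback
}
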
1111
1112
0
static void llama_sampler_dist_reset(struct llama_sampler * smpl) {
1113
0
    auto * ctx = (llama_sampler_dist *) smpl->ctx;
1114
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
1115
0
    ctx->rng.seed(ctx->seed_cur);
1116
0
}
1117
1118
0
static struct llama_sampler * llama_sampler_dist_clone(const struct llama_sampler * smpl) {
1119
0
    const auto * ctx = (const llama_sampler_dist *) smpl->ctx;
1120
0
    auto * result = llama_sampler_init_dist(ctx->seed);
1121
1122
    // copy the state
1123
0
    {
1124
0
        auto * result_ctx = (llama_sampler_dist *) result->ctx;
1125
1126
0
        result_ctx->rng = ctx->rng;
1127
0
    }
1128
1129
0
    return result;
1130
0
}
1131
1132
0
static void llama_sampler_dist_free(struct llama_sampler * smpl) {
1133
0
    delete (llama_sampler_dist *) smpl->ctx;
1134
0
}
1135
1136
static bool llama_sampler_dist_backend_init(
1137
        struct llama_sampler       * smpl,
1138
0
        ggml_backend_buffer_type_t   buft) {
1139
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1140
1141
    // allocate inputs
1142
0
    {
1143
0
        ggml_init_params params = {
1144
0
            /*.mem_size   =*/ ggml_tensor_overhead(),
1145
0
            /*.mem_buffer =*/ nullptr,
1146
0
            /*.no_alloc   =*/ true,
1147
0
        };
1148
1149
0
        sctx->inp_ctx.reset(ggml_init(params));
1150
1151
        // Create the uniform random scalar input tensor. This will be set by
1152
        // llama_sampler_dist_backend_set_input after this graph is built.
1153
0
        sctx->inp_uniform = ggml_new_tensor_1d(sctx->inp_ctx.get(), GGML_TYPE_F32, 1);
1154
0
        ggml_set_name (sctx->inp_uniform, "uniform");
1155
0
        ggml_set_input(sctx->inp_uniform);
1156
1157
        // Allocate all tensors from our context to the backend
1158
0
        sctx->inp_buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(sctx->inp_ctx.get(), buft));
1159
1160
0
        ggml_backend_buffer_clear(sctx->inp_buf.get(), 0);
1161
0
    }
1162
1163
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1164
1165
0
    sctx->init(res);
1166
1167
0
    if (!res) {
1168
0
        sctx->inp_ctx.reset(nullptr);
1169
0
        sctx->inp_buf.reset(nullptr);
1170
0
    }
1171
1172
0
    return res;
1173
0
}
1174
1175
static void llama_sampler_dist_backend_apply(
1176
        struct llama_sampler      * smpl,
1177
        struct ggml_context       * ctx,
1178
        struct ggml_cgraph        * gf,
1179
0
        struct llama_sampler_data * data) {
1180
0
    GGML_UNUSED(gf);
1181
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1182
1183
0
    struct ggml_tensor * probs = ggml_soft_max(ctx, data->logits);
1184
0
    ggml_set_name(probs, "dist_probs");
1185
1186
0
    struct ggml_tensor * cumsum = ggml_cumsum(ctx, probs);
1187
0
    ggml_set_name(cumsum, "dist_cumsum");
1188
1189
    // The uniform tensor holds a random value which is subtracted from
1190
    // the cumsum tensor (the uniform tensor is broadcast by ggml_sub).
1191
    // Recall that each entry in cumsum is the cumulative probability up to that
1192
    // index so values stay negative while the cumulative total is below the
1193
    // random value, and become zero/positive once the threshold is crossed.
1194
0
    struct ggml_tensor * diff = ggml_sub(ctx, cumsum, sctx->inp_uniform);
1195
0
    ggml_set_name(diff, "dist_diff");
1196
1197
    // The ggml_step function produces a tensor where entries are 1 if the
1198
    // corresponding entry in diff is > 0, and 0 otherwise. So all values up to
1199
    // the index where the cumulative probability exceeds the random value are 0,
1200
    // and all entries after that are 1.
1201
0
    struct ggml_tensor * mask = ggml_step(ctx, diff);
1202
0
    ggml_set_name(mask, "dist_mask");
1203
1204
    // Taking the sum of the mask gives us the number of elements past the threshold
1205
    // we are interested in.
1206
0
    struct ggml_tensor * idxf = ggml_sum(ctx, mask);
1207
0
    ggml_set_name(idxf, "dist_index_f32");
1208
1209
    // Use ggml_scale_bias to scale the index value by -1 and then add the size
1210
    // of the mask to that value so we get the correct index ((-1 * idxf) + n).
1211
0
    struct ggml_tensor * idx = ggml_cast(ctx, ggml_scale_bias(ctx, idxf, -1.0f, mask->ne[0]), GGML_TYPE_I32);
1212
0
    ggml_set_name(idx, "dist_index_i32");
1213
1214
    // Map back to original vocab ids if a candidates tensor is available.
1215
0
    struct ggml_tensor * sampled_token = idx;
1216
0
    if (data->candidates != nullptr) {
1217
0
        struct ggml_tensor * candidates = ggml_reshape_2d(ctx, data->candidates, 1, ggml_nelements(data->candidates));
1218
1219
0
        sampled_token = ggml_get_rows(ctx, candidates, idx);
1220
0
        ggml_set_name(sampled_token, "dist_sampled_token");
1221
0
    }
1222
1223
0
    data->sampled = sampled_token;
1224
0
    data->probs = probs;
1225
0
}
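
Illustrative CPU sketch (not part of the covered file) of the graph above: the cumsum/step/sum combination computes the index of the first entry whose cumulative probability exceeds the uniform draw, using only element-wise ops and a reduction. The helper name dist_index is hypothetical.

#include <vector>

static int dist_index(const std::vector<float> & probs, float u) {
    // cumulative sum of the probabilities
    std::vector<float> cumsum(probs.size());
    float acc = 0.0f;
    for (size_t i = 0; i < probs.size(); ++i) {
        acc += probs[i];
        cumsum[i] = acc;
    }

    // step(cumsum - u): 1 for entries past the crossing point, 0 before it
    int ones = 0;
    for (float c : cumsum) {
        ones += (c - u) > 0.0f ? 1 : 0;
    }

    // index of the first entry whose cumulative probability exceeds u
    return (int) probs.size() - ones;
}
// e.g. probs = {0.1, 0.2, 0.3, 0.4}, u = 0.5:
// cumsum = {0.1, 0.3, 0.6, 1.0}, step = {0, 0, 1, 1}, sum = 2, index = 4 - 2 = 2
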
1226
1227
0
static void llama_sampler_dist_backend_set_input(struct llama_sampler * smpl) {
1228
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1229
0
    GGML_ASSERT(sctx->inp_uniform != nullptr);
1230
1231
    // We sample in double precision and cast to float to match rnd numbers of
1232
    // llama_sampler_dist which uses double precision (sampling from
1233
    // std::uniform_real_distribution<double> and
1234
    // std::uniform_real_distribution<float> with same rng will produce
1235
    // different sequences).
1236
0
    std::uniform_real_distribution<double> dist(0.0f, 1.0f);
1237
0
    const float rnd = dist(sctx->rng);
1238
1239
0
    ggml_backend_tensor_set(sctx->inp_uniform, &rnd, 0, sizeof(float));
1240
0
}
1241
1242
static struct llama_sampler_i llama_sampler_dist_i = {
1243
    /* .name              = */ llama_sampler_dist_name,
1244
    /* .accept            = */ nullptr,
1245
    /* .apply             = */ llama_sampler_dist_apply,
1246
    /* .reset             = */ llama_sampler_dist_reset,
1247
    /* .clone             = */ llama_sampler_dist_clone,
1248
    /* .free              = */ llama_sampler_dist_free,
1249
    /* .backend_init      = */ llama_sampler_dist_backend_init,
1250
    /* .backend_accept    = */ nullptr,
1251
    /* .backend_apply     = */ llama_sampler_dist_backend_apply,
1252
    /* .backend_set_input = */ llama_sampler_dist_backend_set_input,
1253
};
1254
1255
0
struct llama_sampler * llama_sampler_init_dist(uint32_t seed) {
1256
0
    auto seed_cur = get_rng_seed(seed);
1257
0
    return llama_sampler_init(
1258
0
        /* .iface = */ &llama_sampler_dist_i,
1259
0
        /* .ctx   = */ new llama_sampler_dist {
1260
0
            ("dist"),
1261
0
            /* .seed        = */ seed,
1262
0
            /* .seed_cur    = */ seed_cur,
1263
0
            /* .rng         = */ std::mt19937(seed_cur),
1264
0
            /* .inp_uniform = */ nullptr,
1265
0
            /* .inp_ctx     = */ nullptr,
1266
0
            /* .inp_buf     = */ nullptr,
1267
0
        }
1268
0
    );
1269
0
}
1270
1271
// top-k
1272
1273
struct llama_sampler_top_k : public llama_sampler_backend {
1274
    const int32_t k;
1275
};
1276
1277
0
static const char * llama_sampler_top_k_name(const struct llama_sampler * smpl) {
1278
0
    auto * sctx = (llama_sampler_top_k *) smpl->ctx;
1279
0
    return sctx->get_name();
1280
0
}
1281
1282
0
static void llama_sampler_top_k_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1283
0
    auto * ctx = (llama_sampler_top_k *) smpl->ctx;
1284
0
    llama_sampler_top_k_impl(cur_p, ctx->k);
1285
0
}
1286
1287
0
static struct llama_sampler * llama_sampler_top_k_clone(const struct llama_sampler * smpl) {
1288
0
    const auto * ctx = (const llama_sampler_top_k *) smpl->ctx;
1289
0
    return llama_sampler_init_top_k(ctx->k);
1290
0
}
1291
1292
0
static void llama_sampler_top_k_free(struct llama_sampler * smpl) {
1293
0
    delete (llama_sampler_top_k *) smpl->ctx;
1294
0
}
1295
1296
static bool llama_sampler_top_k_backend_init(
1297
        struct llama_sampler       * smpl,
1298
0
        ggml_backend_buffer_type_t   buft) {
1299
0
    auto * sctx = (llama_sampler_top_k *) smpl->ctx;
1300
1301
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1302
1303
0
    sctx->init(res);
1304
1305
0
    return res;
1306
0
}
1307
1308
static void llama_sampler_top_k_backend_apply(
1309
        struct llama_sampler      * smpl,
1310
        struct ggml_context       * ctx,
1311
        struct ggml_cgraph        * gf,
1312
0
        struct llama_sampler_data * data) {
1313
0
    auto * sctx = (llama_sampler_top_k *) smpl->ctx;
1314
1315
0
    struct ggml_tensor * top_k = ggml_top_k(ctx, data->logits, sctx->k);
1316
0
    ggml_set_name(top_k, "top_k");
1317
1318
0
    if (data->candidates) {
1319
0
        struct ggml_tensor * candidates_rows = ggml_reshape_2d(ctx, data->candidates, 1, data->candidates->ne[0]);
1320
0
        data->candidates = ggml_get_rows(ctx, candidates_rows, top_k);
1321
0
        data->candidates = ggml_reshape_1d(ctx, data->candidates, sctx->k);
1322
0
        ggml_set_name(data->candidates, "top_k_candidates");
1323
0
    } else {
1324
0
        data->candidates = top_k;
1325
0
    }
1326
1327
0
    struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]);
1328
0
    struct ggml_tensor * top_k_rows = ggml_get_rows(ctx, logits_rows, top_k);
1329
0
    data->logits = ggml_reshape_1d(ctx, top_k_rows, sctx->k);
1330
0
    ggml_set_name(top_k_rows, "top_k_rows");
1331
1332
0
    GGML_UNUSED(gf);
1333
0
}
1334
1335
static struct llama_sampler_i llama_sampler_top_k_i = {
1336
    /* .name              = */ llama_sampler_top_k_name,
1337
    /* .accept            = */ nullptr,
1338
    /* .apply             = */ llama_sampler_top_k_apply,
1339
    /* .reset             = */ nullptr,
1340
    /* .clone             = */ llama_sampler_top_k_clone,
1341
    /* .free              = */ llama_sampler_top_k_free,
1342
    /* .backend_init      = */ llama_sampler_top_k_backend_init,
1343
    /* .backend_accept    = */ nullptr,
1344
    /* .backend_apply     = */ llama_sampler_top_k_backend_apply,
1345
    /* .backend_set_input = */ nullptr,
1346
};
1347
1348
0
struct llama_sampler * llama_sampler_init_top_k(int32_t k) {
1349
0
    const bool is_empty = (k <= 0);
1350
1351
0
    if (is_empty) {
1352
0
        return llama_sampler_init_empty("?top-k");
1353
0
    }
1354
1355
0
    return llama_sampler_init(
1356
0
        /* .iface = */ &llama_sampler_top_k_i,
1357
0
        /* .ctx   = */ new llama_sampler_top_k {
1358
0
            ("top-k"),
1359
0
            /* .k = */ k,
1360
0
        }
1361
0
    );
1362
0
}
1363
1364
// top-p
1365
1366
struct llama_sampler_top_p : public llama_sampler_backend {
1367
    const float  p;
1368
    const size_t min_keep;
1369
1370
    std::vector<llama_token_data> buf_sort;
1371
};
1372
1373
0
static const char * llama_sampler_top_p_name(const struct llama_sampler * smpl) {
1374
0
    auto * sctx = (llama_sampler_top_p *) smpl->ctx;
1375
0
    return sctx->get_name();
1376
0
}
1377
1378
0
static void llama_sampler_top_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1379
0
    auto * ctx = (llama_sampler_top_p *) smpl->ctx;
1380
1381
0
    if (ctx->p >= 1.0f) {
1382
0
        return;
1383
0
    }
1384
1385
0
    llama_sampler_softmax_impl(cur_p, false);
1386
1387
0
    size_t k = cur_p->size;
1388
0
    auto * pdata = cur_p->data;
1389
1390
0
    auto & buf_sort = ctx->buf_sort;
1391
1392
    // if not sorted, try adaptive top-k sorting
1393
0
    if (!cur_p->sorted && cur_p->size > 1024) {
1394
0
        k = std::min<size_t>(256, cur_p->size);
1395
0
        llama_token_data_array_partial_sort(*cur_p, k, buf_sort);
1396
0
        pdata = buf_sort.data();
1397
0
    } else if (!cur_p->sorted) {
1398
        // small candidates -> sort inplace
1399
0
        llama_token_data_array_partial_sort_inplace(cur_p, k);
1400
0
    }
1401
1402
    // Compute the cumulative probabilities
1403
0
    float cum_sum = 0.0f;
1404
0
    size_t last_idx = cur_p->size;
1405
1406
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1407
0
        cum_sum += pdata[i].p;
1408
1409
        // Check if the running sum is at least p and we have kept at least min_keep tokens;
1410
        // we set the last index to i+1 to indicate that the current iterate should be included in the set
1411
0
        if (cum_sum >= ctx->p && i + 1 >= ctx->min_keep) {
1412
0
            last_idx = i + 1;
1413
0
            break;
1414
0
        }
1415
1416
        // we exceeded the current top-k heuristic -> increase k and continue
1417
0
        if (!cur_p->sorted && i == k - 1) {
1418
0
            k = cur_p->size;
1419
0
            llama_token_data_array_partial_sort(*cur_p, k, buf_sort);
1420
0
            pdata = buf_sort.data();
1421
0
        }
1422
0
    }
1423
1424
    // Resize the output vector to keep only the top-p tokens
1425
0
    if (!cur_p->sorted) {
1426
0
        std::copy(buf_sort.data(), buf_sort.data() + last_idx, cur_p->data);
1427
0
        cur_p->sorted = true;
1428
0
    }
1429
1430
0
    cur_p->size = last_idx;
1431
0
}
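
Illustrative sketch (not part of the covered file) of the cutoff logic above, assuming the probabilities are already sorted in descending order and normalized: keep the smallest prefix whose cumulative mass reaches p, but never fewer than min_keep tokens. The helper name top_p_cutoff is hypothetical.

#include <vector>

static size_t top_p_cutoff(const std::vector<float> & sorted_probs, float p, size_t min_keep) {
    float  cum_sum  = 0.0f;
    size_t last_idx = sorted_probs.size();

    for (size_t i = 0; i < sorted_probs.size(); ++i) {
        cum_sum += sorted_probs[i];

        // keep token i as well (hence i + 1), but never fewer than min_keep tokens
        if (cum_sum >= p && i + 1 >= min_keep) {
            last_idx = i + 1;
            break;
        }
    }
    return last_idx; // number of tokens to keep
}
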
1432
1433
0
static struct llama_sampler * llama_sampler_top_p_clone(const struct llama_sampler * smpl) {
1434
0
    const auto * ctx = (const llama_sampler_top_p *) smpl->ctx;
1435
0
    return llama_sampler_init_top_p(ctx->p, ctx->min_keep);
1436
0
}
1437
1438
0
static void llama_sampler_top_p_free(struct llama_sampler * smpl) {
1439
0
    delete (llama_sampler_top_p *) smpl->ctx;
1440
0
}
1441
1442
static bool llama_sampler_top_p_backend_init(
1443
        struct llama_sampler       * smpl,
1444
0
        ggml_backend_buffer_type_t   buft) {
1445
0
    auto * sctx = (llama_sampler_top_p *) smpl->ctx;
1446
1447
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1448
1449
0
    sctx->init(res);
1450
1451
0
    return res;
1452
0
}
1453
1454
static void llama_sampler_top_p_backend_apply(
1455
        struct llama_sampler      * smpl,
1456
        struct ggml_context       * ctx,
1457
        struct ggml_cgraph        * gf,
1458
0
        struct llama_sampler_data * data) {
1459
0
    auto * sctx = (llama_sampler_top_p *) smpl->ctx;
1460
1461
0
    auto ggml_sort = [ctx](struct ggml_tensor * a, struct ggml_tensor * b) {
1462
0
        GGML_ASSERT(ggml_nrows(a) == 1);
1463
0
        struct ggml_tensor * a_reshaped = ggml_reshape_2d(ctx, a, 1, a->ne[0]);
1464
0
        struct ggml_tensor * a_sorted   = ggml_get_rows(ctx, a_reshaped, b);
1465
0
        return ggml_reshape_1d(ctx, a_sorted, a->ne[0]);
1466
0
    };
1467
1468
    // Get the indices that sort the logits in descending order.
1469
0
    struct ggml_tensor * sorted_idx = ggml_argsort(ctx, data->logits, GGML_SORT_ORDER_DESC);
1470
0
    ggml_set_name(sorted_idx, "top_p_sorted_idx");
1471
1472
    // Do the sorting via reshape + get_rows
1473
0
    struct ggml_tensor * sorted_logits = ggml_sort(data->logits, sorted_idx);
1474
0
    ggml_set_name(sorted_logits, "top_p_sorted_logits");
1475
1476
0
    struct ggml_tensor * softmax = ggml_soft_max(ctx, sorted_logits);
1477
0
    ggml_set_name(softmax, "top_p_softmax");
1478
1479
    // If candidates are provided, sort them as well. Otherwise, set sorted indices as candidates.
1480
0
    if (data->candidates) {
1481
0
        data->candidates = ggml_sort(data->candidates, sorted_idx);
1482
0
    } else {
1483
0
        data->candidates = sorted_idx;
1484
0
    }
1485
0
    ggml_set_name(data->candidates, "top_p_candidates");
1486
1487
    // Compute Cumulative Distribution Function (CDF) by means of GGML_OP_CUMSUM.
1488
0
    struct ggml_tensor * cdf = ggml_cumsum(ctx, softmax);
1489
0
    ggml_set_name(cdf, "top_p_cdf");
1490
1491
    // Invert CDF and add top-p value so that ggml_step yields 1 for values we want to keep
1492
0
    struct ggml_tensor * cdf_scaled = ggml_scale_bias(ctx, cdf, -1.0f, sctx->p);
1493
0
    ggml_set_name(cdf_scaled, "top_p_cdf_scaled");
1494
1495
0
    struct ggml_tensor * mask = ggml_step(ctx, cdf_scaled);
1496
0
    ggml_set_name(mask, "top_p_mask");
1497
1498
    // Taking the sum of the mask gives us the number of elements below the p threshold
1499
    // we are interested in.
1500
0
    struct ggml_tensor * idxf = ggml_sum(ctx, mask);
1501
0
    ggml_set_name(idxf, "top_p_index_f32");
1502
1503
    // prevent out-of-bounds access
1504
0
    idxf = ggml_clamp(ctx, idxf, 0.0f, mask->ne[0] - 1);
1505
1506
    // construct ones tensor to set the value in the mask
1507
0
    struct ggml_tensor * ones = ggml_scale_bias(ctx, idxf, 0.0f, 1.0f);
1508
0
    ggml_set_name(ones, "top_p_ones");
1509
1510
    // Make top-p inclusive (i.e. return all values such that cum_sum/cdf >= p)
1511
0
    struct ggml_tensor * mask_reshaped = ggml_reshape_2d(ctx, mask, 1, mask->ne[0]);
1512
1513
0
    mask_reshaped = ggml_set_rows(ctx, mask_reshaped, ones, ggml_cast(ctx, idxf, GGML_TYPE_I32));
1514
0
    mask = ggml_reshape_1d(ctx, mask_reshaped, mask->ne[0]);
1515
1516
    // Use ggml_scale_bias (output = (a * s) + b) which in this case becomes:
1517
    // top_p_bias = (mask * 1e9f) - 1e9f.
1518
    // So entries in the mask that we want to discard will become -1e9f, and
1519
    // others will be 0 (meaning they will not affect the logits).
1520
0
    const float large_val = 1e9f;
1521
0
    struct ggml_tensor * top_p_bias = ggml_scale_bias(ctx, mask, large_val, -large_val);
1522
0
    ggml_set_name(top_p_bias, "top_p_bias");
1523
1524
0
    data->logits = ggml_add(ctx, sorted_logits, top_p_bias);
1525
0
    ggml_set_name(data->logits, "top_p_logits");
1526
1527
0
    GGML_UNUSED(gf);
1528
0
}
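
Illustrative CPU sketch (not part of the covered file) of the mask-to-bias trick above: a bias of mask*1e9 - 1e9 leaves kept logits unchanged and pushes discarded ones to -1e9, which acts as -inf for the following softmax. The helper name apply_mask_bias is hypothetical.

#include <vector>

static void apply_mask_bias(std::vector<float> & logits, const std::vector<float> & mask) {
    const float large_val = 1e9f;
    for (size_t i = 0; i < logits.size(); ++i) {
        // bias = mask*1e9 - 1e9: 0 where mask == 1 (keep), -1e9 where mask == 0 (discard)
        logits[i] += mask[i]*large_val - large_val;
    }
}
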
1529
1530
static struct llama_sampler_i llama_sampler_top_p_i = {
1531
    /* .name              = */ llama_sampler_top_p_name,
1532
    /* .accept            = */ nullptr,
1533
    /* .apply             = */ llama_sampler_top_p_apply,
1534
    /* .reset             = */ nullptr,
1535
    /* .clone             = */ llama_sampler_top_p_clone,
1536
    /* .free              = */ llama_sampler_top_p_free,
1537
    /* .backend_init      = */ llama_sampler_top_p_backend_init,
1538
    /* .backend_accept    = */ nullptr,
1539
    /* .backend_apply     = */ llama_sampler_top_p_backend_apply,
1540
    /* .backend_set_input = */ nullptr,
1541
};
1542
1543
0
struct llama_sampler * llama_sampler_init_top_p(float p, size_t min_keep) {
1544
0
    const bool is_empty = p >= 1.0f;
1545
1546
0
    if (is_empty) {
1547
0
        return llama_sampler_init_empty("?top-p");
1548
0
    }
1549
1550
0
    return llama_sampler_init(
1551
0
        /* .iface = */ &llama_sampler_top_p_i,
1552
0
        /* .ctx   = */ new llama_sampler_top_p {
1553
0
            ("top-p"),
1554
0
            /* .p        = */ p,
1555
0
            /* .min_keep = */ min_keep,
1556
0
            /* .buf_sort = */ {},
1557
0
        }
1558
0
    );
1559
0
}
1560
1561
// min-p
1562
1563
struct llama_sampler_min_p : public llama_sampler_backend {
1564
    const float  p;
1565
    const size_t min_keep;
1566
};
1567
1568
0
static const char * llama_sampler_min_p_name(const struct llama_sampler * smpl) {
1569
0
    auto * sctx = (llama_sampler_min_p *) smpl->ctx;
1570
0
    return sctx->get_name();
1571
0
}
1572
1573
0
static void llama_sampler_min_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1574
0
    auto * ctx = (llama_sampler_min_p *) smpl->ctx;
1575
1576
0
    if (ctx->p <= 0.0f || !cur_p->size) {
1577
0
        return;
1578
0
    }
1579
1580
0
    bool min_p_applied = false;
1581
1582
    // if the cur_p aren't sorted, try the unsorted implementation first
1583
0
    if (!cur_p->sorted) {
1584
0
        std::vector<llama_token_data> filtered_tokens;
1585
1586
0
        float max_logit = -FLT_MAX;
1587
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1588
0
            max_logit = std::max(max_logit, cur_p->data[i].logit);
1589
0
        }
1590
0
        const float min_logit = max_logit + logf(ctx->p); // min logit for p_i >= p * p_max
1591
1592
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1593
0
            if (cur_p->data[i].logit >= min_logit) {
1594
0
                filtered_tokens.push_back(cur_p->data[i]);
1595
0
            }
1596
0
        }
1597
1598
        // if we have enough values the operation was a success
1599
0
        if (!filtered_tokens.empty() && filtered_tokens.size() >= ctx->min_keep) {
1600
0
            std::copy(filtered_tokens.begin(), filtered_tokens.end(), cur_p->data);
1601
0
            cur_p->size = filtered_tokens.size();
1602
0
            min_p_applied = true;
1603
0
        }
1604
0
    }
1605
1606
    // if the cur_p are sorted or the unsorted implementation failed, use this implementation
1607
0
    if (!min_p_applied) {
1608
        // Sort the logits in descending order
1609
0
        if (!cur_p->sorted) {
1610
0
            llama_token_data_array_partial_sort_inplace(cur_p, cur_p->size);
1611
0
        }
1612
1613
0
        const float min_logit = cur_p->data[0].logit + logf(ctx->p); // min logit for p_i >= p * p_max
1614
0
        size_t i = 1; // first token always matches
1615
1616
0
        for (; i < cur_p->size; ++i) {
1617
0
            if (cur_p->data[i].logit < min_logit && i >= ctx->min_keep) {
1618
0
                break; // prob too small
1619
0
            }
1620
0
        }
1621
1622
        // Resize the output vector to keep only the matching tokens
1623
0
        cur_p->size = i;
1624
0
    }
1625
0
}
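The log-space threshold used above (min_logit = max_logit + logf(p)) follows from p_i = exp(l_i) / Z with a shared normalizer Z: p_i >= p * p_max is equivalent to l_i >= l_max + log(p), so no softmax is needed. A minimal standalone sketch with made-up values (not part of llama.cpp):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const float p = 0.1f;   // keep tokens with p_i >= 0.1 * p_max
    const std::vector<float> logits = { 3.0f, 1.5f, 0.2f, -2.0f };

    const float max_logit = *std::max_element(logits.begin(), logits.end());
    const float min_logit = max_logit + std::log(p);   // threshold directly in logit space

    for (float l : logits) {
        std::printf("logit %+.2f -> %s\n", l, l >= min_logit ? "keep" : "drop");
    }
    return 0;
}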
1626
1627
0
static struct llama_sampler * llama_sampler_min_p_clone(const struct llama_sampler * smpl) {
1628
0
    const auto * ctx = (const llama_sampler_min_p *) smpl->ctx;
1629
0
    return llama_sampler_init_min_p(ctx->p, ctx->min_keep);
1630
0
}
1631
1632
0
static void llama_sampler_min_p_free(struct llama_sampler * smpl) {
1633
0
    delete (llama_sampler_min_p *) smpl->ctx;
1634
0
}
1635
1636
static bool llama_sampler_min_p_backend_init(
1637
        struct llama_sampler       * smpl,
1638
0
        ggml_backend_buffer_type_t   buft) {
1639
0
    auto * sctx = (llama_sampler_min_p *) smpl->ctx;
1640
1641
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1642
1643
0
    sctx->init(res);
1644
1645
0
    return res;
1646
0
}
1647
1648
static void llama_sampler_min_p_backend_apply(
1649
        struct llama_sampler      * smpl,
1650
        struct ggml_context       * ctx,
1651
        struct ggml_cgraph        * gf,
1652
0
        struct llama_sampler_data * data) {
1653
0
    auto * sctx = (llama_sampler_min_p *) smpl->ctx;
1654
1655
0
    struct ggml_tensor * max_idx = ggml_argmax(ctx, data->logits);
1656
0
    ggml_set_name(max_idx, "max_idx");
1657
1658
0
    struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]);
1659
0
    ggml_set_name(logits_rows, "logits_rows");
1660
1661
0
    struct ggml_tensor * max_logit = ggml_get_rows(ctx, logits_rows, max_idx);
1662
0
    ggml_set_name(max_logit, "max_logit");
1663
1664
    // Calculate the threshold value.
1665
0
    struct ggml_tensor * threshold = ggml_scale_bias(ctx, max_logit, 1.0f, logf(sctx->p));
1666
0
    ggml_set_name(threshold, "min_p_threshold");
1667
1668
    // Subtract the threshold from logits.
1669
0
    struct ggml_tensor * sub = ggml_sub(ctx, data->logits, threshold);
1670
1671
    // Create a mask where logits below the threshold are 0 (discard),
1672
    // and others are 1 (keep).
1673
0
    struct ggml_tensor * mask = ggml_step(ctx, sub);
1674
0
    ggml_set_name(mask, "min_p_mask");
1675
1676
    // Use ggml_scale_bias (output = (a * s) + b) which in this case becomes:
1677
    // min_p_bias = (mask * 1e9f) - 1e9f.
1678
    // So entries in the mask that we want to discard will become -1e9f, and
1679
    // others will be 0 (meaning they will not affect the logits).
1680
0
    const float large_val = 1e9f;
1681
0
    struct ggml_tensor * min_p_bias = ggml_scale_bias(ctx, mask, large_val, -large_val);
1682
0
    ggml_set_name(min_p_bias, "min_p_bias");
1683
1684
    // Add the min_p bias to the logits.
1685
0
    data->logits = ggml_add(ctx, data->logits, min_p_bias);
1686
0
    ggml_set_name(data->logits, "min_p_logits");
1687
1688
0
    GGML_UNUSED(gf);
1689
0
}
1690
1691
static struct llama_sampler_i llama_sampler_min_p_i = {
1692
    /* .name              = */ llama_sampler_min_p_name,
1693
    /* .accept            = */ nullptr,
1694
    /* .apply             = */ llama_sampler_min_p_apply,
1695
    /* .reset             = */ nullptr,
1696
    /* .clone             = */ llama_sampler_min_p_clone,
1697
    /* .free              = */ llama_sampler_min_p_free,
1698
    /* .backend_init      = */ llama_sampler_min_p_backend_init,
1699
    /* .backend_accept    = */ nullptr,
1700
    /* .backend_apply     = */ llama_sampler_min_p_backend_apply,
1701
    /* .backend_set_input = */ nullptr,
1702
};
1703
1704
0
struct llama_sampler * llama_sampler_init_min_p(float p, size_t min_keep) {
1705
0
    const bool is_empty = (p <= 0.0f);
1706
1707
0
    if (is_empty) {
1708
0
        return llama_sampler_init_empty("?min-p");
1709
0
    }
1710
1711
0
    return llama_sampler_init(
1712
0
        /* .iface = */ &llama_sampler_min_p_i,
1713
0
        /* .ctx   = */ new llama_sampler_min_p {
1714
0
            ("min-p"),
1715
0
            /* .p        = */ p,
1716
0
            /* .min_keep = */ min_keep,
1717
0
        }
1718
0
    );
1719
0
}
1720
1721
// typical
1722
1723
struct llama_sampler_typical {
1724
    const float  p;
1725
    const size_t min_keep;
1726
};
1727
1728
0
static const char * llama_sampler_typical_name(const struct llama_sampler * /*smpl*/) {
1729
0
    return "typical";
1730
0
}
1731
1732
0
static void llama_sampler_typical_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1733
0
    auto * ctx = (llama_sampler_typical *) smpl->ctx;
1734
1735
    // Reference implementation:
1736
    // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
1737
0
    if (ctx->p >= 1.0f) {
1738
0
        return;
1739
0
    }
1740
1741
    // Compute the softmax of logits and calculate entropy
1742
0
    llama_sampler_softmax_impl(cur_p, true);
1743
1744
0
    float entropy = 0.0f;
1745
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1746
0
        entropy += -cur_p->data[i].p * logf(cur_p->data[i].p);
1747
0
    }
1748
1749
    // Compute the absolute difference between negative log probability and entropy for each candidate
1750
0
    std::vector<float> shifted_scores;
1751
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1752
0
        float shifted_score = fabsf(-logf(cur_p->data[i].p) - entropy);
1753
0
        shifted_scores.push_back(shifted_score);
1754
0
    }
1755
1756
    // Sort tokens based on the shifted_scores and their corresponding indices
1757
0
    std::vector<size_t> indices(cur_p->size);
1758
0
    std::iota(indices.begin(), indices.end(), 0);
1759
1760
0
    std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
1761
0
        return shifted_scores[a] < shifted_scores[b];
1762
0
    });
1763
1764
    // Compute the cumulative probabilities
1765
0
    float cum_sum = 0.0f;
1766
0
    size_t last_idx = indices.size();
1767
1768
0
    for (size_t i = 0; i < indices.size(); ++i) {
1769
0
        size_t idx = indices[i];
1770
0
        cum_sum += cur_p->data[idx].p;
1771
1772
        // Check if the running sum is greater than typical or if we have kept at least min_keep tokens
1773
0
        if (cum_sum > ctx->p && (ctx->min_keep == 0 || i >= ctx->min_keep - 1)) {
1774
0
            last_idx = i + 1;
1775
0
            break;
1776
0
        }
1777
0
    }
1778
1779
    // Resize the output vector to keep only the locally typical tokens
1780
0
    std::vector<llama_token_data> cur_p_new;
1781
0
    for (size_t i = 0; i < last_idx; ++i) {
1782
0
        size_t idx = indices[i];
1783
0
        cur_p_new.push_back(cur_p->data[idx]);
1784
0
    }
1785
1786
    // Replace the data in cur_p with the cur_p_new data
1787
0
    std::copy(cur_p_new.begin(), cur_p_new.end(), cur_p->data);
1788
0
    cur_p->size = cur_p_new.size();
1789
0
    cur_p->sorted = false;
1790
0
}
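A minimal standalone sketch of the selection rule above, on a toy distribution (illustrative only, min_keep omitted for brevity): rank tokens by how close their surprisal -log(p) is to the entropy of the whole distribution, then keep the smallest prefix of that ranking whose cumulative probability exceeds the target p.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    const std::vector<float> probs = { 0.5f, 0.3f, 0.15f, 0.05f };
    const float target_p = 0.8f;

    float entropy = 0.0f;
    for (float p : probs) entropy += -p * std::log(p);

    // sort token indices by |surprisal - entropy|
    std::vector<size_t> idx(probs.size());
    std::iota(idx.begin(), idx.end(), 0);
    std::sort(idx.begin(), idx.end(), [&](size_t a, size_t b) {
        return std::fabs(-std::log(probs[a]) - entropy) <
               std::fabs(-std::log(probs[b]) - entropy);
    });

    // keep the most "typical" tokens until the cumulative probability exceeds the target
    float cum = 0.0f;
    for (size_t i : idx) {
        cum += probs[i];
        std::printf("keep token %zu (p=%.2f, cum=%.2f)\n", i, probs[i], cum);
        if (cum > target_p) break;
    }
    return 0;
}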
1791
1792
0
static struct llama_sampler * llama_sampler_typical_clone(const struct llama_sampler * smpl) {
1793
0
    const auto * ctx = (const llama_sampler_typical *) smpl->ctx;
1794
0
    return llama_sampler_init_typical(ctx->p, ctx->min_keep);
1795
0
}
1796
1797
0
static void llama_sampler_typical_free(struct llama_sampler * smpl) {
1798
0
    delete (llama_sampler_typical *) smpl->ctx;
1799
0
}
1800
1801
static struct llama_sampler_i llama_sampler_typical_i = {
1802
    /* .name              = */ llama_sampler_typical_name,
1803
    /* .accept            = */ nullptr,
1804
    /* .apply             = */ llama_sampler_typical_apply,
1805
    /* .reset             = */ nullptr,
1806
    /* .clone             = */ llama_sampler_typical_clone,
1807
    /* .free              = */ llama_sampler_typical_free,
1808
    /* .backend_init      = */ nullptr,
1809
    /* .backend_accept    = */ nullptr,
1810
    /* .backend_apply     = */ nullptr,
1811
    /* .backend_set_input = */ nullptr,
1812
};
1813
1814
0
struct llama_sampler * llama_sampler_init_typical(float p, size_t min_keep) {
1815
0
    const bool is_empty = (p >= 1.0f);
1816
1817
0
    if (is_empty) {
1818
0
        return llama_sampler_init_empty("?typical");
1819
0
    }
1820
1821
0
    return llama_sampler_init(
1822
0
        /* .iface = */ &llama_sampler_typical_i,
1823
0
        /* .ctx   = */ new llama_sampler_typical {
1824
0
            /* .p        = */ p,
1825
0
            /* .min_keep = */ min_keep,
1826
0
        }
1827
0
    );
1828
0
}
1829
1830
// temp
1831
1832
struct llama_sampler_temp : public llama_sampler_backend {
1833
    const float temp;
1834
};
1835
1836
0
static const char * llama_sampler_temp_name(const struct llama_sampler * smpl) {
1837
0
    auto * sctx = (llama_sampler_temp *) smpl->ctx;
1838
0
    return sctx->get_name();
1839
0
}
1840
1841
0
static void llama_sampler_temp_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1842
0
    const auto * ctx = (llama_sampler_temp *) smpl->ctx;
1843
1844
0
    llama_sampler_temp_impl(cur_p, ctx->temp);
1845
0
}
1846
1847
0
static struct llama_sampler * llama_sampler_temp_clone(const struct llama_sampler * smpl) {
1848
0
    const auto * ctx = (const llama_sampler_temp *) smpl->ctx;
1849
0
    return llama_sampler_init_temp(ctx->temp);
1850
0
}
1851
1852
0
static void llama_sampler_temp_free(struct llama_sampler * smpl) {
1853
0
    delete (llama_sampler_temp *) smpl->ctx;
1854
0
}
1855
1856
static void llama_sampler_backend_temp_sampling(
1857
        struct ggml_context       * ctx,
1858
        struct ggml_cgraph        * gf,
1859
        struct llama_sampler_data * data,
1860
0
        float                       temp) {
1861
0
    if (temp <= 0.0f) {
1862
        // Find the most probable token index.
1863
0
        struct ggml_tensor * max_idx = ggml_argmax(ctx, data->logits);
1864
0
        ggml_set_name(max_idx, "temp_max_idx");
1865
1866
0
        if (data->candidates) {
1867
0
            struct ggml_tensor * candidates_rows = ggml_reshape_2d(ctx, data->candidates, 1, data->candidates->ne[0]);
1868
0
            data->candidates = ggml_get_rows(ctx, candidates_rows, max_idx);
1869
0
        } else {
1870
0
            data->candidates = max_idx;
1871
0
        }
1872
1873
0
        struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]);
1874
0
        data->logits = ggml_get_rows(ctx, logits_rows, max_idx);
1875
1876
0
        return;
1877
0
    }
1878
1879
0
    data->logits = ggml_scale(ctx, data->logits, 1.0f / temp);
1880
1881
0
    GGML_UNUSED(gf);
1882
0
}
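A standalone sketch (toy logits, plain C++, illustrative only) of what the 1/temp scaling above does to the resulting softmax: temperatures below 1 sharpen the distribution, temperatures above 1 flatten it, and non-positive temperatures are handled separately above as a plain argmax.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static void print_softmax(std::vector<float> logits, float temp) {
    for (float & l : logits) l *= 1.0f / temp;          // the ggml_scale step
    const float max_l = *std::max_element(logits.begin(), logits.end());
    float sum = 0.0f;
    for (float l : logits) sum += std::exp(l - max_l);
    std::printf("temp=%.2f:", temp);
    for (float l : logits) std::printf(" %.3f", std::exp(l - max_l) / sum);
    std::printf("\n");
}

int main() {
    const std::vector<float> logits = { 2.0f, 1.0f, 0.0f };
    print_softmax(logits, 0.5f);  // sharper
    print_softmax(logits, 1.0f);  // unchanged
    print_softmax(logits, 2.0f);  // flatter
    return 0;
}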
1883
1884
static bool llama_sampler_temp_backend_init(
1885
        struct llama_sampler       * smpl,
1886
0
        ggml_backend_buffer_type_t   buft) {
1887
0
    auto * sctx = (llama_sampler_temp *) smpl->ctx;
1888
1889
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1890
1891
0
    sctx->init(res);
1892
1893
0
    return res;
1894
0
}
1895
1896
static void llama_sampler_temp_backend_apply(
1897
        struct llama_sampler      * smpl,
1898
        struct ggml_context       * ctx,
1899
        struct ggml_cgraph        * gf,
1900
0
        struct llama_sampler_data * data) {
1901
0
    auto * sctx = (llama_sampler_temp *) smpl->ctx;
1902
0
    llama_sampler_backend_temp_sampling(ctx, gf, data, sctx->temp);
1903
0
}
1904
1905
static struct llama_sampler_i llama_sampler_temp_i = {
1906
    /* .name              = */ llama_sampler_temp_name,
1907
    /* .accept            = */ nullptr,
1908
    /* .apply             = */ llama_sampler_temp_apply,
1909
    /* .reset             = */ nullptr,
1910
    /* .clone             = */ llama_sampler_temp_clone,
1911
    /* .free              = */ llama_sampler_temp_free,
1912
    /* .backend_init      = */ llama_sampler_temp_backend_init,
1913
    /* .backend_accept    = */ nullptr,
1914
    /* .backend_apply     = */ llama_sampler_temp_backend_apply,
1915
    /* .backend_set_input = */ nullptr,
1916
};
1917
1918
0
struct llama_sampler * llama_sampler_init_temp(float temp) {
1919
0
    const bool is_empty = temp == 1.0f;
1920
1921
0
    if (is_empty) {
1922
0
        return llama_sampler_init_empty("?temp");
1923
0
    }
1924
1925
0
    return llama_sampler_init(
1926
0
        /* .iface = */ &llama_sampler_temp_i,
1927
0
        /* .ctx   = */ new llama_sampler_temp {
1928
0
            ("temp"),
1929
0
            /*.temp = */ temp,
1930
0
        }
1931
0
    );
1932
0
}
1933
1934
// temp-ext
1935
1936
struct llama_sampler_temp_ext : public llama_sampler_backend {
1937
    const float temp;
1938
    const float delta;
1939
    const float exponent;
1940
};
1941
1942
0
static const char * llama_sampler_temp_ext_name(const struct llama_sampler * smpl) {
1943
0
    auto * sctx = (llama_sampler_temp_ext *) smpl->ctx;
1944
0
    return sctx->get_name();
1945
0
}
1946
1947
0
static void llama_sampler_temp_ext_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1948
0
    auto * ctx = (llama_sampler_temp_ext *) smpl->ctx;
1949
0
    if (ctx->delta > 0) {
1950
0
        const float min_temp = std::max(0.0f, ctx->temp - ctx->delta);
1951
0
        const float max_temp = ctx->temp + ctx->delta;
1952
1953
0
        float exponent_val = ctx->exponent;
1954
1955
        // no need to do anything if there is only one (or zero) candidate
1956
0
        if (cur_p->size <= 1) {
1957
0
            return;
1958
0
        }
1959
1960
        // Calculate maximum possible entropy
1961
0
        float max_entropy = -logf(1.0f / cur_p->size);
1962
1963
0
        llama_sampler_softmax_impl(cur_p, true);
1964
1965
        // Calculate entropy of the softmax probabilities
1966
0
        float entropy = 0.0f;
1967
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1968
0
            float prob = cur_p->data[i].p;
1969
0
            if (prob > 0.0f) { // Ensure no log(0)
1970
0
                entropy -= prob * logf(prob);
1971
0
            }
1972
0
        }
1973
1974
        // Normalize the entropy (max_entropy cannot be 0 here because we checked cur_p->size != 1 above)
1975
0
        float normalized_entropy = entropy / max_entropy;
1976
1977
        // Map the normalized entropy to the desired temperature range using the power function
1978
0
        float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val);
1979
1980
    #ifdef DEBUG
1981
        LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp);
1982
        LLAMA_LOG_INFO("Entropy: %f\n", entropy);
1983
        LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy);
1984
        LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy);
1985
        LLAMA_LOG_INFO("Exponent: %f\n", exponent_val);
1986
        LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp);
1987
    #endif
1988
1989
        // Apply the dynamically calculated temperature scaling
1990
0
        llama_sampler_temp_impl(cur_p, dyn_temp);
1991
1992
        // Re-compute softmax probabilities after scaling logits with dynamic temperature
1993
0
        const double max_l_double = cur_p->data[0].logit;
1994
1995
0
        double cum_sum_double = 0.0;
1996
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1997
0
            double p = exp(cur_p->data[i].logit - max_l_double);
1998
0
            cur_p->data[i].p = p; // Store the scaled probability
1999
0
            cum_sum_double += p;
2000
0
        }
2001
2002
0
        for (size_t i = 0; i < cur_p->size; ++i) {
2003
0
            cur_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities
2004
0
        }
2005
2006
    #ifdef DEBUG
2007
        // Print the updated top 25 probabilities after temperature scaling
2008
        LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n");
2009
        for (size_t i = 0; i < 25 && i < cur_p->size; ++i) {
2010
            LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, cur_p->data[i].p * 100.0f);
2011
        }
2012
    #endif
2013
0
    } else {
2014
0
        llama_sampler_temp_impl(cur_p, ctx->temp);
2015
0
    }
2016
0
}
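Once the entropy is known, the mapping used in the branch above is a single expression, dyn_temp = min_temp + (max_temp - min_temp) * (H / H_max)^exponent; the backend graph below evaluates the same power as exp(exponent * log(norm_entropy)). A minimal standalone sketch with assumed parameters (toy values, not part of llama.cpp):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // assumed toy parameters
    const float temp = 0.8f, delta = 0.5f, exponent = 1.0f;
    const float min_temp = std::max(0.0f, temp - delta);
    const float max_temp = temp + delta;

    const std::vector<float> probs = { 0.7f, 0.2f, 0.1f };   // already softmaxed

    float entropy = 0.0f;
    for (float p : probs) {
        if (p > 0.0f) entropy -= p * std::log(p);
    }
    const float max_entropy  = -std::log(1.0f / probs.size());
    const float norm_entropy = entropy / max_entropy;
    const float dyn_temp     = min_temp + (max_temp - min_temp) * std::pow(norm_entropy, exponent);

    std::printf("normalized entropy %.3f -> dynamic temperature %.3f\n", norm_entropy, dyn_temp);
    return 0;
}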
2017
2018
0
static struct llama_sampler * llama_sampler_temp_ext_clone(const struct llama_sampler * smpl) {
2019
0
    const auto * ctx = (const llama_sampler_temp_ext *) smpl->ctx;
2020
0
    return llama_sampler_init_temp_ext(ctx->temp, ctx->delta, ctx->exponent);
2021
0
}
2022
2023
0
static void llama_sampler_temp_ext_free(struct llama_sampler * smpl) {
2024
0
    delete (llama_sampler_temp_ext *) smpl->ctx;
2025
0
}
2026
2027
static bool llama_sampler_temp_ext_backend_init(
2028
        struct llama_sampler       * smpl,
2029
0
        ggml_backend_buffer_type_t   buft) {
2030
0
    auto * sctx = (llama_sampler_temp_ext *) smpl->ctx;
2031
2032
0
    const bool res = llama_sampler_backend_support(smpl, buft);
2033
2034
0
    sctx->init(res);
2035
2036
0
    return res;
2037
0
}
2038
2039
static void llama_sampler_temp_ext_backend_apply(
2040
        struct llama_sampler      * smpl,
2041
        struct ggml_context       * ctx,
2042
        struct ggml_cgraph        * gf,
2043
0
        struct llama_sampler_data * data) {
2044
0
    auto * sctx = (llama_sampler_temp_ext *) smpl->ctx;
2045
2046
    // Revert to standard temperature scaling if delta or temp are non-positive.
2047
0
    if (sctx->delta <= 0.0f || sctx->temp <= 0.0f) {
2048
0
        llama_sampler_backend_temp_sampling(ctx, gf, data, sctx->temp);
2049
0
        return;
2050
0
    }
2051
2052
    // Calculate min_temp, max_temp, and max_entropy.
2053
0
    const float min_temp    = std::max(0.0f, sctx->temp - sctx->delta);
2054
0
    const float max_temp    = sctx->temp + sctx->delta;
2055
0
    const float max_entropy = logf(data->logits->ne[0]);
2056
2057
    // Calculate the probabilities.
2058
0
    struct ggml_tensor * probs = ggml_soft_max(ctx, data->logits);
2059
0
    ggml_set_name(probs, "temp_ext_softmax_probs");
2060
2061
    // Clamp probabilities to avoid log(0) which would give -inf
2062
0
    struct ggml_tensor * probs_clamped = ggml_clamp(ctx, probs, 1e-10f, 1.0f);
2063
0
    ggml_set_name(probs_clamped, "temp_ext_probs_clamped");
2064
2065
    // Calculate the entropy, entropy = -Σ(p * log(p)).
2066
0
    struct ggml_tensor * log_probs   = ggml_log(ctx, probs_clamped);
2067
0
    struct ggml_tensor * p_log_p     = ggml_mul(ctx, probs_clamped, log_probs);
2068
0
    struct ggml_tensor * sum_p_log_p = ggml_sum(ctx, p_log_p);
2069
0
    struct ggml_tensor * entropy     = ggml_scale(ctx, sum_p_log_p, -1.0f);
2070
0
    ggml_set_name(log_probs,   "temp_ext_log_probs");
2071
0
    ggml_set_name(p_log_p,     "temp_ext_p_log_p");
2072
0
    ggml_set_name(sum_p_log_p, "temp_ext_sum_p_log_p");
2073
0
    ggml_set_name(entropy,     "temp_ext_entropy");
2074
2075
    // Normalize the entropy, norm_entropy = entropy / max_entropy
2076
0
    struct ggml_tensor * norm_entropy = ggml_scale(ctx, entropy, 1.0f / max_entropy);
2077
0
    ggml_set_name(norm_entropy, "temp_ext_norm_entropy");
2078
2079
    // Calculate the dynamic temperature:
2080
    // dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent);
2081
    //
2082
    // Calculate powf(normalized_entropy, exponent) as
2083
    // norm_entropy^exponent = exp(exponent * log(norm_entropy))
2084
0
    struct ggml_tensor * log_norm_entropy = ggml_log(ctx, norm_entropy);
2085
0
    struct ggml_tensor * scaled_log       = ggml_scale(ctx, log_norm_entropy, sctx->exponent);
2086
0
    struct ggml_tensor * pow_entropy      = ggml_exp(ctx, scaled_log);
2087
    // With pow_entropy computed we can now compute dyn_temp, scaling by
2088
    // (max_temp - min_temp) and then adding min_temp.
2089
0
    struct ggml_tensor * dyn_temp         = ggml_scale_bias(ctx, pow_entropy, max_temp - min_temp, min_temp);
2090
0
    ggml_set_name(log_norm_entropy, "temp_ext_log_norm_entropy");
2091
0
    ggml_set_name(scaled_log,       "temp_ext_scaled_log");
2092
0
    ggml_set_name(pow_entropy,      "temp_ext_pow_entropy");
2093
0
    ggml_set_name(dyn_temp,         "temp_ext_dyn_temp");
2094
2095
    // Scale the logits by the dynamic temperature
2096
0
    struct ggml_tensor * scaled_logits = ggml_div(ctx, data->logits, dyn_temp);
2097
0
    ggml_set_name(scaled_logits, "temp_ext_scaled_logits");
2098
2099
0
    data->logits = scaled_logits;
2100
0
}
2101
2102
static struct llama_sampler_i llama_sampler_temp_ext_i = {
2103
    /* .name              = */ llama_sampler_temp_ext_name,
2104
    /* .accept            = */ nullptr,
2105
    /* .apply             = */ llama_sampler_temp_ext_apply,
2106
    /* .reset             = */ nullptr,
2107
    /* .clone             = */ llama_sampler_temp_ext_clone,
2108
    /* .free              = */ llama_sampler_temp_ext_free,
2109
    /* .backend_init      = */ llama_sampler_temp_ext_backend_init,
2110
    /* .backend_accept    = */ nullptr,
2111
    /* .backend_apply     = */ llama_sampler_temp_ext_backend_apply,
2112
    /* .backend_set_input = */ nullptr,
2113
};
2114
2115
0
struct llama_sampler * llama_sampler_init_temp_ext(float temp, float delta, float exponent) {
2116
0
    const bool is_empty = temp == 1.0f && delta <= 0.0f;
2117
2118
0
    if (is_empty) {
2119
0
        return llama_sampler_init_empty("?temp-ext");
2120
0
    }
2121
2122
0
    auto * res = llama_sampler_init(
2123
0
        /* .iface = */ &llama_sampler_temp_ext_i,
2124
0
        /* .ctx   = */ new llama_sampler_temp_ext {
2125
0
            ("temp-ext"),
2126
0
            /* .temp     = */ temp,
2127
0
            /* .delta    = */ delta,
2128
0
            /* .exponent = */ exponent,
2129
0
        }
2130
0
    );
2131
2132
0
    return res;
2133
0
}
2134
2135
// xtc
2136
2137
struct llama_sampler_xtc {
2138
    const float    probability;
2139
    const float    threshold;
2140
    const size_t   min_keep;
2141
2142
    const uint32_t seed;
2143
    uint32_t       seed_cur;
2144
2145
    std::mt19937    rng;
2146
};
2147
2148
0
static const char * llama_sampler_xtc_name(const struct llama_sampler * /*smpl*/) {
2149
0
    return "xtc";
2150
0
}
2151
2152
0
static void llama_sample_xtc_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2153
0
    auto * ctx = (llama_sampler_xtc *) smpl->ctx;
2154
2155
0
    if (ctx->probability <= 0.0f
2156
0
        || ctx->threshold > 0.5f
2157
0
        || cur_p->size < 2) {
2158
0
        return;
2159
0
    }
2160
2161
0
    std::uniform_real_distribution<float> distribution(0.0f, 1.0f);
2162
0
    float chance = distribution(ctx->rng);
2163
0
    if (chance > ctx->probability) {
2164
0
        return;
2165
0
    }
2166
2167
0
    llama_sampler_softmax_impl(cur_p, true);
2168
2169
0
    int pos_last = 0;
2170
2171
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2172
0
        if (cur_p->data[i].p >= ctx->threshold) {
2173
0
            pos_last = i;
2174
0
        } else {
2175
0
            break;
2176
0
        }
2177
0
    }
2178
2179
0
    if (cur_p->size - pos_last >= ctx->min_keep && pos_last > 0) {
2180
0
        cur_p->data += pos_last;
2181
0
        cur_p->size -= pos_last;
2182
0
    }
2183
0
}
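A standalone sketch of the XTC cut above, on a toy, already-sorted distribution (illustrative only; the probability roll and min_keep checks are omitted): find the last position whose probability is still at or above the threshold and drop everything before it, so only the least probable of the "top choices" survives.

#include <cstdio>
#include <vector>

int main() {
    const float threshold = 0.2f;
    const std::vector<float> probs = { 0.5f, 0.3f, 0.15f, 0.05f };  // sorted descending

    size_t pos_last = 0;
    for (size_t i = 0; i < probs.size(); ++i) {
        if (probs[i] >= threshold) {
            pos_last = i;       // index of the last token at or above the threshold
        } else {
            break;
        }
    }

    // keep probs[pos_last ..]; tokens 0 .. pos_last-1 are the excluded "top choices"
    for (size_t i = pos_last; i < probs.size(); ++i) {
        std::printf("keep p=%.2f\n", probs[i]);
    }
    return 0;
}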
2184
2185
0
static struct llama_sampler * llama_sampler_xtc_clone(const struct llama_sampler * smpl) {
2186
0
    const auto * ctx = (const llama_sampler_xtc *) smpl->ctx;
2187
0
    auto * result = llama_sampler_init_xtc(ctx->probability, ctx->threshold, ctx->min_keep, ctx->seed);
2188
2189
    // copy the state
2190
0
    {
2191
0
        auto * result_ctx = (llama_sampler_xtc *) result->ctx;
2192
2193
0
        result_ctx->rng = ctx->rng;
2194
0
    }
2195
2196
0
    return result;
2197
0
}
2198
2199
0
static void llama_sampler_xtc_free(struct llama_sampler * smpl) {
2200
0
    delete (llama_sampler_xtc *) smpl->ctx;
2201
0
}
2202
2203
0
static void llama_sampler_xtc_reset(struct llama_sampler * smpl) {
2204
0
    auto * ctx = (llama_sampler_xtc *) smpl->ctx;
2205
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
2206
0
    ctx->rng.seed(ctx->seed_cur);
2207
0
}
2208
2209
static struct llama_sampler_i llama_sampler_xtc_i = {
2210
    /* .name              = */ llama_sampler_xtc_name,
2211
    /* .accept            = */ nullptr,
2212
    /* .apply             = */ llama_sample_xtc_apply,
2213
    /* .reset             = */ llama_sampler_xtc_reset,
2214
    /* .clone             = */ llama_sampler_xtc_clone,
2215
    /* .free              = */ llama_sampler_xtc_free,
2216
    /* .backend_init      = */ nullptr,
2217
    /* .backend_accept    = */ nullptr,
2218
    /* .backend_apply     = */ nullptr,
2219
    /* .backend_set_input = */ nullptr,
2220
};
2221
2222
0
struct llama_sampler * llama_sampler_init_xtc(float p, float t, size_t min_keep, uint32_t seed) {
2223
0
    const bool is_empty = (p <= 0.0f || t > 0.5f);
2224
2225
0
    if (is_empty) {
2226
0
        return llama_sampler_init_empty("?xtc");
2227
0
    }
2228
2229
0
    const auto seed_cur = get_rng_seed(seed);
2230
2231
0
    return llama_sampler_init(
2232
0
        /* .iface = */ &llama_sampler_xtc_i,
2233
0
        /* .ctx   = */ new llama_sampler_xtc {
2234
0
            /* .probability   = */ p,
2235
0
            /* .threshold     = */ t,
2236
0
            /* .min_keep      = */ min_keep,
2237
0
            /* .seed          = */ seed,
2238
0
            /* .seed_cur      = */ seed_cur,
2239
0
            /* .rng           = */ std::mt19937(seed_cur),
2240
0
        }
2241
0
    );
2242
0
}
2243
2244
// mirostat
2245
2246
struct llama_sampler_mirostat {
2247
    const int32_t n_vocab;
2248
2249
    const uint32_t seed;
2250
          uint32_t seed_cur;
2251
2252
    const float tau;
2253
    const float eta;
2254
2255
    const int32_t m;
2256
2257
    float mu;
2258
2259
    std::mt19937    rng;
2260
};
2261
2262
0
static const char * llama_sampler_mirostat_name(const struct llama_sampler * /*smpl*/) {
2263
0
    return "mirostat";
2264
0
}
2265
2266
0
static void llama_sampler_mirostat_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2267
0
    auto * ctx = (llama_sampler_mirostat *) smpl->ctx;
2268
2269
0
    llama_sampler_softmax_impl(cur_p, true);
2270
2271
    // Estimate s_hat using the most probable m tokens
2272
0
    float s_hat = 0.0;
2273
0
    float sum_ti_bi = 0.0;
2274
0
    float sum_ti_sq = 0.0;
2275
0
    for (size_t i = 0; i < size_t(ctx->m - 1) && i < cur_p->size - 1; ++i) {
2276
0
        float t_i = logf(float(i + 2) / float(i + 1));
2277
0
        float b_i = logf(cur_p->data[i].p / cur_p->data[i + 1].p);
2278
0
        sum_ti_bi += t_i * b_i;
2279
0
        sum_ti_sq += t_i * t_i;
2280
0
    }
2281
0
    s_hat = sum_ti_bi / sum_ti_sq;
2282
2283
    // Compute k from the estimated s_hat and target surprise value
2284
0
    float epsilon_hat = s_hat - 1;
2285
0
    float k = powf((epsilon_hat * powf(2, ctx->mu)) / (1 - powf(ctx->n_vocab, -epsilon_hat)), 1 / s_hat);
2286
2287
0
    llama_sampler_top_k_impl(cur_p, std::max(int(k), 1));
2288
2289
0
    llama_sampler_softmax_impl(cur_p, true);
2290
2291
0
    const int idx = llama_sample_dist(cur_p, ctx->rng);
2292
2293
0
    cur_p->selected = idx;
2294
2295
0
    float observed_surprise = -log2f(cur_p->data[idx].p);
2296
0
    float e = observed_surprise - ctx->tau;
2297
2298
    // Update mu using the learning rate and error
2299
0
    ctx->mu = ctx->mu - ctx->eta * e;
2300
0
}
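A standalone sketch of the Mirostat v1 control loop above, with made-up numbers (illustrative only; s_hat is hard-coded here instead of being estimated from the top-m probabilities): a cut-off k is derived from s_hat and the current mu, and after a token is sampled, mu is nudged toward the target surprise tau by the learning rate eta.

#include <cmath>
#include <cstdio>

int main() {
    const float tau = 5.0f, eta = 0.1f;
    const int   n_vocab = 32000;
    float mu = 2.0f * tau;

    const float s_hat       = 1.2f;    // assumed; normally estimated from the top-m probs
    const float epsilon_hat = s_hat - 1.0f;
    const float k = std::pow((epsilon_hat * std::pow(2.0f, mu)) /
                             (1.0f - std::pow((float) n_vocab, -epsilon_hat)), 1.0f / s_hat);
    std::printf("k = %.1f\n", k);

    const float sampled_p         = 0.02f;               // probability of the sampled token
    const float observed_surprise = -std::log2(sampled_p);
    mu -= eta * (observed_surprise - tau);                // move mu toward tau
    std::printf("updated mu = %.3f\n", mu);
    return 0;
}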
2301
2302
0
static struct llama_sampler * llama_sampler_mirostat_clone(const struct llama_sampler * smpl) {
2303
0
    const auto * ctx = (const llama_sampler_mirostat *) smpl->ctx;
2304
0
    auto * result = llama_sampler_init_mirostat(ctx->n_vocab, ctx->seed, ctx->tau, ctx->eta, ctx->m);
2305
2306
    // copy the state
2307
0
    {
2308
0
        auto * result_ctx = (llama_sampler_mirostat *) result->ctx;
2309
2310
0
        result_ctx->mu  = ctx->mu;
2311
0
        result_ctx->rng = ctx->rng;
2312
0
    }
2313
2314
0
    return result;
2315
0
}
2316
2317
0
static void llama_sampler_mirostat_reset(struct llama_sampler * smpl) {
2318
0
    auto * ctx = (llama_sampler_mirostat *) smpl->ctx;
2319
0
    ctx->mu = 2.0f*ctx->tau;
2320
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
2321
0
    ctx->rng.seed(ctx->seed_cur);
2322
0
}
2323
2324
0
static void llama_sampler_mirostat_free(struct llama_sampler * smpl) {
2325
0
    delete (llama_sampler_mirostat *) smpl->ctx;
2326
0
}
2327
2328
static struct llama_sampler_i llama_sampler_mirostat_i = {
2329
    /* .name              = */ llama_sampler_mirostat_name,
2330
    /* .accept            = */ nullptr,
2331
    /* .apply             = */ llama_sampler_mirostat_apply,
2332
    /* .reset             = */ llama_sampler_mirostat_reset,
2333
    /* .clone             = */ llama_sampler_mirostat_clone,
2334
    /* .free              = */ llama_sampler_mirostat_free,
2335
    /* .backend_init      = */ nullptr,
2336
    /* .backend_accept    = */ nullptr,
2337
    /* .backend_apply     = */ nullptr,
2338
    /* .backend_set_input = */ nullptr,
2339
};
2340
2341
0
struct llama_sampler * llama_sampler_init_mirostat(int32_t n_vocab, uint32_t seed, float tau, float eta, int32_t m) {
2342
0
    const auto seed_cur = get_rng_seed(seed);
2343
2344
0
    return llama_sampler_init(
2345
0
        /* .iface = */ &llama_sampler_mirostat_i,
2346
0
        /* .ctx   = */ new llama_sampler_mirostat {
2347
0
            /* .n_vocab  = */ n_vocab,
2348
0
            /* .seed     = */ seed,
2349
0
            /* .seed_cur = */ seed_cur,
2350
0
            /* .tau      = */ tau,
2351
0
            /* .eta      = */ eta,
2352
0
            /* .m        = */ m,
2353
0
            /* .mu       = */ 2.0f*tau,
2354
0
            /* .rng      = */ std::mt19937(seed_cur),
2355
0
        }
2356
0
    );
2357
0
}
2358
2359
// mirostat v2
2360
2361
struct llama_sampler_mirostat_v2 {
2362
    const uint32_t seed;
2363
          uint32_t seed_cur;
2364
2365
    const float tau;
2366
    const float eta;
2367
2368
    float mu;
2369
2370
    std::mt19937 rng;
2371
};
2372
2373
0
static const char * llama_sampler_mirostat_v2_name(const struct llama_sampler * /*smpl*/) {
2374
0
    return "mirostat-v2";
2375
0
}
2376
2377
0
static void llama_sampler_mirostat_v2_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2378
0
    auto * ctx = (llama_sampler_mirostat_v2 *) smpl->ctx;
2379
2380
0
    llama_sampler_softmax_impl(cur_p, true);
2381
2382
    // Truncate the words with surprise values greater than mu
2383
0
    cur_p->size = std::distance(cur_p->data, std::find_if(cur_p->data, cur_p->data + cur_p->size, [&](const llama_token_data & candidate) {
2384
0
        return -log2f(candidate.p) > ctx->mu;
2385
0
    }));
2386
2387
0
    if (cur_p->size == 0) {
2388
0
        cur_p->size = 1;
2389
0
    }
2390
2391
    // Normalize the probabilities of the remaining words
2392
0
    llama_sampler_softmax_impl(cur_p, true);
2393
2394
0
    const int idx = llama_sample_dist(cur_p, ctx->rng);
2395
2396
0
    cur_p->selected = idx;
2397
2398
0
    float observed_surprise = -log2f(cur_p->data[idx].p);
2399
0
    float e = observed_surprise - ctx->tau;
2400
2401
    // Update mu using the learning rate and error
2402
0
    ctx->mu = ctx->mu - ctx->eta * e;
2403
0
}
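A standalone sketch of the Mirostat v2 truncation above, on a toy sorted distribution (illustrative only): keep only the leading tokens whose surprise -log2(p) does not exceed the current mu, always retaining at least one.

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const float mu = 3.0f;   // current surprise budget (2*tau initially)
    const std::vector<float> probs = { 0.4f, 0.3f, 0.2f, 0.07f, 0.03f };  // sorted descending

    size_t keep = 0;
    while (keep < probs.size() && -std::log2(probs[keep]) <= mu) {
        keep++;
    }
    if (keep == 0) keep = 1;   // always keep at least one token

    std::printf("kept %zu of %zu tokens\n", keep, probs.size());
    return 0;
}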
2404
2405
0
static void llama_sampler_mirostat_v2_reset(struct llama_sampler * smpl) {
2406
0
    auto * ctx = (llama_sampler_mirostat_v2 *) smpl->ctx;
2407
0
    ctx->mu = 2.0f*ctx->tau;
2408
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
2409
0
    ctx->rng.seed(ctx->seed_cur);
2410
0
}
2411
2412
0
static struct llama_sampler * llama_sampler_mirostat_v2_clone(const struct llama_sampler * smpl) {
2413
0
    const auto * ctx = (const llama_sampler_mirostat_v2 *) smpl->ctx;
2414
2415
0
    auto * result = llama_sampler_init_mirostat_v2(ctx->seed, ctx->tau, ctx->eta);
2416
2417
    // copy the state
2418
0
    {
2419
0
        auto * result_ctx = (llama_sampler_mirostat_v2 *) result->ctx;
2420
2421
0
        result_ctx->mu  = ctx->mu;
2422
0
        result_ctx->rng = ctx->rng;
2423
0
    }
2424
2425
0
    return result;
2426
0
}
2427
2428
0
static void llama_sampler_mirostat_v2_free(struct llama_sampler * smpl) {
2429
0
    delete (llama_sampler_mirostat_v2 *) smpl->ctx;
2430
0
}
2431
2432
static struct llama_sampler_i llama_sampler_mirostat_v2_i = {
2433
    /* .name              = */ llama_sampler_mirostat_v2_name,
2434
    /* .accept            = */ nullptr,
2435
    /* .apply             = */ llama_sampler_mirostat_v2_apply,
2436
    /* .reset             = */ llama_sampler_mirostat_v2_reset,
2437
    /* .clone             = */ llama_sampler_mirostat_v2_clone,
2438
    /* .free              = */ llama_sampler_mirostat_v2_free,
2439
    /* .backend_init      = */ nullptr,
2440
    /* .backend_accept    = */ nullptr,
2441
    /* .backend_apply     = */ nullptr,
2442
    /* .backend_set_input = */ nullptr,
2443
};
2444
2445
0
struct llama_sampler * llama_sampler_init_mirostat_v2(uint32_t seed, float tau, float eta) {
2446
0
    auto seed_cur = get_rng_seed(seed);
2447
0
    return llama_sampler_init(
2448
0
        /* .iface = */ &llama_sampler_mirostat_v2_i,
2449
0
        /* .ctx   = */ new llama_sampler_mirostat_v2 {
2450
0
            /* .seed     = */ seed,
2451
0
            /* .seed_cur = */ seed_cur,
2452
0
            /* .tau      = */ tau,
2453
0
            /* .eta      = */ eta,
2454
0
            /* .mu       = */ 2.0f*tau,
2455
0
            /* .rng      = */ std::mt19937(seed_cur),
2456
0
        }
2457
0
    );
2458
0
}
2459
2460
// grammar
2461
2462
struct llama_sampler_grammar {
2463
    const struct llama_vocab * vocab;
2464
2465
    std::string grammar_str;
2466
    std::string grammar_root;
2467
2468
    struct llama_grammar * grammar;
2469
};
2470
2471
0
static const char * llama_sampler_grammar_name(const struct llama_sampler * /*smpl*/) {
2472
0
    return "grammar";
2473
0
}
2474
2475
0
static void llama_sampler_grammar_accept_impl(struct llama_sampler * smpl, llama_token token) {
2476
0
    auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2477
0
    if (ctx->grammar) {
2478
0
        llama_grammar_accept_impl(*ctx->grammar, token);
2479
0
    }
2480
0
}
2481
2482
0
static void llama_sampler_grammar_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2483
0
    auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2484
0
    if (ctx->grammar) {
2485
0
        llama_grammar_apply_impl(*ctx->grammar, cur_p);
2486
0
    }
2487
0
}
2488
2489
// Fwd declare to break reset --> init_impl --> llama_sampler_grammar_i --> reset cycle.
2490
static struct llama_sampler * llama_sampler_init_grammar_impl(
2491
        const struct llama_vocab * vocab,
2492
                      const char * grammar_str,
2493
                      const char * grammar_root,
2494
                              bool lazy,
2495
                     const char ** trigger_words,
2496
                            size_t num_trigger_words,
2497
               const llama_token * trigger_tokens,
2498
                            size_t num_trigger_tokens,
2499
                     const char ** trigger_patterns,
2500
                            size_t num_trigger_patterns);
2501
2502
0
static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
2503
0
    auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2504
0
    if (!ctx->grammar) {
2505
0
        return;
2506
0
    }
2507
2508
0
    std::vector<const char *>  trigger_patterns_c;
2509
0
    trigger_patterns_c.reserve(ctx->grammar->trigger_patterns.size());
2510
0
    for (auto & trigger_pattern : ctx->grammar->trigger_patterns) {
2511
0
        trigger_patterns_c.push_back(trigger_pattern.pattern.c_str());
2512
0
    }
2513
2514
0
    auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
2515
0
                                                 ctx->grammar->lazy, trigger_patterns_c.data(), trigger_patterns_c.size(),
2516
0
                                                 ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size());
2517
2518
0
    llama_grammar_free_impl(ctx->grammar);
2519
0
    ctx->grammar = grammar_new;
2520
0
}
2521
2522
0
static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) {
2523
0
    const auto * ctx = (const llama_sampler_grammar *) smpl->ctx;
2524
2525
0
    auto * result = llama_sampler_init_grammar_impl(ctx->vocab, nullptr, nullptr, false, nullptr, 0, nullptr, 0, nullptr, 0);
2526
0
    GGML_ASSERT(result);
2527
2528
    // copy the state
2529
0
    {
2530
0
        auto * result_ctx = (llama_sampler_grammar *) result->ctx;
2531
2532
0
        if (ctx->grammar) {
2533
0
            result_ctx->grammar_str  = ctx->grammar_str;
2534
0
            result_ctx->grammar_root = ctx->grammar_root;
2535
2536
0
            result_ctx->grammar = llama_grammar_clone_impl(*ctx->grammar);
2537
0
        }
2538
0
    }
2539
2540
0
    return result;
2541
0
}
2542
2543
0
static void llama_sampler_grammar_free(struct llama_sampler * smpl) {
2544
0
    const auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2545
2546
0
    if (ctx->grammar) {
2547
0
        llama_grammar_free_impl(ctx->grammar);
2548
0
    }
2549
2550
0
    delete ctx;
2551
0
}
2552
2553
static struct llama_sampler_i llama_sampler_grammar_i = {
2554
    /* .name              = */ llama_sampler_grammar_name,
2555
    /* .accept            = */ llama_sampler_grammar_accept_impl,
2556
    /* .apply             = */ llama_sampler_grammar_apply,
2557
    /* .reset             = */ llama_sampler_grammar_reset,
2558
    /* .clone             = */ llama_sampler_grammar_clone,
2559
    /* .free              = */ llama_sampler_grammar_free,
2560
    /* .backend_init      = */ nullptr,
2561
    /* .backend_accept    = */ nullptr,
2562
    /* .backend_apply     = */ nullptr,
2563
    /* .backend_set_input = */ nullptr,
2564
};
2565
2566
static struct llama_sampler * llama_sampler_init_grammar_impl(
2567
        const struct llama_vocab * vocab,
2568
                      const char * grammar_str,
2569
                      const char * grammar_root,
2570
                              bool lazy,
2571
                     const char ** trigger_words,
2572
                            size_t num_trigger_words,
2573
               const llama_token * trigger_tokens,
2574
                            size_t num_trigger_tokens,
2575
                     const char ** trigger_patterns,
2576
0
                            size_t num_trigger_patterns) {
2577
0
    auto * ctx = new llama_sampler_grammar;
2578
2579
0
    if (grammar_str != nullptr && grammar_str[0] != '\0') {
2580
0
        std::string trigger_pattern;
2581
0
        llama_grammar * grammar = nullptr;
2582
        // TODO: remove trigger_words support.
2583
0
        if (trigger_words != nullptr && num_trigger_words > 0) {
2584
0
            GGML_ASSERT(trigger_patterns == nullptr && num_trigger_patterns == 0);
2585
0
            trigger_pattern = "[\\s\\S]*?(";
2586
0
            for (size_t i = 0; i < num_trigger_words; ++i) {
2587
0
                static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
2588
0
                if (i > 0) {
2589
0
                    trigger_pattern += "|";
2590
0
                }
2591
0
                trigger_pattern += std::regex_replace(trigger_words[i], special_chars, "\\$0");
2592
0
            }
2593
0
            trigger_pattern += ")[\\s\\S]*";
2594
2595
0
            std::array<const char *, 1> tmp_trigger_patterns = { trigger_pattern.c_str() };
2596
0
            grammar = llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, tmp_trigger_patterns.data(), tmp_trigger_patterns.size(), trigger_tokens, num_trigger_tokens);
2597
0
        } else {
2598
0
            grammar = llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens);
2599
0
        }
2600
0
        *ctx = {
2601
0
            /* .vocab        = */ vocab,
2602
0
            /* .grammar_str  = */ grammar_str,
2603
0
            /* .grammar_root = */ grammar_root,
2604
0
            /* .grammar      = */ grammar,
2605
0
        };
2606
0
        if (!ctx->grammar) {
2607
0
            delete ctx;
2608
0
            return nullptr;
2609
0
        }
2610
0
    } else {
2611
0
        *ctx = {
2612
0
            /* .vocab        = */ vocab,
2613
0
            /* .grammar_str  = */ {},
2614
0
            /* .grammar_root = */ {},
2615
0
            /* .grammar      = */ nullptr,
2616
0
        };
2617
0
    }
2618
2619
0
    return llama_sampler_init(
2620
0
        /* .iface = */ &llama_sampler_grammar_i,
2621
0
        /* .ctx   = */ ctx
2622
0
    );
2623
0
}
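For the deprecated trigger_words path above, every word is folded into one lazy-match pattern of the form "[\s\S]*?(w1|w2|...)[\s\S]*" with regex metacharacters escaped. A minimal standalone sketch with assumed trigger words (the escaping below is hand-rolled instead of the std::regex_replace call used above; illustrative only):

#include <cstdio>
#include <string>
#include <vector>

int main() {
    // assumed toy trigger words
    const std::vector<std::string> trigger_words = { "<tool_call>", "```json" };

    // escape regex metacharacters so each word is matched literally
    auto escape = [](const std::string & s) {
        static const std::string special = ".^$|()*+?[]{}\\";
        std::string out;
        for (char c : s) {
            if (special.find(c) != std::string::npos) {
                out += '\\';
            }
            out += c;
        }
        return out;
    };

    std::string trigger_pattern = "[\\s\\S]*?(";
    for (size_t i = 0; i < trigger_words.size(); ++i) {
        if (i > 0) {
            trigger_pattern += "|";
        }
        trigger_pattern += escape(trigger_words[i]);
    }
    trigger_pattern += ")[\\s\\S]*";

    std::printf("%s\n", trigger_pattern.c_str());   // [\s\S]*?(<tool_call>|```json)[\s\S]*
    return 0;
}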
2624
2625
struct llama_sampler * llama_sampler_init_grammar(
2626
        const struct llama_vocab * vocab,
2627
                      const char * grammar_str,
2628
0
                      const char * grammar_root) {
2629
0
    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ false, nullptr, 0, nullptr, 0, nullptr, 0);
2630
0
}
2631
2632
struct llama_sampler * llama_sampler_init_grammar_lazy(
2633
        const struct llama_vocab * vocab,
2634
                      const char * grammar_str,
2635
                      const char * grammar_root,
2636
                     const char ** trigger_words,
2637
                            size_t num_trigger_words,
2638
               const llama_token * trigger_tokens,
2639
0
                            size_t num_trigger_tokens) {
2640
0
    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens, nullptr, 0);
2641
0
}
2642
2643
struct llama_sampler * llama_sampler_init_grammar_lazy_patterns(
2644
        const struct llama_vocab * vocab,
2645
                      const char * grammar_str,
2646
                      const char * grammar_root,
2647
                     const char ** trigger_patterns,
2648
                            size_t num_trigger_patterns,
2649
               const llama_token * trigger_tokens,
2650
0
                            size_t num_trigger_tokens) {
2651
0
    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, nullptr, 0, trigger_tokens, num_trigger_tokens, trigger_patterns, num_trigger_patterns);
2652
0
}
2653
2654
// penalties
2655
2656
struct llama_sampler_penalties {
2657
    const int32_t penalty_last_n;
2658
    const float   penalty_repeat;
2659
    const float   penalty_freq;
2660
    const float   penalty_present;
2661
2662
    ring_buffer<llama_token> prev;
2663
2664
    // a frequency map to count token occurrences
2665
    std::unordered_map<llama_token, int> token_count;
2666
};
2667
2668
0
static const char * llama_sampler_penalties_name(const struct llama_sampler * /*smpl*/) {
2669
0
    return "penalties";
2670
0
}
2671
2672
0
static void llama_sampler_penalties_accept(struct llama_sampler * smpl, llama_token token) {
2673
0
    auto * ctx = (llama_sampler_penalties *) smpl->ctx;
2674
0
    if (ctx->penalty_last_n == 0) {
2675
0
        return;
2676
0
    }
2677
2678
0
    ctx->token_count[token]++;
2679
2680
    // if the ring buffer is full, remove the oldest token
2681
0
    if (ctx->prev.size() >= (size_t) ctx->penalty_last_n) {
2682
0
        const auto old = ctx->prev.front();
2683
2684
0
        ctx->token_count[old]--;
2685
0
        if (ctx->token_count[old] == 0) {
2686
0
            ctx->token_count.erase(old);
2687
0
        }
2688
0
    }
2689
2690
0
    ctx->prev.push_back(token);
2691
2692
#if 0
2693
    // sanity check
2694
    std::unordered_map<llama_token, int> tmp;
2695
    for (int i = 0; i < std::min<int>(ctx->penalty_last_n, ctx->prev.size()); ++i) {
2696
        tmp[ctx->prev.rat(i)]++;
2697
    }
2698
2699
    assert(ctx->token_count == tmp);
2700
#endif
2701
0
}
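A standalone sketch of the bookkeeping done in accept() above, using std::deque in place of the fixed-capacity ring buffer and toy token ids (illustrative only): every accepted token bumps its count, and once the window holds penalty_last_n tokens the oldest one is evicted and its count decremented, erasing the entry when it reaches zero.

#include <cstdio>
#include <deque>
#include <unordered_map>

int main() {
    const size_t penalty_last_n = 3;
    std::deque<int> prev;
    std::unordered_map<int, int> token_count;

    const int stream[] = { 7, 7, 5, 7, 9 };
    for (int token : stream) {
        token_count[token]++;

        // if the window is full, evict the oldest token and drop its count
        if (prev.size() >= penalty_last_n) {
            const int old = prev.front();
            prev.pop_front();
            if (--token_count[old] == 0) {
                token_count.erase(old);
            }
        }
        prev.push_back(token);
    }

    for (const auto & kv : token_count) {
        std::printf("token %d seen %d times in the window\n", kv.first, kv.second);
    }
    return 0;
}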
2702
2703
0
static void llama_sampler_penalties_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2704
0
    auto * ctx = (llama_sampler_penalties *) smpl->ctx;
2705
2706
0
    if ((ctx->penalty_last_n == 0) ||
2707
0
        (ctx->penalty_repeat == 1.0f && ctx->penalty_freq == 0.0f && ctx->penalty_present == 0.0f)) {
2708
0
        return;
2709
0
    }
2710
2711
    // Apply frequency and presence penalties to the cur_p
2712
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2713
0
        const auto token_iter = ctx->token_count.find(cur_p->data[i].id);
2714
0
        if (token_iter == ctx->token_count.end()) {
2715
0
            continue;
2716
0
        }
2717
2718
0
        const int count = token_iter->second;
2719
2720
0
        assert(count > 0 && count <= ctx->penalty_last_n);
2721
2722
        // The academic publication that described this technique only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong.
2723
        // The common fix for this problem is to multiply by the penalty instead of dividing.
2724
0
        if (cur_p->data[i].logit <= 0) {
2725
0
            cur_p->data[i].logit *= ctx->penalty_repeat;
2726
0
        } else {
2727
0
            cur_p->data[i].logit /= ctx->penalty_repeat;
2728
0
        }
2729
2730
0
        cur_p->data[i].logit -= float(count) * ctx->penalty_freq + float(count > 0) * ctx->penalty_present;
2731
0
    }
2732
2733
0
    cur_p->sorted = false;
2734
0
}
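A standalone sketch of how a single logit is adjusted above, with made-up penalty values (illustrative only): the repeat penalty divides positive logits and multiplies negative ones, then count * penalty_freq and a flat penalty_present (for any count > 0) are subtracted.

#include <cstdio>

int main() {
    const float penalty_repeat  = 1.1f;
    const float penalty_freq    = 0.2f;
    const float penalty_present = 0.5f;

    const float logits[] = { 2.0f, -1.0f };
    const int   counts[] = { 3,     3    };

    for (int i = 0; i < 2; ++i) {
        float logit = logits[i];
        const int count = counts[i];

        // divide positive logits, multiply negative ones (see the comment above)
        if (logit <= 0) {
            logit *= penalty_repeat;
        } else {
            logit /= penalty_repeat;
        }
        logit -= (float) count * penalty_freq + (count > 0 ? 1.0f : 0.0f) * penalty_present;

        std::printf("logit %+.2f with count %d -> %+.3f\n", logits[i], count, logit);
    }
    return 0;
}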
2735
2736
0
static void llama_sampler_penalties_reset(struct llama_sampler * smpl) {
2737
0
    auto * ctx = (llama_sampler_penalties *) smpl->ctx;
2738
0
    ctx->prev.clear();
2739
0
    ctx->token_count.clear();
2740
0
}
2741
2742
0
static struct llama_sampler * llama_sampler_penalties_clone(const struct llama_sampler * smpl) {
2743
0
    const auto * ctx = (const llama_sampler_penalties *) smpl->ctx;
2744
0
    auto * result = llama_sampler_init_penalties(
2745
0
            ctx->penalty_last_n,
2746
0
            ctx->penalty_repeat,
2747
0
            ctx->penalty_freq,
2748
0
            ctx->penalty_present);
2749
2750
    // copy the state
2751
0
    {
2752
0
        auto * result_ctx = (llama_sampler_penalties *) result->ctx;
2753
2754
0
        result_ctx->prev = ctx->prev;
2755
0
    }
2756
2757
0
    return result;
2758
0
}
2759
2760
0
static void llama_sampler_penalties_free(struct llama_sampler * smpl) {
2761
0
    delete (llama_sampler_penalties *) smpl->ctx;
2762
0
}
2763
2764
static struct llama_sampler_i llama_sampler_penalties_i = {
2765
    /* .name              = */ llama_sampler_penalties_name,
2766
    /* .accept            = */ llama_sampler_penalties_accept,
2767
    /* .apply             = */ llama_sampler_penalties_apply,
2768
    /* .reset             = */ llama_sampler_penalties_reset,
2769
    /* .clone             = */ llama_sampler_penalties_clone,
2770
    /* .free              = */ llama_sampler_penalties_free,
2771
    /* .backend_init      = */ nullptr,
2772
    /* .backend_accept    = */ nullptr,
2773
    /* .backend_apply     = */ nullptr,
2774
    /* .backend_set_input = */ nullptr,
2775
};
2776
2777
struct llama_sampler * llama_sampler_init_penalties(
2778
        int32_t penalty_last_n,
2779
        float penalty_repeat,
2780
        float penalty_freq,
2781
0
        float penalty_present) {
2782
0
    penalty_last_n = std::max(penalty_last_n, 0);
2783
2784
0
    const bool is_empty = (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f));
2785
2786
0
    if (is_empty) {
2787
0
        return llama_sampler_init_empty("?penalties");
2788
0
    }
2789
2790
0
    return llama_sampler_init(
2791
0
        /* .iface = */ &llama_sampler_penalties_i,
2792
0
        /* .ctx   = */ new llama_sampler_penalties {
2793
0
            /* .penalty_last_n  = */ penalty_last_n,
2794
0
            /* .penalty_repeat  = */ penalty_repeat,
2795
0
            /* .penalty_freq    = */ penalty_freq,
2796
0
            /* .penalty_present = */ penalty_present,
2797
0
            /* .prev            = */ ring_buffer<llama_token>(penalty_last_n),
2798
0
            /* .token_count     = */ {},
2799
0
        }
2800
0
    );
2801
0
}
2802
2803
// top-n-sigma
2804
2805
struct llama_sampler_top_n_sigma {
2806
    const float n;
2807
};
2808
2809
0
static const char * llama_sampler_top_n_sigma_name(const struct llama_sampler * /*smpl*/) {
2810
0
    return "top-n-sigma";
2811
0
}
2812
2813
0
static void llama_sampler_top_n_sigma_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2814
0
    auto * ctx = (llama_sampler_top_n_sigma *) smpl->ctx;
2815
2816
0
    if (ctx->n <= 0.0f || cur_p->size <= 1) {
2817
0
        return;
2818
0
    }
2819
2820
    // find max logit and calculate mean
2821
0
    float max = cur_p->data[0].logit;
2822
0
    float logits_sum = 0;
2823
0
    size_t valid_count = 0;
2824
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2825
        // Only count values that are not -infinity
2826
0
        if (cur_p->data[i].logit != -INFINITY) {
2827
0
            max = std::max(max, cur_p->data[i].logit);
2828
0
            logits_sum += cur_p->data[i].logit;
2829
0
            valid_count++;
2830
0
        }
2831
0
    }
2832
0
    float mean = valid_count > 0 ? logits_sum/valid_count : 0;
2833
2834
    // calculate standard deviation
2835
0
    float acc = 0;
2836
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2837
        // Skip -infinity in std calculation
2838
0
        if (cur_p->data[i].logit != -INFINITY) {
2839
0
            acc += pow(cur_p->data[i].logit - mean, 2);
2840
0
        }
2841
0
    }
2842
0
    float std = valid_count > 0 ? sqrt(acc/valid_count) : 0;
2843
2844
    // apply mask
2845
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2846
0
        if (cur_p->data[i].logit < max - (ctx->n * std)) {
2847
0
            cur_p->data[i].logit = -INFINITY;
2848
0
        }
2849
0
    }
2850
2851
0
    llama_sampler_softmax_impl(cur_p, true);
2852
0
}
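A standalone sketch of the top-n-sigma cut above, on toy logits with no pre-existing -INFINITY entries (illustrative only): compute the mean and standard deviation of the logits and mask out everything below max - n * stddev.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const float n = 1.0f;   // keep logits within n standard deviations of the max
    std::vector<float> logits = { 4.0f, 3.5f, 3.0f, 1.0f, -2.0f };

    const float max_logit = *std::max_element(logits.begin(), logits.end());

    float sum = 0.0f;
    for (float l : logits) sum += l;
    const float mean = sum / logits.size();

    float acc = 0.0f;
    for (float l : logits) acc += (l - mean) * (l - mean);
    const float std_dev = std::sqrt(acc / logits.size());

    for (float & l : logits) {
        if (l < max_logit - n * std_dev) {
            l = -INFINITY;   // masked out, exactly like the loop above
        }
        std::printf("%+.2f\n", l);
    }
    return 0;
}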
2853
2854
0
static struct llama_sampler * llama_sampler_top_n_sigma_clone(const struct llama_sampler * smpl) {
2855
0
    const auto * ctx = (const llama_sampler_top_n_sigma *) smpl->ctx;
2856
0
    return llama_sampler_init_top_n_sigma(ctx->n);
2857
0
}
2858
2859
0
static void llama_sampler_top_n_sigma_free(struct llama_sampler * smpl) {
2860
0
    delete (llama_sampler_top_n_sigma *) smpl->ctx;
2861
0
}
2862
2863
static struct llama_sampler_i llama_sampler_top_n_sigma_i = {
2864
    /* .name              = */ llama_sampler_top_n_sigma_name,
2865
    /* .accept            = */ nullptr,
2866
    /* .apply             = */ llama_sampler_top_n_sigma_apply,
2867
    /* .reset             = */ nullptr,
2868
    /* .clone             = */ llama_sampler_top_n_sigma_clone,
2869
    /* .free              = */ llama_sampler_top_n_sigma_free,
2870
    /* .backend_init      = */ nullptr,
2871
    /* .backend_accept    = */ nullptr,
2872
    /* .backend_apply     = */ nullptr,
2873
    /* .backend_set_input = */ nullptr,
2874
};
2875
2876
0
struct llama_sampler * llama_sampler_init_top_n_sigma(float n) {
2877
0
    const bool is_empty = (n <= 0.0f);
2878
2879
0
    if (is_empty) {
2880
0
        return llama_sampler_init_empty("?top-n-sigma");
2881
0
    }
2882
2883
0
    return llama_sampler_init(
2884
0
        /* .iface = */ &llama_sampler_top_n_sigma_i,
2885
0
        /* .ctx   = */ new llama_sampler_top_n_sigma {
2886
0
            /* .n = */ n,
2887
0
        }
2888
0
    );
2889
0
}
2890
2891
// DRY
2892
2893
struct llama_sampler_dry {
2894
    int32_t total_context_size;
2895
2896
    const float   dry_multiplier;
2897
    const float   dry_base;
2898
    const int32_t dry_allowed_length;
2899
    const int32_t dry_penalty_last_n;
2900
2901
    std::unordered_multimap<llama_token, std::vector<llama_token>> dry_processed_breakers;
2902
    std::vector<int> dry_repeat_count;
2903
    std::unordered_map<llama_token, int> dry_max_token_repeat;
2904
    ring_buffer<llama_token> last_tokens;
2905
};
2906
2907
// Ported from Koboldcpp, original PR: https://github.com/LostRuins/koboldcpp/pull/982 (Original author: pi6am)
2908
0
static void get_overlapping_token_sequences(const llama_vocab & vocab, const std::string& str, std::unordered_multimap<llama_token, std::vector<llama_token>>& token_sequences, int max_tail_len = -1) {
2909
0
    for (llama_token token_id = 0; token_id < (llama_token) vocab.n_tokens(); token_id++) {
2910
0
        std::string word = vocab.detokenize({token_id}, true);
2911
0
        if (word.find(str) != std::string::npos) {
2912
0
            token_sequences.emplace(token_id, std::vector<llama_token>());
2913
0
        } else {
2914
0
            size_t word_len = word.size();
2915
0
            size_t str_len = str.size();
2916
0
            size_t pos = -1;
2917
0
            while ((pos = word.find(str[0], pos + 1)) != std::string::npos) {
2918
0
                bool match = true;
2919
0
                size_t i;
2920
0
                for (i = 1; i < str_len && i + pos < word_len; ++i) {
2921
0
                    if (word[pos + i] != str[i]) {
2922
0
                        match = false;
2923
0
                        break;
2924
0
                    }
2925
0
                }
2926
0
                if (match) {
2927
0
                    std::vector<llama_token> tokenization = vocab.tokenize(str.substr(i), false, false);
2928
0
                    if (max_tail_len >= 0 && tokenization.size() > (size_t)max_tail_len) {
2929
0
                        tokenization.resize(max_tail_len);
2930
0
                    }
2931
2932
                    // Ensure we don't already have a duplicate matching tokenization
2933
0
                    auto its = token_sequences.equal_range(token_id);
2934
0
                    bool found = false;
2935
0
                    for (auto it = its.first; it != its.second; ++it) {
2936
0
                        if (tokenization == it->second) {
2937
0
                            found = true;
2938
0
                            break;
2939
0
                        }
2940
0
                    }
2941
0
                    if (!found) {
2942
0
                        token_sequences.emplace(token_id, tokenization);
2943
0
                    }
2944
0
                }
2945
0
            }
2946
0
        }
2947
0
    }
2948
0
}
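A simplified standalone sketch of the overlap test performed above for each vocabulary token, with assumed inputs (not the actual helper, which also records the tail tokenization): a token can trigger a sequence breaker either by containing the breaker string outright or by ending with a prefix of it, in which case only the remaining tail of the breaker still has to be matched.

#include <algorithm>
#include <cstdio>
#include <string>

// Returns the length of the longest suffix of `word` that is a prefix of `str`
// (str.size() means `str` is fully contained in `word`).
static size_t overlap(const std::string & word, const std::string & str) {
    if (word.find(str) != std::string::npos) {
        return str.size();
    }
    for (size_t len = std::min(word.size(), str.size() - 1); len > 0; --len) {
        if (word.compare(word.size() - len, len, str, 0, len) == 0) {
            return len;
        }
    }
    return 0;
}

int main() {
    std::printf("%zu\n", overlap("foo\n", "\n\n"));   // 1: token ends with the start of the breaker
    std::printf("%zu\n", overlap("abc", "\n\n"));     // 0: no overlap
    return 0;
}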
2949
2950
0
static const char * llama_sampler_dry_name(const struct llama_sampler * /*smpl*/) {
2951
0
    return "dry";
2952
0
}
2953
2954
0
static void llama_sampler_dry_accept(struct llama_sampler * smpl, llama_token token) {
2955
0
    auto * ctx = (llama_sampler_dry *) smpl->ctx;
2956
0
    if (ctx->dry_multiplier == 0.0f || ctx->dry_base < 1.0f || ctx->dry_penalty_last_n == 0) {
2957
0
        return;
2958
0
    }
2959
2960
0
    ctx->last_tokens.push_back(token);
2961
0
}
2962
2963
// Ported from Koboldcpp, original PR: https://github.com/LostRuins/koboldcpp/pull/982 (Original author: pi6am)
2964
0
static void llama_sampler_dry_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2965
0
    auto * ctx = (llama_sampler_dry *) smpl->ctx;
2966
2967
0
    if (ctx->dry_multiplier == 0.0f || ctx->dry_base < 1.0f || ctx->dry_penalty_last_n == 0) {
2968
0
        return;
2969
0
    }
2970
2971
0
    int32_t effective_dry_penalty_last_n = (ctx->dry_penalty_last_n == -1) ? ctx->total_context_size : std::max(ctx->dry_penalty_last_n, 0);
2972
0
    int last_n_repeat = std::min(std::min((int)ctx->last_tokens.size(), effective_dry_penalty_last_n), ctx->total_context_size);
2973
2974
0
    if (last_n_repeat <= ctx->dry_allowed_length) {
2975
0
        return;
2976
0
    }
2977
2978
0
    ctx->dry_repeat_count.assign(last_n_repeat, 0);
2979
0
    ctx->dry_max_token_repeat.clear();
2980
2981
    // Step 1: Look for restart sequences to limit the maximum repetition length.
2982
    // Work backwards through the context looking for any token that begins a restart sequence.
2983
    //
2984
    // The collection `restart_sequences` is a mapping from a "head" token to all "tail"
2985
    // sequences that together comprise a restart sequence. This allows us to quickly check
2986
    // whether each token is the head of a complete sequence. Most restart sequences are actually
2987
    // a single token, and for these the "tail" is an empty vector.
2988
    //
2989
    // If the token is a "head", test all restart sequences that begin with this token
2990
    // (there will often only be one sequence for each token, but if sequences like 'aaaq1' and
2991
    // 'aaa1' are used as restart strings, both could start with 'aaa' when tokenized). The
2992
    // longest matching sequence (if any) is used to limit the maximum repetition length.
2993
    //
2994
    // Note that in the case of a short sequence contained in a longer one, this might fail to
2995
    // find the smallest value for `rep_limit`. For example, if 'amniotic' and 'ni' are both used as
2996
    // restart sequences, 'ni' will be found first, and since it's shorter it will fail to suppress
2997
    // 'otic'. This is a minor issue since fully contained restart sequences are likely to be rare.
2998
    //
2999
    // This is theoretically worst-case O(N^2) for arbitrary restart sequences, which is why we
3000
    // have already clamped the maximum tail sequence length when generating `restart_sequences`.
3001
    // With clamping, this scan is O(N) in the context length.
3002
3003
0
    int rep_limit = last_n_repeat;
3004
0
    for (int i = 0; i < last_n_repeat; ++i) {
3005
0
        llama_token token = ctx->last_tokens.rat(i);
3006
0
        auto its = ctx->dry_processed_breakers.equal_range(token);
3007
0
        if (its.first == ctx->dry_processed_breakers.end()) {
3008
0
            continue;
3009
0
        }
3010
0
        int longest_match = -1;
3011
0
        for (auto it = its.first; it != its.second; ++it) {
3012
            // Note that it->second does not contain the head token, so seq_len will be
3013
            // the restart sequence length minus 1.
3014
            // In the common case of a single-token restart sequence, it->second will be empty
3015
            // and we will trivially match.
3016
0
            int seq_len = (int)it->second.size();
3017
0
            if (seq_len > longest_match && seq_len <= (int)i) {
3018
0
                bool match = true;
3019
0
                for (int offset = 0; offset < seq_len; ++offset) {
3020
                    // The -1 when indexing `last_tokens` is because we already matched the head.
3021
0
                    if (it->second[offset] != ctx->last_tokens.rat(i - offset - 1)) {
3022
0
                        match = false;
3023
0
                        break;
3024
0
                    }
3025
0
                }
3026
0
                if (match) {
3027
0
                    longest_match = seq_len;
3028
0
                }
3029
0
            }
3030
0
        }
3031
0
        if (longest_match >= 0) {
3032
            // We found a restart sequence starting `i` tokens from the end and continuing for
3033
            // `longest_match` tokens.
3034
0
            rep_limit = i - longest_match;
3035
0
            break;
3036
0
        }
3037
0
    }
3038
0
    if (rep_limit < ctx->dry_allowed_length) {
3039
0
        return;
3040
0
    }
3041
3042
    // Step 2: Iterate in reverse over the last N tokens of the context, using the "Z-algorithm" (in
3043
    // the reverse direction) to efficiently compute the positions and lengths of suffixes appearing
3044
    // elsewhere in the context. We limit the suffix length to `rep_limit` to respect restart sequences.
3045
    //
3046
    // This algorithm is not currently documented on Wikipedia, but there is a clear description here:
3047
    // https://ivanyu.me/blog/2014/10/15/z-algorithm/
3048
    //
3049
    // The code below is adapted from the public domain implementation by the same author here:
3050
    // https://github.com/ivanyu/string-algorithms/blob/master/z_algorithm.py
3051
    //
3052
    // Example:
3053
    // Last N tokens: a b c c b c y a b c
3054
    // Repeat counts: 0 0 3 1 0 2 0 0 0 0
3055
    //                    ^
3056
    //   This `3` means that the last three tokens of the context (a b c) also appear here.
3057
    //
3058
    // This step is worst case O(N) since the Z-algorithm is linear, despite the appearance of nested
3059
    // for/while loops. This can be seen by observing that the `lt` and `rt` bounds are set after each
3060
    // repeated suffix is detected (i.e. after each while loop when n > 0). These bound variables
3061
    // ensure that the inner while loops only examine each token in the context once as the outer
3062
    // for loop iterates over the context.
3063
3064
0
    {
3065
0
        const int last = last_n_repeat - 1;
3066
3067
0
        int rt = 0;
3068
0
        int lt = 0;
3069
3070
0
        for (int k = 1; k < last_n_repeat; ++k) {
3071
0
            if (k > rt) {
3072
                // If k is outside the current Z-box, do naive computation.
3073
0
                int n = 0;
3074
0
                while (n + k < last_n_repeat && ctx->last_tokens.rat(n) == ctx->last_tokens.rat(n+k)) {
3075
0
                    ++n;
3076
0
                }
3077
0
                ctx->dry_repeat_count[last - k] = std::min(n, rep_limit);
3078
0
                if (n > 0) {
3079
0
                    lt = k;
3080
0
                    rt = k + n - 1;
3081
0
                }
3082
0
            } else {
3083
                // If k is inside the current Z-box, consider two cases.
3084
3085
0
                int p = k - lt; // Pair index.
3086
0
                int right_part_len = rt - k + 1;
3087
3088
0
                if (ctx->dry_repeat_count[last - p] < right_part_len) {
3089
0
                    int n = std::min(ctx->dry_repeat_count[last - p], rep_limit);
3090
0
                    ctx->dry_repeat_count[last - k] = n;
3091
0
                } else {
3092
0
                    int i = rt + 1;
3093
0
                    while (i < last_n_repeat && ctx->last_tokens.rat(i) == ctx->last_tokens.rat(i - k)) {
3094
0
                        i += 1;
3095
0
                    }
3096
3097
0
                    int n = std::min(i - k, rep_limit);
3098
0
                    ctx->dry_repeat_count[last - k] = n;
3099
0
                    lt = k;
3100
0
                    rt = i - 1;
3101
0
                }
3102
0
            }
3103
0
        }
3104
0
    }
3105
3106
    // Step 3: Iterate over dry_repeat_count and last_tokens, examining the maximum repeat length
3107
    // that would be generated by emitting each new token that would extend a sequence.
3108
    //
3109
    // Following the same example as above:
3110
    // Last N tokens: a b c c b c y a b c
3111
    // Repeat counts: 0 0 3 1 0 2 0 0 0 0
3112
    //
3113
    // For each non-zero, look ahead one token. This token, if emitted, would extend the repetition.
3114
    // c: 3 -> 4 (from `a b c` to `a b c c`)
3115
    // b: 1 -> 2 (from `c` to `c b`)
3116
    // y: 2 -> 3 (from `b c` to `b c y`)
3117
3118
0
    for (int i = 0; i < last_n_repeat - 1; ++i) {
3119
0
        int repeat_len = ctx->dry_repeat_count[i];
3120
0
        if (repeat_len >= ctx->dry_allowed_length) {
3121
            // This token ends a repeat, so the next token would continue one.
3122
            // By convention, the value of `repeat_len` only includes the tokens currently
3123
            // in the context, not the new token that would be added.
3124
0
            llama_token token = ctx->last_tokens.rat(last_n_repeat - 2 - i);
3125
            // Track the maximum sequence ending in this token.
3126
0
            const auto& it = ctx->dry_max_token_repeat.find(token);
3127
0
            if (it == ctx->dry_max_token_repeat.end() || it->second < repeat_len) {
3128
0
                ctx->dry_max_token_repeat[token] = repeat_len;
3129
0
            }
3130
0
        }
3131
0
    }
3132
3133
    // Step 4: Apply logit penalties based on the maximum repeat length for relevant tokens.
3134
3135
    // Prevent floating point overflow in `pow(penalty_base, exponent)` by clamping to `max_exponent`.
3136
    // Compute it from `penalty_base` and the approximate log of `std::numeric_limits<float>::max()`
3137
0
    const float FLOAT_MAX_LOG = 88.7228391f;
3138
0
    int max_exponent = 0;
3139
0
    if (ctx->dry_base > 1.000001f) {
3140
0
        max_exponent = FLOAT_MAX_LOG / std::log(ctx->dry_base);
3141
0
    }
3142
3143
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3144
0
        const auto& af_kvp = ctx->dry_max_token_repeat.find(cur_p->data[i].id);
3145
0
        if (af_kvp != ctx->dry_max_token_repeat.end()) {
3146
            // Check all sequence breakers starting with this token
3147
0
            auto range = ctx->dry_processed_breakers.equal_range(cur_p->data[i].id);
3148
0
            bool is_single_token_breaker = false;
3149
3150
0
            for (auto it = range.first; it != range.second; ++it) {
3151
0
                if (it->second.empty()) {
3152
0
                    is_single_token_breaker = true;
3153
0
                    break;
3154
0
                }
3155
0
            }
3156
3157
            // Apply penalty only if it's not a single-token sequence breaker
3158
0
            if (!is_single_token_breaker) {
3159
0
                int repeat_exp = af_kvp->second - ctx->dry_allowed_length;
3160
0
                if (max_exponent > 0 && repeat_exp > max_exponent) {
3161
0
                    repeat_exp = max_exponent;
3162
0
                }
3163
0
                float penalty = ctx->dry_multiplier * std::pow(ctx->dry_base, repeat_exp);
3164
0
                cur_p->data[i].logit -= penalty;
3165
0
            }
3166
0
        }
3167
0
    }
3168
3169
0
    cur_p->sorted = false;
3170
0
}
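For reference, a minimal forward-direction sketch of the standard Z-function named in Step 2 above; the loop in the sampler runs the same computation in reverse over the ring buffer (via rat()) and clamps each value to rep_limit. This is an illustrative standalone helper, not code from this file:

#include <algorithm>
#include <vector>

// Standard Z-function: z[k] = length of the longest common prefix of s and s[k..).
// Running it on the reversed last-N-token window gives, for each position, the length
// of the longest suffix of the window that also ends there, i.e. dry_repeat_count
// before the rep_limit clamp.
static std::vector<int> z_function(const std::vector<int> & s) {
    const int n = (int) s.size();
    std::vector<int> z(n, 0);
    int l = 0, r = 0;
    for (int k = 1; k < n; ++k) {
        if (k < r) {
            z[k] = std::min(r - k, z[k - l]);
        }
        while (k + z[k] < n && s[z[k]] == s[k + z[k]]) {
            z[k]++;
        }
        if (k + z[k] > r) {
            l = k;
            r = k + z[k];
        }
    }
    return z;
}

// Example from the comments above (a=0, b=1, c=2, y=3):
// window   a b c c b c y a b c   -> reversed: c b a y c b c c b a
// z(rev) = 0 0 0 0 2 0 1 3 0 0   -> dry_repeat_count[i] = z[last - i] = 0 0 3 1 0 2 0 0 0 0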
3171
3172
0
static void llama_sampler_dry_reset(struct llama_sampler * smpl) {
3173
0
    auto * ctx = (llama_sampler_dry *) smpl->ctx;
3174
0
    ctx->last_tokens.clear();
3175
0
    ctx->dry_repeat_count.clear();
3176
0
    ctx->dry_max_token_repeat.clear();
3177
0
}
3178
3179
0
static struct llama_sampler * llama_sampler_dry_clone(const struct llama_sampler * smpl) {
3180
0
    const auto * ctx = (llama_sampler_dry *) smpl->ctx;
3181
3182
0
    llama_vocab dummy_vocab;
3183
3184
    // a dummy vocab is passed because it is only needed for raw sequence-breaker processing, which has already been done; the processed breakers are simply copied below
3185
0
    auto * result = llama_sampler_init_dry(&dummy_vocab, ctx->total_context_size, ctx->dry_multiplier, ctx->dry_base, ctx->dry_allowed_length, ctx->dry_penalty_last_n, NULL, 0);
3186
3187
    // Copy the state, including the processed breakers
3188
0
    {
3189
0
        auto * result_ctx = (llama_sampler_dry *) result->ctx;
3190
0
        result_ctx->dry_processed_breakers = ctx->dry_processed_breakers;
3191
0
        result_ctx->dry_repeat_count = ctx->dry_repeat_count;
3192
0
        result_ctx->dry_max_token_repeat = ctx->dry_max_token_repeat;
3193
0
        result_ctx->last_tokens = ctx->last_tokens;
3194
0
    }
3195
3196
0
    return result;
3197
0
}
3198
3199
0
static void llama_sampler_dry_free(struct llama_sampler * smpl) {
3200
0
    delete (llama_sampler_dry *) smpl->ctx;
3201
0
}
3202
3203
static struct llama_sampler_i llama_sampler_dry_i = {
3204
    /* .name              = */ llama_sampler_dry_name,
3205
    /* .accept            = */ llama_sampler_dry_accept,
3206
    /* .apply             = */ llama_sampler_dry_apply,
3207
    /* .reset             = */ llama_sampler_dry_reset,
3208
    /* .clone             = */ llama_sampler_dry_clone,
3209
    /* .free              = */ llama_sampler_dry_free,
3210
    /* .backend_init      = */ nullptr,
3211
    /* .backend_accept    = */ nullptr,
3212
    /* .backend_apply     = */ nullptr,
3213
    /* .backend_set_input = */ nullptr,
3214
};
3215
3216
0
struct llama_sampler * llama_sampler_init_dry(const struct llama_vocab * vocab, int32_t n_ctx_train, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const char** seq_breakers, size_t num_breakers) {
3217
0
    int32_t effective_dry_penalty_last_n = (dry_penalty_last_n == -1) ? n_ctx_train : std::max(dry_penalty_last_n, 0);
3218
0
    std::unordered_multimap<llama_token, std::vector<llama_token>> processed_breakers;
3219
0
    const int MAX_CHAR_LEN = 40;
3220
0
    const int MAX_SEQ_LEN = 20;
3221
3222
0
    const bool dry_enabled = (dry_multiplier != 0.0f && dry_base >= 1.0f && dry_penalty_last_n != 0);
3223
3224
0
    if (!dry_enabled) {
3225
0
        return llama_sampler_init_empty("?dry");
3226
0
    }
3227
3228
0
    if (dry_enabled && seq_breakers != nullptr && num_breakers > 0) {
3229
        // Process sequence breakers
3230
0
        for (size_t i = 0; i < num_breakers; ++i) {
3231
0
            if (seq_breakers[i] == nullptr || std::strlen(seq_breakers[i]) == 0) {
3232
0
                LLAMA_LOG_WARN("skipping null or empty DRY sequence breaker at index %zu\n", i);
3233
0
                continue;
3234
0
            }
3235
3236
0
            std::string sequence_break(seq_breakers[i]);
3237
0
            if (sequence_break.empty()) {
3238
0
                LLAMA_LOG_WARN("skipping empty DRY sequence breaker\n");
3239
0
                continue;
3240
0
            }
3241
3242
0
            if (sequence_break.size() > MAX_CHAR_LEN) {
3243
0
                LLAMA_LOG_WARN("truncating DRY sequence breaker to %d characters\n", MAX_CHAR_LEN);
3244
0
                sequence_break.resize(MAX_CHAR_LEN);
3245
0
            }
3246
3247
0
            get_overlapping_token_sequences(*vocab, sequence_break, processed_breakers, MAX_SEQ_LEN);
3248
0
        }
3249
0
    }
3250
3251
0
    return llama_sampler_init(
3252
0
        /* .iface = */ &llama_sampler_dry_i,
3253
0
        /* .ctx   = */ new llama_sampler_dry {
3254
0
            /* .total_context_size     = */ n_ctx_train,
3255
0
            /* .dry_multiplier         = */ dry_multiplier,
3256
0
            /* .dry_base               = */ dry_base,
3257
0
            /* .dry_allowed_length     = */ dry_allowed_length,
3258
0
            /* .dry_penalty_last_n     = */ dry_penalty_last_n,
3259
0
            /* .dry_processed_breakers = */ std::move(processed_breakers),
3260
0
            /* .dry_repeat_count       = */ dry_enabled ? std::vector<int>(effective_dry_penalty_last_n, 0) : std::vector<int>{},
3261
0
            /* .dry_max_token_repeat   = */ {},
3262
0
            /* .last_tokens            = */ dry_enabled ? ring_buffer<llama_token>(effective_dry_penalty_last_n) : ring_buffer<llama_token>(0),
3263
0
        }
3264
0
    );
3265
0
}
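A hedged usage sketch of the constructor above; vocab and n_ctx_train are assumed to come from a loaded model, and the numeric values are illustrative rather than recommended defaults. Once a repeat of length repeat_len >= dry_allowed_length is detected, the continuation token's logit is reduced by dry_multiplier * pow(dry_base, repeat_len - dry_allowed_length):

// Hypothetical usage (parameter values are illustrative only):
const char * seq_breakers[] = { "\n", ":", "\"", "*" };

struct llama_sampler * dry = llama_sampler_init_dry(
    /* vocab              = */ vocab,        // assumed: const llama_vocab * of a loaded model
    /* n_ctx_train        = */ n_ctx_train,  // assumed: training context size of that model
    /* dry_multiplier     = */ 0.8f,
    /* dry_base           = */ 1.75f,
    /* dry_allowed_length = */ 2,
    /* dry_penalty_last_n = */ -1,           // -1 -> penalize across the whole training-context length
    /* seq_breakers       = */ seq_breakers,
    /* num_breakers       = */ 4);

// With these values a 4-token repeat costs 0.8 * 1.75^(4 - 2) = 2.45 logits.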
3266
3267
// wrapper for test-sampling.cpp
3268
0
struct llama_sampler * llama_sampler_init_dry_testing(int32_t context_size, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const std::vector<std::vector<llama_token>>& seq_breakers) {
3269
0
    llama_vocab dummy_vocab;
3270
0
    auto * result = llama_sampler_init_dry(&dummy_vocab, context_size, dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, NULL, 0);
3271
0
    auto * ctx = (llama_sampler_dry *) result->ctx;
3272
3273
    // Process the token-based sequence breakers
3274
0
    ctx->dry_processed_breakers.clear();
3275
0
    if (seq_breakers.empty()) {
3276
0
        LLAMA_LOG_WARN("empty DRY sequence breakers list in llama_sampler_init_dry_testing\n");
3277
0
    } else {
3278
0
        for (const auto& breaker : seq_breakers) {
3279
0
            if (breaker.empty()) {
3280
0
                LLAMA_LOG_WARN("skipping DRY empty sequence breaker\n");
3281
0
                continue;
3282
0
            }
3283
0
            llama_token head_token = breaker[0];
3284
0
            std::vector<llama_token> tail_tokens(breaker.begin() + 1, breaker.end());
3285
0
            ctx->dry_processed_breakers.emplace(head_token, std::move(tail_tokens));
3286
0
        }
3287
3288
0
        if (ctx->dry_processed_breakers.empty()) {
3289
0
            LLAMA_LOG_WARN("no valid DRY sequence breakers processed in llama_sampler_init_dry_testing\n");
3290
0
        }
3291
0
    }
3292
3293
0
    return result;
3294
0
}
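A hedged usage sketch of the testing wrapper above; the token ids are arbitrary illustrative values, chosen only to show how each breaker is split into a head token and a tail vector:

// Hypothetical test usage (token ids are made up for illustration):
std::vector<std::vector<llama_token>> breakers = {
    { 13 },         // single-token breaker: head = 13, empty tail
    { 28705, 13 },  // multi-token breaker:  head = 28705, tail = { 13 }
};

struct llama_sampler * smpl = llama_sampler_init_dry_testing(
    /* context_size       = */ 4096,
    /* dry_multiplier     = */ 0.8f,
    /* dry_base           = */ 1.75f,
    /* dry_allowed_length = */ 2,
    /* dry_penalty_last_n = */ -1,
    breakers);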
3295
3296
// logit-bias
3297
3298
struct llama_sampler_logit_bias : public llama_sampler_backend {
3299
    const int32_t n_vocab;
3300
3301
    const std::vector<llama_logit_bias> logit_bias;
3302
3303
    std::vector<llama_logit_bias> to_search;
3304
3305
    struct ggml_tensor * inp_logit_bias;
3306
    struct ggml_tensor * inp_logit_idxs;
3307
3308
    ggml_context_ptr        inp_ctx;
3309
    ggml_backend_buffer_ptr inp_buf;
3310
};
3311
3312
0
static const char * llama_sampler_logit_bias_name(const struct llama_sampler * smpl) {
3313
0
    auto * ctx = (llama_sampler_logit_bias *) smpl->ctx;
3314
0
    return ctx->get_name();
3315
0
}
3316
3317
0
static void llama_sampler_logit_bias_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
3318
0
    auto * ctx = (llama_sampler_logit_bias *) smpl->ctx;
3319
3320
0
    if (ctx->logit_bias.empty()) {
3321
0
        return;
3322
0
    }
3323
3324
0
    ctx->to_search.clear();
3325
3326
    // fast path: apply the bias directly to candidates that are still in vocabulary order (i.e. the candidate at index lb.token still has id == lb.token)
3327
0
    for (const auto & lb : ctx->logit_bias) {
3328
0
        if (lb.token >= 0 && cur_p->size > (size_t) lb.token && cur_p->data[lb.token].id == lb.token) {
3329
0
            cur_p->data[lb.token].logit += lb.bias;
3330
0
        } else {
3331
0
            ctx->to_search.push_back(lb);
3332
0
        }
3333
0
    }
3334
3335
0
    if (ctx->to_search.empty()) {
3336
0
        return;
3337
0
    }
3338
3339
    // search for the remaining candidates that were not found in the previous step
3340
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3341
0
        for (const auto & lb : ctx->to_search) {
3342
0
            if (cur_p->data[i].id == lb.token) {
3343
0
                cur_p->data[i].logit += lb.bias;
3344
0
                break;
3345
0
            }
3346
0
        }
3347
0
    }
3348
0
}
3349
3350
0
static struct llama_sampler * llama_sampler_logit_bias_clone(const struct llama_sampler * smpl) {
3351
0
    const auto * ctx = (const llama_sampler_logit_bias *) smpl->ctx;
3352
0
    return llama_sampler_init_logit_bias(ctx->n_vocab, ctx->logit_bias.size(), ctx->logit_bias.data());
3353
0
}
3354
3355
0
static void llama_sampler_logit_bias_free(struct llama_sampler * smpl) {
3356
0
    delete (llama_sampler_logit_bias *) smpl->ctx;
3357
0
}
3358
3359
static void llama_sampler_logit_bias_backend_apply(
3360
        struct llama_sampler      * smpl,
3361
        struct ggml_context       * ctx,
3362
        struct ggml_cgraph        * gf,
3363
0
        struct llama_sampler_data * data) {
3364
0
    GGML_UNUSED(gf);
3365
0
    GGML_UNUSED(ctx);
3366
3367
0
    auto * sctx = (llama_sampler_logit_bias *) smpl->ctx;
3368
0
    if (sctx->logit_bias.empty()) {
3369
0
        return;
3370
0
    }
3371
3372
0
    ggml_tensor * cur = ggml_fill(ctx, data->logits, 0.0f);
3373
3374
0
    cur = ggml_reshape_2d(ctx, cur, 1, ggml_nelements(cur));
3375
0
    cur = ggml_set_rows(ctx, cur, sctx->inp_logit_bias, sctx->inp_logit_idxs);
3376
0
    cur = ggml_reshape_1d(ctx, cur, ggml_nelements(cur));
3377
3378
0
    data->logits = ggml_add(ctx, data->logits, cur);
3379
0
}
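The graph built above scatters the bias values into a zeroed copy of the logits row with ggml_set_rows and adds the result back onto the logits; a plain-CPU sketch of the equivalent computation (hypothetical helper, not the ggml path):

#include <cstdint>
#include <utility>
#include <vector>

// CPU equivalent of the backend graph: logits[token] += bias for every (token, bias) pair.
static void apply_logit_bias(std::vector<float> & logits,
                             const std::vector<std::pair<int32_t, float>> & bias) {
    std::vector<float> scatter(logits.size(), 0.0f);  // ggml_fill(..., 0.0f)
    for (const auto & [token, b] : bias) {
        scatter[token] = b;                           // ggml_set_rows via inp_logit_idxs
    }
    for (size_t i = 0; i < logits.size(); ++i) {
        logits[i] += scatter[i];                      // ggml_add
    }
}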
3380
3381
0
static void llama_sampler_logit_bias_backend_set_input(struct llama_sampler * smpl) {
3382
0
    auto * sctx = (llama_sampler_logit_bias *) smpl->ctx;
3383
0
    if (sctx->logit_bias.empty()) {
3384
0
        return;
3385
0
    }
3386
3387
0
    GGML_ASSERT(sctx->inp_logit_bias != nullptr);
3388
0
    GGML_ASSERT(sctx->inp_logit_idxs != nullptr);
3389
3390
0
    const size_t n = sctx->logit_bias.size();
3391
3392
0
    std::vector<float>   data_logit_bias(n, 0.0f);
3393
0
    std::vector<int32_t> data_logit_idxs(n, 0);
3394
0
    for (size_t i = 0; i < n; ++i) {
3395
0
        const auto & lb = sctx->logit_bias[i];
3396
0
        GGML_ASSERT(lb.token >= 0 && lb.token < (int32_t) sctx->n_vocab);
3397
0
        data_logit_bias[i] = lb.bias;
3398
0
        data_logit_idxs[i] = lb.token;
3399
0
    }
3400
3401
0
    ggml_backend_tensor_set(sctx->inp_logit_bias, data_logit_bias.data(), 0, ggml_nbytes(sctx->inp_logit_bias));
3402
0
    ggml_backend_tensor_set(sctx->inp_logit_idxs, data_logit_idxs.data(), 0, ggml_nbytes(sctx->inp_logit_idxs));
3403
0
}
3404
3405
static bool llama_sampler_logit_bias_backend_init(
3406
        struct llama_sampler       * smpl,
3407
0
        ggml_backend_buffer_type_t   buft) {
3408
0
    auto * sctx = (llama_sampler_logit_bias *) smpl->ctx;
3409
3410
0
    sctx->init(true);
3411
3412
0
    if (sctx->logit_bias.empty()) {
3413
0
        return true;
3414
0
    }
3415
3416
0
    ggml_init_params params = {
3417
0
        /*.mem_size   =*/ 2*ggml_tensor_overhead(),
3418
0
        /*.mem_buffer =*/ nullptr,
3419
0
        /*.no_alloc   =*/ true,
3420
0
    };
3421
3422
0
    sctx->inp_ctx.reset(ggml_init(params));
3423
3424
0
    const size_t n = sctx->logit_bias.size();
3425
3426
0
    sctx->inp_logit_bias = ggml_new_tensor_2d(sctx->inp_ctx.get(), GGML_TYPE_F32, 1, n);
3427
0
    ggml_set_name(sctx->inp_logit_bias, "logit_bias");
3428
0
    ggml_set_input(sctx->inp_logit_bias);
3429
3430
0
    sctx->inp_logit_idxs = ggml_new_tensor_1d(sctx->inp_ctx.get(), GGML_TYPE_I32, n);
3431
0
    ggml_set_name(sctx->inp_logit_idxs, "logit_idxs");
3432
0
    ggml_set_input(sctx->inp_logit_idxs);
3433
3434
    // Allocate all tensors from our context to the backend
3435
0
    sctx->inp_buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(sctx->inp_ctx.get(), buft));
3436
3437
0
    ggml_backend_buffer_clear(sctx->inp_buf.get(), 0);
3438
3439
0
    return true;
3440
0
}
3441
3442
static struct llama_sampler_i llama_sampler_logit_bias_i = {
3443
    /* .name              = */ llama_sampler_logit_bias_name,
3444
    /* .accept            = */ nullptr,
3445
    /* .apply             = */ llama_sampler_logit_bias_apply,
3446
    /* .reset             = */ nullptr,
3447
    /* .clone             = */ llama_sampler_logit_bias_clone,
3448
    /* .free              = */ llama_sampler_logit_bias_free,
3449
    /* .backend_init      = */ llama_sampler_logit_bias_backend_init,
3450
    /* .backend_accept    = */ nullptr,
3451
    /* .backend_apply     = */ llama_sampler_logit_bias_backend_apply,
3452
    /* .backend_set_input = */ llama_sampler_logit_bias_backend_set_input,
3453
};
3454
3455
struct llama_sampler * llama_sampler_init_logit_bias(
3456
                         int32_t   n_vocab,
3457
                         int32_t   n_logit_bias,
3458
0
          const llama_logit_bias * logit_bias) {
3459
0
    const bool is_empty = n_logit_bias <= 0;
3460
3461
0
    if (is_empty) {
3462
0
        return llama_sampler_init_empty("?logit-bias");
3463
0
    }
3464
3465
0
    return llama_sampler_init(
3466
0
        /* .iface = */ &llama_sampler_logit_bias_i,
3467
0
        /* .ctx   = */ new llama_sampler_logit_bias {
3468
0
            ("logit-bias"),
3469
0
            /* .n_vocab        = */ n_vocab,
3470
0
            /* .logit_bias     = */ std::vector<llama_logit_bias>(logit_bias, logit_bias + n_logit_bias),
3471
0
            /* .to_search      = */ {},
3472
0
            /* .inp_logit_bias = */ nullptr,
3473
0
            /* .inp_logit_idxs = */ nullptr,
3474
0
            /* .inp_ctx        = */ nullptr,
3475
0
            /* .inp_buf        = */ nullptr,
3476
0
        }
3477
0
    );
3478
0
}
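A hedged usage sketch of the constructor above; the token ids are illustrative, and llama_logit_bias is the { token, bias } pair used throughout this file:

// Hypothetical usage (token ids are made up for illustration):
const llama_logit_bias biases[] = {
    { /* .token = */ 15043, /* .bias = */  2.0f },  // nudge one token up
    { /* .token = */   198, /* .bias = */ -4.0f },  // push another down
};

struct llama_sampler * lb = llama_sampler_init_logit_bias(
    /* n_vocab      = */ n_vocab,  // assumed: vocabulary size of the model
    /* n_logit_bias = */ 2,
    /* logit_bias   = */ biases);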
3479
3480
// infill
3481
3482
//#define GGML_DEBUG_SAMPLER_INFILL
3483
3484
struct llama_sampler_infill {
3485
    const struct llama_vocab * vocab;
3486
3487
    std::vector<char> buf0;
3488
    std::vector<char> buf1;
3489
};
3490
3491
0
static const char * llama_sampler_infill_name(const struct llama_sampler * /*smpl*/) {
3492
0
    return "infill";
3493
0
}
3494
3495
0
static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
3496
0
    auto * ctx = (llama_sampler_infill *) smpl->ctx;
3497
3498
0
    llama_sampler_softmax_impl(cur_p, true);
3499
3500
#if defined(GGML_DEBUG_SAMPLER_INFILL)
3501
#define LOG_DBG_CUR LLAMA_LOG_DEBUG
3502
#else
3503
0
#define LOG_DBG_CUR(...)
3504
0
#endif
3505
3506
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3507
0
        LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit);
3508
0
    }
3509
3510
0
    float p_txt_sum = 0.0f;
3511
0
    float p_eog_sum = 0.0f;
3512
3513
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3514
0
        if (ctx->vocab->is_eog(cur_p->data[i].id)) {
3515
0
            p_eog_sum += cur_p->data[i].p;
3516
0
        } else {
3517
0
            p_txt_sum += cur_p->data[i].p;
3518
0
        }
3519
0
    }
3520
3521
0
    const float rat = p_eog_sum == 0.0 ? INFINITY : p_txt_sum / p_eog_sum; GGML_UNUSED(rat);
3522
3523
0
    LOG_DBG_CUR("%s: p_txt_sum = %.2f, p_eog_sum = %.2f, rat = %.2f, n = %zu\n", __func__, p_txt_sum, p_eog_sum, rat, cur_p->size);
3524
3525
0
    if (3*p_eog_sum*cur_p->size > p_txt_sum) {
3526
0
        LOG_DBG_CUR("%s: the ratio p_txt/p_eog = %.2f is too low -> sampling EOG\n", __func__, p_txt_sum/p_eog_sum);
3527
3528
        // keep just the EOG tokens
3529
0
        const auto size_org = cur_p->size;
3530
3531
0
        cur_p->size = 0;
3532
3533
0
        float p_sum = 0.0f;
3534
3535
0
        for (size_t i = 0; i < size_org; ++i) {
3536
0
            if (ctx->vocab->is_eog(cur_p->data[i].id)) {
3537
0
                p_sum += cur_p->data[i].p;
3538
3539
0
                cur_p->data[cur_p->size++] = cur_p->data[i];
3540
0
            }
3541
0
        }
3542
3543
        // normalize probs
3544
0
        for (size_t i = 0; i < cur_p->size; ++i) {
3545
0
            cur_p->data[i].p /= p_sum;
3546
0
        }
3547
3548
0
        return;
3549
0
    }
3550
3551
0
    size_t n_combined = 0; GGML_UNUSED(n_combined);
3552
3553
    // combine tokens with common prefix
3554
0
    for (size_t i0 = 0; i0 < cur_p->size; ++i0) {
3555
0
        for (size_t i1 = 0; i1 < cur_p->size; ++i1) {
3556
0
            if (cur_p->data[i0].logit == -INFINITY) {
3557
0
                break;
3558
0
            }
3559
3560
0
            if (i0 == i1 || cur_p->data[i1].logit == -INFINITY) {
3561
0
                continue;
3562
0
            }
3563
3564
0
            int len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
3565
0
            if (len0 < 0) {
3566
0
                ctx->buf0.resize(-len0); // a negative return value is the negated required buffer size
3567
0
                len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
3568
0
                assert(len0 > 0);
3569
0
            }
3570
3571
0
            int len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
3572
0
            if (len1 < 0) {
3573
0
                ctx->buf1.resize(-len1); // a negative return value is the negated required buffer size
3574
0
                len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
3575
0
                assert(len1 > 0);
3576
0
            }
3577
3578
            // token i0 is a prefix of token i1
3579
0
            if (len0 > 0 && len0 <= len1 && memcmp(ctx->buf0.data(), ctx->buf1.data(), len0) == 0) {
3580
0
                int dst = i0;
3581
0
                int src = i1;
3582
3583
                // merge into the token with higher probability
3584
0
                if (cur_p->data[i1].p > cur_p->data[i0].p) {
3585
0
                    std::swap(dst, src);
3586
0
                }
3587
3588
0
                cur_p->data[dst].p += cur_p->data[src].p;
3589
0
                cur_p->data[src].logit = -INFINITY;
3590
0
                cur_p->data[src].p     = 0.0f;
3591
3592
0
                n_combined++;
3593
0
            }
3594
0
        }
3595
0
    }
3596
3597
0
    size_t n_non_eog = 0;
3598
3599
0
    size_t size_org = cur_p->size;
3600
3601
0
    float p_sum = 0.0f;
3602
0
    float thold = 0.2f;
3603
3604
0
    cur_p->size = 0;
3605
3606
0
    LOG_DBG_CUR("%s: n_combined = %zu, applying thold = %.3f\n", __func__, n_combined, thold);
3607
3608
0
    for (size_t i = 0; i < size_org; ++i) {
3609
0
        const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id);
3610
3611
0
        if (cur_p->data[i].p < thold && !is_eog) {
3612
0
            continue;
3613
0
        }
3614
3615
0
        if (!is_eog) {
3616
0
            ++n_non_eog;
3617
0
        }
3618
3619
0
        p_sum += cur_p->data[i].p;
3620
3621
        // keep this token
3622
0
        cur_p->data[cur_p->size++] = cur_p->data[i];
3623
0
    }
3624
3625
0
    LOG_DBG_CUR("%s: n_non_eog = %zu\n", __func__, n_non_eog);
3626
3627
    // if no non-EOG tokens are left -> reduce cur_p to single EOT token
3628
0
    if (n_non_eog == 0) {
3629
0
        cur_p->size = 1;
3630
0
        cur_p->data[0].id = ctx->vocab->token_eot();
3631
0
        if (cur_p->data[0].id == LLAMA_TOKEN_NULL) {
3632
0
            cur_p->data[0].id = ctx->vocab->token_eos();
3633
0
        }
3634
0
        cur_p->data[0].logit = 1.0f;
3635
3636
0
        GGML_ASSERT(cur_p->data[0].id != LLAMA_TOKEN_NULL);
3637
3638
0
        return;
3639
0
    }
3640
3641
    // normalize probs
3642
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3643
0
        cur_p->data[i].p /= p_sum;
3644
3645
0
        LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit);
3646
0
    }
3647
3648
0
    size_org = cur_p->size;
3649
0
    p_sum = 0.0f;
3650
0
    thold = 1.0/(n_non_eog + 1);
3651
3652
0
    cur_p->size = 0;
3653
3654
0
    LOG_DBG_CUR("%s: applying thold = %.3f\n", __func__, thold);
3655
3656
0
    for (size_t i = 0; i < size_org; ++i) {
3657
0
        const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id);
3658
3659
0
        if (cur_p->data[i].p < thold && !is_eog) {
3660
0
            continue;
3661
0
        }
3662
3663
0
        p_sum += cur_p->data[i].p;
3664
3665
0
        cur_p->data[cur_p->size++] = cur_p->data[i];
3666
0
    }
3667
3668
    // normalize probs
3669
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3670
0
        cur_p->data[i].p /= p_sum;
3671
3672
0
        LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit);
3673
0
    }
3674
3675
0
#undef LOG_DBG_CUR
3676
0
}
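Worked example of the EOG check above, with illustrative numbers: if 10 candidates remain and the EOG tokens carry p_eog_sum = 0.05 against p_txt_sum = 0.95, then 3 * p_eog_sum * cur_p->size = 3 * 0.05 * 10 = 1.5 > 0.95, so only the EOG tokens are kept and renormalized. With p_eog_sum = 0.01 over the same 10 candidates the check gives 0.3 < 0.99, and sampling continues over the remaining text tokens, subject to the 0.2 and 1/(n_non_eog + 1) probability thresholds applied afterwards.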
3677
3678
0
static struct llama_sampler * llama_sampler_infill_clone(const struct llama_sampler * smpl) {
3679
0
    const auto * ctx = (const llama_sampler_infill *) smpl->ctx;
3680
0
    return llama_sampler_init_infill(ctx->vocab);
3681
0
}
3682
3683
0
static void llama_sampler_infill_free(struct llama_sampler * smpl) {
3684
0
    delete (llama_sampler_infill *) smpl->ctx;
3685
0
}
3686
3687
static struct llama_sampler_i llama_sampler_infill_i = {
3688
    /* .name              = */ llama_sampler_infill_name,
3689
    /* .accept            = */ nullptr,
3690
    /* .apply             = */ llama_sampler_infill_apply,
3691
    /* .reset             = */ nullptr,
3692
    /* .clone             = */ llama_sampler_infill_clone,
3693
    /* .free              = */ llama_sampler_infill_free,
3694
    /* .backend_init      = */ nullptr,
3695
    /* .backend_accept    = */ nullptr,
3696
    /* .backend_apply     = */ nullptr,
3697
    /* .backend_set_input = */ nullptr,
3698
};
3699
3700
0
struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab) {
3701
0
    return llama_sampler_init(
3702
0
        /* .iface = */ &llama_sampler_infill_i,
3703
0
        /* .ctx   = */ new llama_sampler_infill {
3704
0
            /* .vocab = */ vocab,
3705
0
            /* .buf0  = */ std::vector<char>(512),
3706
0
            /* .buf1  = */ std::vector<char>(512),
3707
0
        }
3708
0
    );
3709
0
}
3710
3711
// utils
3712
3713
0
uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl) {
3714
0
    if (smpl->iface == &llama_sampler_dist_i) {
3715
0
        return ((const llama_sampler_dist *) smpl->ctx)->seed_cur;
3716
0
    }
3717
3718
0
    if (smpl->iface == &llama_sampler_mirostat_i) {
3719
0
        return ((const llama_sampler_mirostat *) smpl->ctx)->seed_cur;
3720
0
    }
3721
3722
0
    if (smpl->iface == &llama_sampler_mirostat_v2_i) {
3723
0
        return ((const llama_sampler_mirostat_v2 *) smpl->ctx)->seed_cur;
3724
0
    }
3725
3726
0
    if (smpl->iface == &llama_sampler_chain_i) {
3727
0
        const auto * ctx = (const llama_sampler_chain *) smpl->ctx;
3728
0
        for (auto it = ctx->samplers.rbegin(); it != ctx->samplers.rend(); ++it) {
3729
0
            const uint32_t seed = llama_sampler_get_seed(it->ptr);
3730
0
            if (seed != LLAMA_DEFAULT_SEED) {
3731
0
                return seed;
3732
0
            }
3733
0
        }
3734
0
    }
3735
3736
0
    return LLAMA_DEFAULT_SEED;
3737
0
}
3738
3739
// perf
3740
3741
0
struct llama_perf_sampler_data llama_perf_sampler(const struct llama_sampler * chain) {
3742
0
    struct llama_perf_sampler_data data = {};
3743
3744
0
    if (chain == nullptr || chain->iface != &llama_sampler_chain_i) {
3745
0
        GGML_ABORT("%s: invalid sampler passed - requires a sampler created with llama_sampler_chain_init()\n", __func__);
3746
0
    }
3747
3748
0
    const auto * ctx = (const struct llama_sampler_chain *) chain->ctx;
3749
3750
0
    data.t_sample_ms = 1e-3 * ctx->t_sample_us;
3751
0
    data.n_sample    = std::max(0, ctx->n_sample);
3752
3753
0
    return data;
3754
0
}
3755
3756
0
void llama_perf_sampler_print(const struct llama_sampler * chain) {
3757
0
    const auto data = llama_perf_sampler(chain);
3758
3759
0
    LLAMA_LOG_INFO("%s:    samplers time = %10.2f ms / %5d runs\n", __func__, data.t_sample_ms, data.n_sample);
3760
0
}
3761
3762
0
void llama_perf_sampler_reset(struct llama_sampler * chain) {
3763
0
    if (chain == nullptr || chain->iface != &llama_sampler_chain_i) {
3764
0
        GGML_ABORT("%s: invalid sampler passed - requires a sampler created with llama_sampler_chain_init()\n", __func__);
3765
0
    }
3766
3767
0
    auto * ctx = (struct llama_sampler_chain *) chain->ctx;
3768
3769
0
    ctx->t_sample_us = 0;
3770
0
    ctx->n_sample    = 0;
3771
0
}