Coverage Report

Created: 2026-01-18 06:10

/src/llama.cpp/src/llama-sampling.cpp
Line
Count
Source
1
#include "llama-sampling.h"
2
3
#include "llama-impl.h"
4
#include "llama-vocab.h"
5
#include "llama-grammar.h"
6
7
#include "ggml-cpp.h"
8
9
#include <array>
10
#include <algorithm>
11
#include <cassert>
12
#include <cfloat>
13
#include <chrono>
14
#include <cmath>
15
#include <cstdlib>
16
#include <cstring>
17
#include <ctime>
18
#include <numeric>
19
#include <random>
20
#include <unordered_map>
21
#include <stdexcept>
22
23
// the ring buffer works similarly to std::deque, but with a fixed capacity
24
template<typename T>
25
struct ring_buffer {
26
    ring_buffer(size_t cap) : capacity(cap), data(cap) {}
27
28
0
    T & front() {
29
0
        if (sz == 0) {
30
0
            throw std::runtime_error("ring buffer is empty");
31
0
        }
32
0
        return data[first];
33
0
    }
34
35
    const T & front() const {
36
        if (sz == 0) {
37
            throw std::runtime_error("ring buffer is empty");
38
        }
39
        return data[first];
40
    }
41
42
    T & back() {
43
        if (sz == 0) {
44
            throw std::runtime_error("ring buffer is empty");
45
        }
46
        return data[(first + sz - 1) % capacity]; // pos points one past the last element
47
    }
48
49
    const T & back() const {
50
        if (sz == 0) {
51
            throw std::runtime_error("ring buffer is empty");
52
        }
53
        return data[(first + sz - 1) % capacity]; // pos points one past the last element
54
    }
55
56
    void push_back(const T & value) {
57
        if (capacity == 0) {
58
            throw std::runtime_error("ring buffer: capacity is zero");
59
        }
60
61
        if (sz == capacity) {
62
            // advance the start when buffer is full
63
            first = (first + 1) % capacity;
64
        } else {
65
            sz++;
66
        }
67
        data[pos] = value;
68
        pos = (pos + 1) % capacity;
69
    }
70
71
    T pop_front() {
72
        if (sz == 0) {
73
            throw std::runtime_error("ring buffer is empty");
74
        }
75
        T value = data[first];
76
        first = (first + 1) % capacity;
77
        sz--;
78
        return value;
79
    }
80
81
    //T & operator[](size_t i) {
82
    //    if (i >= sz) {
83
    //        throw std::runtime_error("ring buffer: index out of bounds");
84
    //    }
85
    //    return data[(first + i) % capacity];
86
    //}
87
88
    //const T & at(size_t i) const {
89
    //    if (i >= sz) {
90
    //        throw std::runtime_error("ring buffer: index out of bounds");
91
    //    }
92
    //    return data[(first + i) % capacity];
93
    //}
94
95
    const T & rat(size_t i) const {
96
        if (i >= sz) {
97
            throw std::runtime_error("ring buffer: index out of bounds");
98
        }
99
        return data[(first + sz - i - 1) % capacity];
100
    }
101
102
    std::vector<T> to_vector() const {
103
        std::vector<T> result;
104
        result.reserve(sz);
105
        for (size_t i = 0; i < sz; i++) {
106
            result.push_back(data[(first + i) % capacity]);
107
        }
108
        return result;
109
    }
110
111
    void clear() {
112
        // only reset the bookkeeping state; the stored elements themselves are not cleared
113
        sz = 0;
114
        first = 0;
115
        pos = 0;
116
    }
117
118
    bool empty() const {
119
        return sz == 0;
120
    }
121
122
    size_t size() const {
123
        return sz;
124
    }
125
126
    size_t capacity = 0;
127
    size_t sz = 0;
128
    size_t first = 0;
129
    size_t pos = 0;
130
131
    std::vector<T> data;
132
};
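
// Editorial sketch (not part of the original file): how the ring_buffer above
// behaves once it wraps around. The helper name ring_buffer_demo is hypothetical.
static void ring_buffer_demo() {
    ring_buffer<int> rb(3);
    rb.push_back(1);
    rb.push_back(2);
    rb.push_back(3);
    rb.push_back(4); // buffer is full: the oldest element (1) is overwritten

    assert(rb.front() == 2);     // oldest surviving element
    assert(rb.back()  == 4);     // most recently pushed element
    assert(rb.rat(0)  == 4);     // reverse indexing: rat(0) is the newest
    assert(rb.rat(2)  == 2);
    assert(rb.to_vector() == (std::vector<int>{2, 3, 4}));
    assert(rb.pop_front() == 2); // removes and returns the oldest element
    assert(rb.size() == 2);
}
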
133
134
// writes result in res, does not mutate cur
135
0
static void llama_token_data_array_partial_sort(const llama_token_data_array & cur, int npartial, std::vector<llama_token_data> & res) {
136
0
    static const auto comp = [](const llama_token_data & a, const llama_token_data & b) {
137
0
        return a.logit > b.logit;
138
0
    };
139
140
0
    constexpr int   nbuckets     = 128;
141
0
    constexpr float bucket_low   = -10.0f;
142
0
    constexpr float bucket_high  =  10.0f;
143
0
    constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
144
0
    constexpr float bucket_inter = -bucket_low * bucket_scale;
145
146
0
    std::vector<int> bucket_idx;
147
0
    std::vector<int> histo(nbuckets, 0);
148
149
0
    std::vector<llama_token_data*> bucket_ptrs;
150
151
0
    bucket_idx.reserve(cur.size);
152
153
0
    for (int i = 0; i < (int)cur.size; ++i) {
154
0
        const float val = cur.data[i].logit;
155
0
        int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
156
0
        ib = std::max(0, std::min(nbuckets - 1, ib));
157
0
        bucket_idx.push_back(ib);
158
0
        ++histo[ib];
159
0
    }
160
0
    int nhave = 0;
161
0
    int ib = nbuckets - 1;
162
0
    for ( ; ib >= 0; --ib) {
163
0
        nhave += histo[ib];
164
0
        if (nhave >= npartial) {
165
0
            break;
166
0
        }
167
0
    }
168
0
    res.resize(nhave);
169
0
    auto * ptr = res.data();
170
0
    bucket_ptrs.reserve(nbuckets - ib);
171
0
    for (int j = nbuckets - 1; j >= ib; --j) {
172
0
        bucket_ptrs.push_back(ptr);
173
0
        ptr += histo[j];
174
0
    }
175
0
    for (int i = 0; i < (int)cur.size; ++i) {
176
0
        int j = bucket_idx[i];
177
0
        if (j >= ib) {
178
0
            *bucket_ptrs[nbuckets - 1 - j]++ = cur.data[i];
179
0
        }
180
0
    }
181
182
0
    ptr = res.data();
183
0
    int ndone = 0;
184
0
    for (int j = nbuckets - 1; j > ib; --j) {
185
0
        std::sort(ptr, ptr + histo[j], comp);
186
0
        ptr += histo[j];
187
0
        ndone += histo[j];
188
0
    }
189
0
    std::partial_sort(ptr, ptr + npartial - ndone, ptr + histo[ib], comp);
190
0
}
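
// Editorial sketch (not part of the original file): how the bucketing above maps
// a logit to a bucket index. With nbuckets = 128 over [-10, 10], bucket_scale is
// 6.4 and bucket_inter is 64, so a logit of 2.5 lands in bucket
// int(6.4*2.5 + 64) = 80; out-of-range values clamp to buckets 0 and 127.
// The helper name is hypothetical.
static int llama_bucket_of_logit(float logit) {
    constexpr int   nbuckets     = 128;
    constexpr float bucket_low   = -10.0f;
    constexpr float bucket_high  =  10.0f;
    constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low);
    constexpr float bucket_inter = -bucket_low * bucket_scale;

    return std::max(0, std::min(nbuckets - 1, int(bucket_scale * logit + bucket_inter)));
}
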
191
192
// reduces the size of cur_p to npartial, keeping only the top npartial elements
193
0
static void llama_token_data_array_partial_sort_inplace(llama_token_data_array * cur_p, int npartial) {
194
0
    static const auto comp = [](const llama_token_data & a, const llama_token_data & b) {
195
0
        return a.logit > b.logit;
196
0
    };
197
198
0
    if (npartial <= 128) {
199
0
        std::partial_sort(cur_p->data, cur_p->data + npartial, cur_p->data + cur_p->size, comp);
200
201
0
        cur_p->size = npartial;
202
0
        cur_p->sorted = true;
203
204
0
        return;
205
0
    }
206
207
0
    std::vector<llama_token_data> tmp;
208
209
0
    llama_token_data_array_partial_sort(*cur_p, npartial, tmp);
210
211
0
    std::copy(tmp.data(), tmp.data() + npartial, cur_p->data);
212
213
0
    cur_p->size = npartial;
214
0
    cur_p->sorted = true;
215
0
}
216
217
0
static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & rng) {
218
    // iterator for the probabilities
219
0
#ifdef __GNUC__
220
0
    #pragma GCC diagnostic push
221
0
    #pragma GCC diagnostic ignored "-Wunused-local-typedefs"
222
0
#endif
223
224
0
    struct probs_iterator {
225
0
        typedef std::input_iterator_tag iterator_category;
226
0
        typedef float value_type;
227
0
        typedef float * pointer;
228
0
        typedef float & reference;
229
0
        typedef ptrdiff_t difference_type;
230
231
0
        const llama_token_data * data;
232
233
0
        bool operator==(const probs_iterator & other) const { return data == other.data; }
234
0
        bool operator!=(const probs_iterator & other) const { return data != other.data; }
235
0
        const float & operator*() const { return data->p; }
236
0
        probs_iterator & operator++() { ++data; return *this; }
237
0
        probs_iterator operator++(int) { probs_iterator tmp = *this; ++data; return tmp; }
238
0
    };
239
240
0
#ifdef __GNUC__
241
0
    #pragma GCC diagnostic pop
242
0
#endif
243
244
0
    std::discrete_distribution<int> dist(probs_iterator{cur_p->data}, probs_iterator{cur_p->data + cur_p->size});
245
246
0
    return dist(rng);
247
0
}
248
249
/*
250
static void llama_log_softmax(float * array, size_t size) {
251
    float max_l = *std::max_element(array, array + size);
252
    float sum = 0.f;
253
    for (size_t i = 0; i < size; ++i) {
254
        float p = expf(array[i] - max_l);
255
        sum += p;
256
        array[i] = p;
257
    }
258
259
    for (size_t i = 0; i < size; ++i) {
260
        array[i] = logf(array[i] / sum);
261
    }
262
}
263
*/
264
265
0
static void llama_sampler_temp_impl(llama_token_data_array * cur_p, float temp) {
266
0
    if (temp <= 0.0f) {
267
        // find the token with the highest logit and set the rest to -inf
268
0
        size_t max_i = 0;
269
0
        float  max_l = cur_p->data[0].logit;
270
271
0
        for (size_t i = 1; i < cur_p->size; ++i) {
272
0
            if (cur_p->data[i    ].logit > max_l) {
273
0
                cur_p->data[max_i].logit = -INFINITY;
274
0
                max_i = i;
275
0
                max_l = cur_p->data[i].logit;
276
0
            } else {
277
0
                cur_p->data[i].logit = -INFINITY;
278
0
            }
279
0
        }
280
281
0
        return;
282
0
    }
283
284
0
    for (size_t i = 0; i < cur_p->size; ++i) {
285
0
        cur_p->data[i].logit /= temp;
286
0
    }
287
0
}
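
// Editorial sketch (not part of the original file): llama_sampler_temp_impl on a
// tiny array. A temperature below 1 sharpens the distribution by scaling logits
// up; temp <= 0.0 degenerates to greedy, masking everything but the argmax with
// -inf. The helper name is hypothetical.
static void llama_sampler_temp_impl_demo() {
    llama_token_data data[3] = { { 0, 1.0f, 0.0f }, { 1, 3.0f, 0.0f }, { 2, 2.0f, 0.0f } };
    llama_token_data_array cur_p = { data, 3, -1, false };

    llama_sampler_temp_impl(&cur_p, 0.5f);
    assert(data[1].logit == 6.0f); // all logits divided by 0.5, i.e. doubled

    llama_sampler_temp_impl(&cur_p, 0.0f);
    assert(data[1].logit == 6.0f);                             // the argmax is kept
    assert(std::isinf(data[0].logit) && data[0].logit < 0.0f); // the rest become -inf
}
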
288
289
0
static void llama_sampler_softmax_impl(llama_token_data_array * cur_p, bool do_sort) {
290
0
    GGML_ASSERT(cur_p->size > 0);
291
292
    // Sort the logits in descending order if requested
293
0
    if (do_sort && !cur_p->sorted) {
294
0
        llama_token_data_array_partial_sort_inplace(cur_p, cur_p->size);
295
0
    }
296
297
0
    float max_l = cur_p->data[0].logit;
298
0
    if (!cur_p->sorted) {
299
0
        for (size_t i = 1; i < cur_p->size; ++i) {
300
0
            max_l = std::max(max_l, cur_p->data[i].logit);
301
0
        }
302
0
    }
303
304
0
    float cum_sum = 0.0f;
305
306
0
    for (size_t i = 0; i < cur_p->size; ++i) {
307
0
        float p = expf(cur_p->data[i].logit - max_l);
308
0
        cur_p->data[i].p = p;
309
0
        cum_sum += p;
310
0
    }
311
312
0
    for (size_t i = 0; i < cur_p->size; ++i) {
313
0
        cur_p->data[i].p /= cum_sum;
314
0
    }
315
0
}
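
// Editorial sketch (not part of the original file): the max-subtraction above is
// the standard numerically stable softmax, p_i = exp(l_i - max_l) / sum_j exp(l_j - max_l),
// identical in exact arithmetic to exp(l_i) / sum_j exp(l_j) but safe from overflow.
// The helper name is hypothetical.
static void llama_sampler_softmax_impl_demo() {
    llama_token_data data[2] = { { 0, 0.0f, 0.0f }, { 1, logf(3.0f), 0.0f } };
    llama_token_data_array cur_p = { data, 2, -1, false };

    llama_sampler_softmax_impl(&cur_p, /*do_sort=*/false);

    // exp(0) / (exp(0) + exp(log 3)) = 1/4 and 3/4 respectively
    assert(fabsf(data[0].p - 0.25f) < 1e-6f);
    assert(fabsf(data[1].p - 0.75f) < 1e-6f);
}
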
316
317
0
static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k) {
318
    // if (k >= (int32_t)cur_p->size) {
319
    //     return;
320
    // }
321
322
0
    if (k <= 0) {
323
0
        return;
324
0
    }
325
326
0
    k = std::min(k, (int) cur_p->size);
327
328
    // Sort scores in descending order
329
0
    if (!cur_p->sorted) {
330
0
        llama_token_data_array_partial_sort_inplace(cur_p, k);
331
0
    }
332
333
0
    cur_p->size = k;
334
0
}
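
// Editorial sketch (not part of the original file): llama_sampler_top_k_impl
// clamps k to the candidate count, sorts descending only when needed, and then
// truncates. The helper name is hypothetical.
static void llama_sampler_top_k_impl_demo() {
    llama_token_data data[4] = { { 0, 0.1f, 0.0f }, { 1, 2.0f, 0.0f }, { 2, -1.0f, 0.0f }, { 3, 1.5f, 0.0f } };
    llama_token_data_array cur_p = { data, 4, -1, false };

    llama_sampler_top_k_impl(&cur_p, 2);

    assert(cur_p.size == 2);                    // only the two largest logits remain
    assert(data[0].id == 1 && data[1].id == 3); // sorted in descending logit order
}
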
335
336
0
static uint32_t get_rng_seed(uint32_t seed) {
337
0
    if (seed == LLAMA_DEFAULT_SEED) {
338
        // use system clock if std::random_device is not a true RNG
339
0
        static bool is_rd_prng = std::random_device().entropy() == 0;
340
0
        if (is_rd_prng) {
341
0
            return (uint32_t) std::chrono::system_clock::now().time_since_epoch().count();
342
0
        }
343
0
        std::random_device rd;
344
0
        return rd();
345
0
    }
346
0
    return seed;
347
0
}
348
349
// llama_sampler API
350
351
struct llama_sampler * llama_sampler_init(
352
        struct llama_sampler_i * iface,
353
0
        llama_sampler_context_t ctx) {
354
0
    return new llama_sampler {
355
0
        /* .iface = */ iface,
356
0
        /* .ctx   = */ ctx,
357
0
    };
358
0
}
359
360
0
const char * llama_sampler_name(const struct llama_sampler * smpl) {
361
0
    if (!smpl->iface) {
362
0
        return "(null)";
363
0
    }
364
365
0
    return smpl->iface->name(smpl);
366
0
}
367
368
0
void llama_sampler_accept(struct llama_sampler * smpl, llama_token token) {
369
0
    if (!smpl) {
370
0
        return;
371
0
    }
372
373
0
    if (smpl->iface->accept) {
374
0
        smpl->iface->accept(smpl, token);
375
0
    }
376
0
}
377
378
0
void llama_sampler_apply(struct llama_sampler * smpl, struct llama_token_data_array * cur_p) {
379
0
    if (!smpl) {
380
0
        return;
381
0
    }
382
383
0
    GGML_ASSERT(smpl->iface->apply);
384
0
    smpl->iface->apply(smpl, cur_p);
385
0
}
386
387
0
void llama_sampler_reset(struct llama_sampler * smpl) {
388
0
    if (!smpl) {
389
0
        return;
390
0
    }
391
392
0
    if (smpl->iface->reset) {
393
0
        smpl->iface->reset(smpl);
394
0
    }
395
0
}
396
397
0
struct llama_sampler * llama_sampler_clone(const struct llama_sampler * smpl) {
398
0
    if (!smpl) {
399
0
        return nullptr;
400
0
    }
401
402
0
    if (smpl->iface->clone) {
403
0
        return smpl->iface->clone(smpl);
404
0
    }
405
406
0
    if (smpl->ctx == nullptr) {
407
0
        return llama_sampler_init(
408
0
            /* .iface = */ smpl->iface,
409
0
            /* .ctx   = */ nullptr
410
0
        );
411
0
    }
412
413
0
    GGML_ABORT("the sampler does not support cloning");
414
0
}
415
416
0
void llama_sampler_free(struct llama_sampler * smpl) {
417
0
    if (smpl == nullptr) {
418
0
        return;
419
0
    }
420
421
0
    if (smpl->iface->free) {
422
0
        smpl->iface->free(smpl);
423
0
    }
424
425
0
    delete smpl;
426
0
}
427
428
// empty sampler
429
430
struct llama_sampler_empty {
431
    const char * name;
432
};
433
434
static struct llama_sampler * llama_sampler_init_empty(const char * name);
435
436
0
static const char * llama_sampler_empty_name(const struct llama_sampler * smpl) {
437
0
    auto * ctx = (llama_sampler_empty *) smpl->ctx;
438
0
    return ctx->name;
439
0
}
440
441
0
static void llama_sampler_empty_accept(struct llama_sampler * smpl, llama_token token) {
442
0
    GGML_UNUSED(smpl);
443
0
    GGML_UNUSED(token);
444
0
}
445
446
0
static void llama_sampler_empty_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
447
0
    GGML_UNUSED(smpl);
448
0
    GGML_UNUSED(cur_p);
449
0
}
450
451
0
static void llama_sampler_empty_reset(struct llama_sampler * smpl) {
452
0
    GGML_UNUSED(smpl);
453
0
}
454
455
0
static struct llama_sampler * llama_sampler_empty_clone(const struct llama_sampler * smpl) {
456
0
    auto * ctx = (llama_sampler_empty *) smpl->ctx;
457
0
    return llama_sampler_init_empty(ctx->name);
458
0
}
459
460
0
static void llama_sampler_empty_free(struct llama_sampler * smpl) {
461
0
    delete (llama_sampler_empty *) smpl->ctx;
462
0
}
463
464
static bool llama_sampler_empty_backend_init(
465
        struct llama_sampler       * smpl,
466
0
        ggml_backend_buffer_type_t   buft) {
467
0
    GGML_UNUSED(smpl);
468
0
    GGML_UNUSED(buft);
469
470
0
    return true;
471
0
}
472
473
static void llama_sampler_empty_backend_accept(
474
        struct llama_sampler * smpl,
475
        ggml_context * ctx,
476
        ggml_cgraph * gf,
477
0
        struct ggml_tensor * selected_token) {
478
0
    GGML_UNUSED(smpl);
479
0
    GGML_UNUSED(ctx);
480
0
    GGML_UNUSED(gf);
481
0
    GGML_UNUSED(selected_token);
482
0
}
483
484
static void llama_sampler_empty_backend_apply(
485
          struct llama_sampler      * smpl,
486
          struct ggml_context       * ctx,
487
          struct ggml_cgraph        * gf,
488
0
          struct llama_sampler_data * data) {
489
0
    GGML_UNUSED(smpl);
490
0
    GGML_UNUSED(ctx);
491
0
    GGML_UNUSED(gf);
492
0
    GGML_UNUSED(data);
493
0
}
494
495
0
static void llama_sampler_empty_backend_set_input(struct llama_sampler * smpl) {
496
0
    GGML_UNUSED(smpl);
497
0
}
498
499
static struct llama_sampler_i llama_sampler_empty_i = {
500
    /* .name              = */ llama_sampler_empty_name,
501
    /* .accept            = */ llama_sampler_empty_accept,
502
    /* .apply             = */ llama_sampler_empty_apply,
503
    /* .reset             = */ llama_sampler_empty_reset,
504
    /* .clone             = */ llama_sampler_empty_clone,
505
    /* .free              = */ llama_sampler_empty_free,
506
    /* .backend_init      = */ llama_sampler_empty_backend_init,
507
    /* .backend_accept    = */ llama_sampler_empty_backend_accept,
508
    /* .backend_apply     = */ llama_sampler_empty_backend_apply,
509
    /* .backend_set_input = */ llama_sampler_empty_backend_set_input,
510
};
511
512
0
struct llama_sampler * llama_sampler_init_empty(const char * name) {
513
0
    return llama_sampler_init(
514
0
        /* .iface = */ &llama_sampler_empty_i,
515
0
        /* .ctx   = */ new llama_sampler_empty {
516
0
            /* .name = */ name,
517
0
        }
518
0
    );
519
0
}
520
521
// common backend sampler functionality
522
//
523
// +name : means that the sampler is supported and will run on the backend
524
// -name : means that a ggml operator is not supported by the backend
525
//
526
struct llama_sampler_backend {
527
0
    llama_sampler_backend(const char * name) : name(name), name_ext(name), is_init(false), support(false) {}
528
529
0
    const char * get_name() {
530
0
        if (!is_init) {
531
0
            return name.c_str();
532
0
        }
533
534
0
        if (support) {
535
0
            name_ext = "+" + name;
536
0
        } else {
537
0
            name_ext = "-" + name;
538
0
        }
539
540
0
        return name_ext.c_str();
541
0
    }
542
543
0
    void init(bool support) {
544
0
        GGML_ASSERT(this->is_init == false);
545
546
0
        this->is_init = true;
547
0
        this->support = support;
548
0
    }
549
550
private:
551
    std::string name;
552
    std::string name_ext;
553
554
    bool is_init;
555
    bool support;
556
};
557
558
// check if all ggml ops used by the sampler are supported by the backend
559
static bool llama_sampler_backend_support(
560
        llama_sampler              * smpl,
561
0
        ggml_backend_buffer_type_t   buft) {
562
0
    auto * device = ggml_backend_buft_get_device(buft);
563
0
    if (!device) {
564
        // CPU backend always supported
565
0
        return true;
566
0
    }
567
568
0
    ggml_init_params params = {
569
0
        /*.mem_size   =*/ 128*ggml_tensor_overhead() + ggml_graph_overhead(),
570
0
        /*.mem_buffer =*/ NULL,
571
0
        /*.no_alloc   =*/ true,
572
0
    };
573
574
0
    ggml_context_ptr ctx_ptr { ggml_init(params) };
575
0
    if (!ctx_ptr) {
576
0
        throw std::runtime_error(format("failed to create ggml context"));
577
0
    }
578
579
0
    ggml_context * ctx = ctx_ptr.get();
580
581
0
    const int64_t n = 1024*1024;
582
583
0
    llama_sampler_data data = {
584
0
        /*.logits     = */ ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n),
585
0
        /*.probs      = */ nullptr,
586
0
        /*.sampled    = */ nullptr,
587
0
        /*.candidates = */ ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n),
588
0
    };
589
590
0
    ggml_cgraph * gf = ggml_new_graph(ctx);
591
592
0
    smpl->iface->backend_apply(smpl, ctx, gf, &data);
593
594
0
    if (data.logits) {
595
0
        ggml_build_forward_expand(gf, data.logits);
596
0
    }
597
598
0
    if (data.probs) {
599
0
        ggml_build_forward_expand(gf, data.probs);
600
0
    }
601
602
0
    if (data.sampled) {
603
0
        ggml_build_forward_expand(gf, data.sampled);
604
0
    }
605
606
0
    if (data.candidates) {
607
0
        ggml_build_forward_expand(gf, data.candidates);
608
0
    }
609
610
0
    for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
611
0
        struct ggml_tensor * op = ggml_graph_node(gf, i);
612
613
0
        if (!ggml_backend_dev_supports_op(device, op)) {
614
0
            LLAMA_LOG_WARN("%s: device '%s' does not have support for op %s needed for sampler '%s'\n",
615
0
                    __func__, ggml_backend_dev_name(device), ggml_op_name(op->op), smpl->iface->name(smpl));
616
617
0
            return false;
618
0
        }
619
0
    }
620
621
0
    return true;
622
0
}
623
624
// sampler chain
625
626
0
static const char * llama_sampler_chain_name(const struct llama_sampler * /*smpl*/) {
627
0
    return "chain";
628
0
}
629
630
0
static void llama_sampler_chain_accept(struct llama_sampler * smpl, llama_token token) {
631
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
632
633
0
    time_meas tm(chain->t_sample_us, chain->params.no_perf);
634
635
0
    for (auto & smpl : chain->samplers) {
636
0
        llama_sampler_accept(smpl.ptr, token);
637
0
    }
638
639
0
    chain->n_sample++;
640
0
}
641
642
0
static void llama_sampler_chain_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
643
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
644
645
0
    time_meas tm(chain->t_sample_us, chain->params.no_perf);
646
647
0
    bool is_backend = chain->is_init;
648
649
0
    for (auto & smpl : chain->samplers) {
650
0
        if (is_backend && smpl.is_backend) {
651
0
            continue;
652
0
        }
653
654
0
        is_backend = false;
655
656
0
        if (smpl.ptr->iface->apply == nullptr) {
657
0
            continue;
658
0
        }
659
660
0
        llama_sampler_apply(smpl.ptr, cur_p);
661
0
    }
662
0
}
663
664
0
static void llama_sampler_chain_reset(struct llama_sampler * smpl) {
665
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
666
667
0
    for (auto & smpl : chain->samplers) {
668
0
        llama_sampler_reset(smpl.ptr);
669
0
    }
670
0
}
671
672
0
static struct llama_sampler * llama_sampler_chain_clone(const struct llama_sampler * smpl) {
673
0
    const auto * chain_src = (const llama_sampler_chain *) smpl->ctx;
674
675
0
    auto * result = llama_sampler_chain_init(chain_src->params);
676
677
0
    for (const auto & smpl : chain_src->samplers) {
678
0
        llama_sampler_chain_add(result, llama_sampler_clone(smpl.ptr));
679
0
    }
680
681
0
    return result;
682
0
}
683
684
0
static void llama_sampler_chain_free(struct llama_sampler * smpl) {
685
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
686
687
0
    for (auto & smpl : chain->samplers) {
688
0
        llama_sampler_free(smpl.ptr);
689
0
    }
690
691
0
    delete chain;
692
0
}
693
694
static bool llama_sampler_chain_backend_init(
695
        struct llama_sampler       * smpl,
696
0
        ggml_backend_buffer_type_t   buft) {
697
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
698
699
0
    GGML_ASSERT(chain->is_init == false && "llama_sampler_chain_backend_init() called twice");
700
701
0
    chain->is_init = true;
702
703
0
    bool res = true;
704
705
0
    for (auto & smpl : chain->samplers) {
706
0
        bool res_cur = true;
707
708
        // to be able to run a sampler on the backend, it has to:
709
        // - have the .backend_init() API implemented
710
        // - return true during .backend_init()
711
0
        if (smpl.ptr->iface->backend_init) {
712
0
            if (!smpl.ptr->iface->backend_init(smpl.ptr, buft)) {
713
0
                res_cur = false;
714
0
            }
715
0
        } else {
716
0
            res_cur = false;
717
0
        }
718
719
0
        smpl.is_backend = res_cur;
720
721
0
        res = res && res_cur;
722
0
    }
723
724
0
    return res;
725
0
}
726
727
static void llama_sampler_chain_backend_accept(
728
        struct llama_sampler * smpl,
729
        ggml_context * ctx,
730
        ggml_cgraph * gf,
731
0
        struct ggml_tensor * selected_token) {
732
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
733
734
0
    for (auto & smpl : chain->samplers) {
735
0
        if (!smpl.is_backend) {
736
0
            break;
737
0
        }
738
739
0
        if (smpl.ptr->iface->backend_accept) {
740
0
            smpl.ptr->iface->backend_accept(smpl.ptr, ctx, gf, selected_token);
741
0
        }
742
0
    }
743
0
}
744
745
static void llama_sampler_chain_backend_apply(
746
          struct llama_sampler      * smpl,
747
          struct ggml_context       * ctx,
748
          struct ggml_cgraph        * gf,
749
0
          struct llama_sampler_data * data) {
750
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
751
752
0
    GGML_ASSERT(chain->is_init && "llama_sampler_chain_backend_init() not called");
753
754
0
    for (auto & smpl : chain->samplers) {
755
0
        if (!smpl.is_backend) {
756
0
            break;
757
0
        }
758
759
0
        if (smpl.ptr->iface->backend_apply) {
760
0
            smpl.ptr->iface->backend_apply(smpl.ptr, ctx, gf, data);
761
0
        }
762
0
    }
763
0
}
764
765
0
static void llama_sampler_chain_backend_set_input(struct llama_sampler * smpl) {
766
0
    auto * chain = (llama_sampler_chain *) smpl->ctx;
767
768
0
    for (auto & smpl : chain->samplers) {
769
0
        if (!smpl.is_backend) {
770
0
            break;
771
0
        }
772
773
0
        if (smpl.ptr->iface->backend_set_input) {
774
0
            smpl.ptr->iface->backend_set_input(smpl.ptr);
775
0
        }
776
0
    }
777
0
}
778
779
static struct llama_sampler_i llama_sampler_chain_i = {
780
    /* .name              = */ llama_sampler_chain_name,
781
    /* .accept            = */ llama_sampler_chain_accept,
782
    /* .apply             = */ llama_sampler_chain_apply,
783
    /* .reset             = */ llama_sampler_chain_reset,
784
    /* .clone             = */ llama_sampler_chain_clone,
785
    /* .free              = */ llama_sampler_chain_free,
786
    /* .backend_init      = */ llama_sampler_chain_backend_init,
787
    /* .backend_accept    = */ llama_sampler_chain_backend_accept,
788
    /* .backend_apply     = */ llama_sampler_chain_backend_apply,
789
    /* .backend_set_input = */ llama_sampler_chain_backend_set_input,
790
};
791
792
0
struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params) {
793
0
    return llama_sampler_init(
794
0
        /* .iface = */ &llama_sampler_chain_i,
795
0
        /* .ctx   = */ new llama_sampler_chain {
796
0
            /* .params      = */ params,
797
0
            /* .is_init     = */ false,
798
0
            /* .samplers    = */ {},
799
0
            /* .cur         = */ {},
800
0
            /* .t_sample_us = */ 0,
801
0
            /* .n_sample    = */ 0,
802
0
        }
803
0
    );
804
0
}
805
806
0
llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx) {
807
0
    const llama_token   sampled_token  = llama_get_sampled_token_ith     (ctx, idx);
808
0
    const float *       sampled_probs  = llama_get_sampled_probs_ith     (ctx, idx);
809
0
    const float *       sampled_logits = llama_get_sampled_logits_ith    (ctx, idx);
810
0
    const llama_token * sampled_ids    = llama_get_sampled_candidates_ith(ctx, idx);
811
812
    // If a backend sampler has already sampled a token, return it.
813
0
    if (sampled_token != LLAMA_TOKEN_NULL) {
814
0
        LLAMA_LOG_DEBUG("%s: Backend sampler selected token for idx %d. Skipping CPU samplers\n", __func__, idx);
815
0
        return sampled_token;
816
0
    }
817
818
0
    const llama_model * model = llama_get_model(ctx);
819
0
    const llama_vocab * vocab = llama_model_get_vocab(model);
820
821
0
    const int n_vocab = llama_vocab_n_tokens(vocab);
822
823
    // use pre-allocated buffer from chain if available, otherwise allocate locally
824
0
    std::vector<llama_token_data> * cur_ptr;
825
0
    std::vector<llama_token_data> cur_local;
826
827
0
    if (smpl->iface == &llama_sampler_chain_i) {
828
0
        auto * chain = (llama_sampler_chain *) smpl->ctx;
829
0
        cur_ptr = &chain->cur;
830
0
    } else {
831
0
        cur_ptr = &cur_local;
832
0
    }
833
834
0
    auto & cur = *cur_ptr;
835
836
0
    if (sampled_probs) {
837
0
        const uint32_t sampled_probs_count = llama_get_sampled_probs_count_ith(ctx, idx);
838
0
        cur.resize(sampled_probs_count);
839
0
        for (uint32_t i = 0; i < sampled_probs_count; ++i) {
840
0
            cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], sampled_probs[i]};
841
0
        }
842
0
    } else if (sampled_logits) {
843
0
        const uint32_t sampled_logits_count = llama_get_sampled_logits_count_ith(ctx, idx);
844
0
        cur.resize(sampled_logits_count);
845
0
        for (llama_token i = 0; i < (int)sampled_logits_count; i++) {
846
0
            cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], 0.0f};
847
0
        }
848
0
    } else {
849
0
        const auto * logits = llama_get_logits_ith(ctx, idx);
850
0
        GGML_ASSERT(logits != nullptr);
851
0
        cur.resize(n_vocab);
852
0
        for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
853
0
            cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
854
0
        }
855
0
    }
856
857
0
    llama_token_data_array cur_p = {
858
0
        /* .data       = */ cur.data(),
859
0
        /* .size       = */ cur.size(),
860
0
        /* .selected   = */ -1,
861
0
        /* .sorted     = */ false,
862
0
    };
863
864
0
    llama_sampler_apply(smpl, &cur_p);
865
866
0
    GGML_ASSERT(cur_p.selected >= 0 && cur_p.selected < (int32_t) cur_p.size);
867
868
0
    auto token = cur_p.data[cur_p.selected].id;
869
870
0
    llama_sampler_accept(smpl, token);
871
872
0
    return token;
873
0
}
874
875
876
0
void llama_sampler_chain_add(struct llama_sampler * chain, struct llama_sampler * smpl) {
877
0
    auto * p = (llama_sampler_chain *) chain->ctx;
878
0
    p->samplers.push_back({
879
0
        /* .is_backend = */ false,
880
0
        /* .ptr        = */ smpl,
881
0
    });
882
0
}
883
884
0
struct llama_sampler * llama_sampler_chain_get(struct llama_sampler * chain, int32_t i) {
885
0
    if (chain == nullptr) {
886
0
        return nullptr;
887
0
    }
888
889
0
    if (chain->iface != &llama_sampler_chain_i) {
890
0
        return nullptr;
891
0
    }
892
893
0
    if (i == -1) {
894
0
        return chain;
895
0
    }
896
897
0
    const auto * p = (const llama_sampler_chain *) chain->ctx;
898
899
0
    if (i < 0 || (size_t) i >= p->samplers.size()) {
900
0
        return nullptr;
901
0
    }
902
903
0
    return p->samplers[i].ptr;
904
0
}
905
906
0
struct llama_sampler * llama_sampler_chain_remove(struct llama_sampler * chain, int32_t i) {
907
0
    auto * p = (llama_sampler_chain *) chain->ctx;
908
909
0
    if (i < 0 || (size_t) i >= p->samplers.size()) {
910
0
        return nullptr;
911
0
    }
912
913
0
    auto * result = p->samplers[i].ptr;
914
0
    p->samplers.erase(p->samplers.begin() + i);
915
916
0
    return result;
917
0
}
918
919
0
int llama_sampler_chain_n(const struct llama_sampler * chain) {
920
0
    const auto * p = (const llama_sampler_chain *) chain->ctx;
921
922
0
    return p->samplers.size();
923
0
}
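
// Editorial sketch (not part of the original file): typical client-side use of
// the chain API above, assuming an existing llama_context * lctx; the functions
// used here are part of the public llama.h API.
static void llama_sampler_chain_demo(llama_context * lctx) {
    llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

    llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
    llama_sampler_chain_add(chain, llama_sampler_init_top_p(0.9f, 1));
    llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));

    // sample from the logits of the last output of the previous decode call
    const llama_token token = llama_sampler_sample(chain, lctx, -1);
    GGML_UNUSED(token);

    llama_sampler_free(chain); // also frees the samplers that were added to the chain
}
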
924
925
//
926
// samplers
927
//
928
929
// greedy
930
931
struct llama_sampler_greedy : public llama_sampler_backend {
932
};
933
934
0
static const char * llama_sampler_greedy_name(const struct llama_sampler * smpl) {
935
0
    auto * sctx = (llama_sampler_greedy *) smpl->ctx;
936
0
    return sctx->get_name();
937
0
}
938
939
0
static void llama_sampler_greedy_reset(struct llama_sampler * smpl) {
940
0
    auto * ctx = (llama_sampler_greedy *) smpl->ctx;
941
0
    GGML_UNUSED(ctx);
942
0
}
943
944
0
static struct llama_sampler * llama_sampler_greedy_clone(const struct llama_sampler * smpl) {
945
0
    const auto * ctx = (const llama_sampler_greedy *) smpl->ctx;
946
0
    auto * result = llama_sampler_init_greedy();
947
948
    // copy the state
949
0
    {
950
0
        auto * result_ctx = (llama_sampler_greedy *) result->ctx;
951
952
0
        GGML_UNUSED(ctx);
953
0
        GGML_UNUSED(result_ctx);
954
0
    }
955
956
0
    return result;
957
0
}
958
959
0
static void llama_sampler_greedy_free(struct llama_sampler * smpl) {
960
0
    delete (llama_sampler_greedy *) smpl->ctx;
961
0
}
962
963
0
static void llama_sampler_greedy_apply(struct llama_sampler * /*smpl*/, llama_token_data_array * cur_p) {
964
0
    cur_p->selected = 0;
965
0
    for (size_t i = 1; i < cur_p->size; ++i) {
966
0
        if (cur_p->data[i].logit > cur_p->data[cur_p->selected].logit) {
967
0
            cur_p->selected = i;
968
0
        }
969
0
    }
970
0
}
971
972
static bool llama_sampler_greedy_backend_init(
973
        struct llama_sampler       * smpl,
974
0
        ggml_backend_buffer_type_t   buft) {
975
0
    auto * sctx = (llama_sampler_greedy *) smpl->ctx;
976
977
0
    const bool res = llama_sampler_backend_support(smpl, buft);
978
979
0
    sctx->init(res);
980
981
0
    return res;
982
0
}
983
984
static void llama_sampler_greedy_backend_apply(
985
        struct llama_sampler      * smpl,
986
        struct ggml_context       * ctx,
987
        struct ggml_cgraph        * gf,
988
0
        struct llama_sampler_data * data) {
989
0
    GGML_UNUSED(gf);
990
0
    GGML_UNUSED(smpl);
991
992
0
    struct ggml_tensor * curl = ggml_argmax(ctx, data->logits);
993
0
    ggml_set_name(curl, "greedy_argmax");
994
995
0
    data->sampled = curl;
996
0
}
997
998
static struct llama_sampler_i llama_sampler_greedy_i = {
999
    /* .name              = */ llama_sampler_greedy_name,
1000
    /* .accept            = */ nullptr,
1001
    /* .apply             = */ llama_sampler_greedy_apply,
1002
    /* .reset             = */ llama_sampler_greedy_reset,
1003
    /* .clone             = */ llama_sampler_greedy_clone,
1004
    /* .free              = */ llama_sampler_greedy_free,
1005
    /* .backend_init      = */ llama_sampler_greedy_backend_init,
1006
    /* .backend_accept    = */ nullptr,
1007
    /* .backend_apply     = */ llama_sampler_greedy_backend_apply,
1008
    /* .backend_set_input = */ nullptr,
1009
};
1010
1011
0
struct llama_sampler * llama_sampler_init_greedy() {
1012
0
    return llama_sampler_init(
1013
0
        /* .iface = */ &llama_sampler_greedy_i,
1014
0
        /* .ctx   = */ new llama_sampler_greedy {
1015
0
            ("greedy"),
1016
0
        }
1017
0
    );
1018
0
}
1019
1020
// dist
1021
1022
struct llama_sampler_dist : public llama_sampler_backend {
1023
    const uint32_t seed;
1024
          uint32_t seed_cur;
1025
1026
    std::mt19937 rng;
1027
1028
    // backend input
1029
    struct ggml_tensor * inp_uniform;
1030
1031
    ggml_context_ptr        inp_ctx;
1032
    ggml_backend_buffer_ptr inp_buf;
1033
};
1034
1035
0
static const char * llama_sampler_dist_name(const struct llama_sampler * smpl) {
1036
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1037
0
    return sctx->get_name();
1038
0
}
1039
1040
0
static void llama_sampler_dist_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1041
0
    auto * ctx = (llama_sampler_dist *) smpl->ctx;
1042
1043
    // edge cases
1044
0
    if (cur_p->size == 0) {
1045
0
        cur_p->selected = -1;
1046
0
        return;
1047
0
    }
1048
1049
0
    cur_p->selected = 0;
1050
1051
0
    if (cur_p->size == 1) {
1052
0
        cur_p->data[0].p = 1.0f;
1053
0
        return;
1054
0
    }
1055
1056
    // max logit for numerical stability
1057
0
    float max_l = cur_p->data[0].logit;
1058
0
    if (!cur_p->sorted) {
1059
0
        for (size_t i = 1; i < cur_p->size; ++i) {
1060
0
            max_l = std::max(max_l, cur_p->data[i].logit);
1061
0
        }
1062
0
    }
1063
1064
    // apply softmax to obtain the probabilities
1065
0
    double sum_cum = 0.0f;
1066
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1067
0
        float p = expf(cur_p->data[i].logit - max_l);
1068
0
        cur_p->data[i].p = p;
1069
0
        sum_cum += p;
1070
0
    }
1071
1072
0
#if 1
1073
    // sample from the obtained probabilities and normalize the probs in a single pass
1074
    // this is ~3x faster on Mac with full gpt-oss vocab than the version below
1075
    //
1076
0
    std::uniform_real_distribution<double> dist(0.0f, 1.0f);
1077
0
    const double rnd = dist(ctx->rng);
1078
1079
0
          double sum_run = 0.0f;
1080
0
    const double sum_tgt = sum_cum*rnd;
1081
1082
0
    bool found = false;
1083
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1084
0
        if (!found) {
1085
            // accumulate probs until we reach the target sum
1086
0
            sum_run += cur_p->data[i].p;
1087
0
            if (sum_run >= sum_tgt) {
1088
0
                cur_p->selected = i;
1089
0
                found = true;
1090
0
            }
1091
0
        }
1092
1093
        // normalize probs
1094
0
        cur_p->data[i].p /= sum_cum;
1095
0
    }
1096
1097
    // fall back to the last token (this should not be reachable)
1098
0
    assert(found);
1099
0
    if (!found) {
1100
0
        cur_p->selected = cur_p->size - 1;
1101
0
    }
1102
#else
1103
    // for clarity, this is the same as above but does one pass for normalization and one extra pass for sampling
1104
    for (size_t i = 0; i < cur_p->size; ++i) {
1105
        cur_p->data[i].p /= sum_cum;
1106
    }
1107
1108
    cur_p->selected = llama_sample_dist(cur_p, ctx->rng);
1109
#endif
1110
0
}
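
// Editorial sketch (not part of the original file): the single-pass trick above,
// stripped to its core. In the code above the sum of unnormalized probabilities
// is computed during the exp() pass; here a separate accumulate stands in for it.
// The helper name is hypothetical.
static size_t llama_sample_one_pass_demo(std::vector<float> & probs, std::mt19937 & rng) {
    const double sum = std::accumulate(probs.begin(), probs.end(), 0.0);
    const double tgt = std::uniform_real_distribution<double>(0.0, 1.0)(rng)*sum;

    size_t selected = probs.size() - 1; // fallback, as above
    double sum_run  = 0.0;
    bool   found    = false;

    for (size_t i = 0; i < probs.size(); ++i) {
        if (!found) {
            sum_run += probs[i];
            if (sum_run >= tgt) {
                selected = i;
                found = true;
            }
        }
        probs[i] /= sum; // normalize in the same pass
    }

    return selected;
}
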
1111
1112
0
static void llama_sampler_dist_reset(struct llama_sampler * smpl) {
1113
0
    auto * ctx = (llama_sampler_dist *) smpl->ctx;
1114
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
1115
0
    ctx->rng.seed(ctx->seed_cur);
1116
0
}
1117
1118
0
static struct llama_sampler * llama_sampler_dist_clone(const struct llama_sampler * smpl) {
1119
0
    const auto * ctx = (const llama_sampler_dist *) smpl->ctx;
1120
0
    auto * result = llama_sampler_init_dist(ctx->seed);
1121
1122
    // copy the state
1123
0
    {
1124
0
        auto * result_ctx = (llama_sampler_dist *) result->ctx;
1125
1126
0
        result_ctx->rng = ctx->rng;
1127
0
    }
1128
1129
0
    return result;
1130
0
}
1131
1132
0
static void llama_sampler_dist_free(struct llama_sampler * smpl) {
1133
0
    delete (llama_sampler_dist *) smpl->ctx;
1134
0
}
1135
1136
static bool llama_sampler_dist_backend_init(
1137
        struct llama_sampler       * smpl,
1138
0
        ggml_backend_buffer_type_t   buft) {
1139
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1140
1141
    // allocate inputs
1142
0
    {
1143
0
        ggml_init_params params = {
1144
0
            /*.mem_size   =*/ ggml_tensor_overhead(),
1145
0
            /*.mem_buffer =*/ nullptr,
1146
0
            /*.no_alloc   =*/ true,
1147
0
        };
1148
1149
0
        sctx->inp_ctx.reset(ggml_init(params));
1150
1151
        // Create the uniform random scalar input tensor. This will be set by
1152
        // llama_sampler_dist_backend_set_input after this graph is built.
1153
0
        sctx->inp_uniform = ggml_new_tensor_1d(sctx->inp_ctx.get(), GGML_TYPE_F32, 1);
1154
0
        ggml_set_name (sctx->inp_uniform, "uniform");
1155
0
        ggml_set_input(sctx->inp_uniform);
1156
1157
        // Allocate all tensors from our context to the backend
1158
0
        sctx->inp_buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(sctx->inp_ctx.get(), buft));
1159
1160
0
        ggml_backend_buffer_clear(sctx->inp_buf.get(), 0);
1161
0
    }
1162
1163
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1164
1165
0
    sctx->init(res);
1166
1167
0
    if (!res) {
1168
0
        sctx->inp_ctx.reset(nullptr);
1169
0
        sctx->inp_buf.reset(nullptr);
1170
0
    }
1171
1172
0
    return res;
1173
0
}
1174
1175
static void llama_sampler_dist_backend_apply(
1176
        struct llama_sampler      * smpl,
1177
        struct ggml_context       * ctx,
1178
        struct ggml_cgraph        * gf,
1179
0
        struct llama_sampler_data * data) {
1180
0
    GGML_UNUSED(gf);
1181
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1182
1183
0
    struct ggml_tensor * probs = ggml_soft_max(ctx, data->logits);
1184
0
    ggml_set_name(probs, "dist_probs");
1185
1186
0
    struct ggml_tensor * cumsum = ggml_cumsum(ctx, probs);
1187
0
    ggml_set_name(cumsum, "dist_cumsum");
1188
1189
    // The uniform tensor holds a random value, which we subtract from the
1190
    // cumsum tensor (the uniform tensor is broadcast by ggml_sub).
1191
    // Recall that each entry in cumsum is the cumulative probability up to that
1192
    // index so values stay negative while the cumulative total is below the
1193
    // random value, and become zero/positive once the threshold is crossed.
1194
0
    struct ggml_tensor * diff = ggml_sub(ctx, cumsum, sctx->inp_uniform);
1195
0
    ggml_set_name(diff, "dist_cumsum");
1196
1197
    // The ggml_step function produces a tensor where entries are 1 if the
1198
    // corresponding entry in diff is > 0, and 0 otherwise. So all values up to
1199
    // the index where the cumulative probability exceeds the random value are 0,
1200
    // and all entries after that are 1.
1201
0
    struct ggml_tensor * mask = ggml_step(ctx, diff);
1202
0
    ggml_set_name(mask, "dist_mask");
1203
1204
    // Taking the sum of the mask gives us the number of elements at or past
1205
    // the point where the CDF crosses the random value.
1206
0
    struct ggml_tensor * idxf = ggml_sum(ctx, mask);
1207
0
    ggml_set_name(idxf, "dist_index_f32");
1208
1209
    // Use ggml_scale_bias to scale the index value by -1 and then add the size
1210
    // of the mask to that value so we get the correct index ((-1 * idxf) + n).
1211
0
    struct ggml_tensor * idx = ggml_cast(ctx, ggml_scale_bias(ctx, idxf, -1.0f, mask->ne[0]), GGML_TYPE_I32);
1212
0
    ggml_set_name(idx, "dist_index_i32");
1213
1214
    // Map back to original vocab ids if a candidates tensor is available.
1215
0
    struct ggml_tensor * sampled_token = idx;
1216
0
    if (data->candidates != nullptr) {
1217
0
        struct ggml_tensor * candidates = ggml_reshape_2d(ctx, data->candidates, 1, ggml_nelements(data->candidates));
1218
1219
0
        sampled_token = ggml_get_rows(ctx, candidates, idx);
1220
0
        ggml_set_name(sampled_token, "dist_sampled_token");
1221
0
    }
1222
1223
0
    data->sampled = sampled_token;
1224
0
    data->probs = probs;
1225
0
}
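
// Editorial sketch (not part of the original file): a scalar mirror of the
// branch-free graph above, showing the same math: mask_i = step(cumsum_i - u)
// and idx = n - sum(mask). Like the graph, it assumes u is below the total
// cumulative probability. The helper name is hypothetical.
static int llama_sample_index_branchless_demo(const std::vector<float> & probs, float u) {
    const int n = (int) probs.size();

    float cumsum   = 0.0f;
    int   mask_sum = 0;

    for (int i = 0; i < n; ++i) {
        cumsum += probs[i];
        mask_sum += (cumsum - u) > 0.0f ? 1 : 0; // scalar equivalent of ggml_step
    }

    return n - mask_sum; // first index where the CDF exceeds u
}
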
1226
1227
0
static void llama_sampler_dist_backend_set_input(struct llama_sampler * smpl) {
1228
0
    auto * sctx = (llama_sampler_dist *) smpl->ctx;
1229
0
    GGML_ASSERT(sctx->inp_uniform != nullptr);
1230
1231
    // We sample in double precision and cast to float so that the random
1232
    // sequence matches the CPU path in llama_sampler_dist_apply, which also
1233
    // draws from std::uniform_real_distribution<double> (sampling from a
1234
    // <float> distribution with the same rng would produce a different
1235
    // sequence).
1236
0
    std::uniform_real_distribution<double> dist(0.0f, 1.0f);
1237
0
    const float rnd = dist(sctx->rng);
1238
1239
0
    ggml_backend_tensor_set(sctx->inp_uniform, &rnd, 0, sizeof(float));
1240
0
}
1241
1242
static struct llama_sampler_i llama_sampler_dist_i = {
1243
    /* .name              = */ llama_sampler_dist_name,
1244
    /* .accept            = */ nullptr,
1245
    /* .apply             = */ llama_sampler_dist_apply,
1246
    /* .reset             = */ llama_sampler_dist_reset,
1247
    /* .clone             = */ llama_sampler_dist_clone,
1248
    /* .free              = */ llama_sampler_dist_free,
1249
    /* .backend_init      = */ llama_sampler_dist_backend_init,
1250
    /* .backend_accept    = */ nullptr,
1251
    /* .backend_apply     = */ llama_sampler_dist_backend_apply,
1252
    /* .backend_set_input = */ llama_sampler_dist_backend_set_input,
1253
};
1254
1255
0
struct llama_sampler * llama_sampler_init_dist(uint32_t seed) {
1256
0
    auto seed_cur = get_rng_seed(seed);
1257
0
    return llama_sampler_init(
1258
0
        /* .iface = */ &llama_sampler_dist_i,
1259
0
        /* .ctx   = */ new llama_sampler_dist {
1260
0
            ("dist"),
1261
0
            /* .seed        = */ seed,
1262
0
            /* .seed_cur    = */ seed_cur,
1263
0
            /* .rng         = */ std::mt19937(seed_cur),
1264
0
            /* .inp_uniform = */ nullptr,
1265
0
            /* .inp_ctx     = */ nullptr,
1266
0
            /* .inp_buf     = */ nullptr,
1267
0
        }
1268
0
    );
1269
0
}
1270
1271
// top-k
1272
1273
struct llama_sampler_top_k : public llama_sampler_backend {
1274
    const int32_t k;
1275
};
1276
1277
0
static const char * llama_sampler_top_k_name(const struct llama_sampler * smpl) {
1278
0
    auto * sctx = (llama_sampler_top_k *) smpl->ctx;
1279
0
    return sctx->get_name();
1280
0
}
1281
1282
0
static void llama_sampler_top_k_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1283
0
    auto * ctx = (llama_sampler_top_k *) smpl->ctx;
1284
0
    llama_sampler_top_k_impl(cur_p, ctx->k);
1285
0
}
1286
1287
0
static struct llama_sampler * llama_sampler_top_k_clone(const struct llama_sampler * smpl) {
1288
0
    const auto * ctx = (const llama_sampler_top_k *) smpl->ctx;
1289
0
    return llama_sampler_init_top_k(ctx->k);
1290
0
}
1291
1292
0
static void llama_sampler_top_k_free(struct llama_sampler * smpl) {
1293
0
    delete (llama_sampler_top_k *) smpl->ctx;
1294
0
}
1295
1296
static bool llama_sampler_top_k_backend_init(
1297
        struct llama_sampler       * smpl,
1298
0
        ggml_backend_buffer_type_t   buft) {
1299
0
    auto * sctx = (llama_sampler_top_k *) smpl->ctx;
1300
1301
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1302
1303
0
    sctx->init(res);
1304
1305
0
    return res;
1306
0
}
1307
1308
static void llama_sampler_top_k_backend_apply(
1309
        struct llama_sampler      * smpl,
1310
        struct ggml_context       * ctx,
1311
        struct ggml_cgraph        * gf,
1312
0
        struct llama_sampler_data * data) {
1313
0
    auto * sctx = (llama_sampler_top_k *) smpl->ctx;
1314
1315
0
    struct ggml_tensor * top_k = ggml_top_k(ctx, data->logits, sctx->k);
1316
0
    ggml_set_name(top_k, "top_k");
1317
1318
0
    if (data->candidates) {
1319
0
        struct ggml_tensor * candidates_rows = ggml_reshape_2d(ctx, data->candidates, 1, data->candidates->ne[0]);
1320
0
        data->candidates = ggml_get_rows(ctx, candidates_rows, top_k);
1321
0
        data->candidates = ggml_reshape_1d(ctx, data->candidates, sctx->k);
1322
0
        ggml_set_name(data->candidates, "top_k_candidates");
1323
0
    } else {
1324
0
        data->candidates = top_k;
1325
0
    }
1326
1327
0
    struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]);
1328
0
    struct ggml_tensor * top_k_rows = ggml_get_rows(ctx, logits_rows, top_k);
1329
0
    data->logits = ggml_reshape_1d(ctx, top_k_rows, sctx->k);
1330
0
    ggml_set_name(top_k_rows, "top_k_rows");
1331
1332
0
    GGML_UNUSED(gf);
1333
0
}
1334
1335
static struct llama_sampler_i llama_sampler_top_k_i = {
1336
    /* .name              = */ llama_sampler_top_k_name,
1337
    /* .accept            = */ nullptr,
1338
    /* .apply             = */ llama_sampler_top_k_apply,
1339
    /* .reset             = */ nullptr,
1340
    /* .clone             = */ llama_sampler_top_k_clone,
1341
    /* .free              = */ llama_sampler_top_k_free,
1342
    /* .backend_init      = */ llama_sampler_top_k_backend_init,
1343
    /* .backend_accept    = */ nullptr,
1344
    /* .backend_apply     = */ llama_sampler_top_k_backend_apply,
1345
    /* .backend_set_input = */ nullptr,
1346
};
1347
1348
0
struct llama_sampler * llama_sampler_init_top_k(int32_t k) {
1349
0
    const bool is_empty = (k <= 0);
1350
1351
0
    if (is_empty) {
1352
0
        return llama_sampler_init_empty("?top-k");
1353
0
    }
1354
1355
0
    return llama_sampler_init(
1356
0
        /* .iface = */ &llama_sampler_top_k_i,
1357
0
        /* .ctx   = */ new llama_sampler_top_k {
1358
0
            ("top-k"),
1359
0
            /* .k = */ k,
1360
0
        }
1361
0
    );
1362
0
}
1363
1364
// top-p
1365
1366
struct llama_sampler_top_p : public llama_sampler_backend {
1367
    const float  p;
1368
    const size_t min_keep;
1369
1370
    std::vector<llama_token_data> buf_sort;
1371
};
1372
1373
0
static const char * llama_sampler_top_p_name(const struct llama_sampler * smpl) {
1374
0
    auto * sctx = (llama_sampler_top_p *) smpl->ctx;
1375
0
    return sctx->get_name();
1376
0
}
1377
1378
0
static void llama_sampler_top_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1379
0
    auto * ctx = (llama_sampler_top_p *) smpl->ctx;
1380
1381
0
    if (ctx->p >= 1.0f) {
1382
0
        return;
1383
0
    }
1384
1385
0
    llama_sampler_softmax_impl(cur_p, false);
1386
1387
0
    size_t k = cur_p->size;
1388
0
    auto * pdata = cur_p->data;
1389
1390
0
    auto & buf_sort = ctx->buf_sort;
1391
1392
    // if not sorted, try adaptive top-k sorting
1393
0
    if (!cur_p->sorted && cur_p->size > 1024) {
1394
0
        k = std::min<size_t>(256, cur_p->size);
1395
0
        llama_token_data_array_partial_sort(*cur_p, k, buf_sort);
1396
0
        pdata = buf_sort.data();
1397
0
    } else if (!cur_p->sorted) {
1398
        // small candidate set -> sort in place
1399
0
        llama_token_data_array_partial_sort_inplace(cur_p, k);
1400
0
    }
1401
1402
    // Compute the cumulative probabilities
1403
0
    float cum_sum = 0.0f;
1404
0
    size_t last_idx = cur_p->size;
1405
1406
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1407
0
        cum_sum += pdata[i].p;
1408
1409
        // Check if the running sum is at least p and we have kept at least min_keep tokens;
1410
        // we set the last index to i + 1 so that the current token is included in the kept set
1411
0
        if (cum_sum >= ctx->p && i + 1 >= ctx->min_keep) {
1412
0
            last_idx = i + 1;
1413
0
            break;
1414
0
        }
1415
1416
        // we exceeded the current top-k heuristic -> increase k and continue
1417
0
        if (!cur_p->sorted && i == k - 1) {
1418
0
            k = cur_p->size;
1419
0
            llama_token_data_array_partial_sort(*cur_p, k, buf_sort);
1420
0
            pdata = buf_sort.data();
1421
0
        }
1422
0
    }
1423
1424
    // Resize the output vector to keep only the top-p tokens
1425
0
    if (!cur_p->sorted) {
1426
0
        std::copy(buf_sort.data(), buf_sort.data() + last_idx, cur_p->data);
1427
0
        cur_p->sorted = true;
1428
0
    }
1429
1430
0
    cur_p->size = last_idx;
1431
0
}
1432
1433
0
static struct llama_sampler * llama_sampler_top_p_clone(const struct llama_sampler * smpl) {
1434
0
    const auto * ctx = (const llama_sampler_top_p *) smpl->ctx;
1435
0
    return llama_sampler_init_top_p(ctx->p, ctx->min_keep);
1436
0
}
1437
1438
0
static void llama_sampler_top_p_free(struct llama_sampler * smpl) {
1439
0
    delete (llama_sampler_top_p *) smpl->ctx;
1440
0
}
1441
1442
static bool llama_sampler_top_p_backend_init(
1443
        struct llama_sampler       * smpl,
1444
0
        ggml_backend_buffer_type_t   buft) {
1445
0
    auto * sctx = (llama_sampler_top_p *) smpl->ctx;
1446
1447
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1448
1449
0
    sctx->init(res);
1450
1451
0
    return res;
1452
0
}
1453
1454
static void llama_sampler_top_p_backend_apply(
1455
        struct llama_sampler      * smpl,
1456
        struct ggml_context       * ctx,
1457
        struct ggml_cgraph        * gf,
1458
0
        struct llama_sampler_data * data) {
1459
0
    auto * sctx = (llama_sampler_top_p *) smpl->ctx;
1460
1461
0
    auto ggml_sort = [ctx](struct ggml_tensor * a, struct ggml_tensor * b) {
1462
0
        GGML_ASSERT(ggml_nrows(a) == 1);
1463
0
        struct ggml_tensor * a_reshaped = ggml_reshape_2d(ctx, a, 1, a->ne[0]);
1464
0
        struct ggml_tensor * a_sorted   = ggml_get_rows(ctx, a_reshaped, b);
1465
0
        return ggml_reshape_1d(ctx, a_sorted, a->ne[0]);
1466
0
    };
1467
1468
    // Get the sorted logits in descending order.
1469
0
    struct ggml_tensor * sorted_idx = ggml_argsort(ctx, data->logits, GGML_SORT_ORDER_DESC);
1470
0
    ggml_set_name(sorted_idx, "top_p_sorted_idx");
1471
1472
    // Do the sorting via reshape + get_rows
1473
0
    struct ggml_tensor * sorted_logits = ggml_sort(data->logits, sorted_idx);
1474
0
    ggml_set_name(sorted_logits, "top_p_sorted_logits");
1475
1476
0
    struct ggml_tensor * softmax = ggml_soft_max(ctx, sorted_logits);
1477
0
    ggml_set_name(softmax, "top_p_softmax");
1478
1479
    // If candidates are provided, sort them as well. Otherwise, set sorted indices as candidates.
1480
0
    if (data->candidates) {
1481
0
        data->candidates = ggml_sort(data->candidates, sorted_idx);
1482
0
    } else {
1483
0
        data->candidates = sorted_idx;
1484
0
    }
1485
0
    ggml_set_name(data->candidates, "top_p_candidates");
1486
1487
    // Compute Cumulative Distribution Function (CDF) by means of GGML_OP_CUMSUM.
1488
0
    struct ggml_tensor * cdf = ggml_cumsum(ctx, softmax);
1489
0
    ggml_set_name(cdf, "top_p_cdf");
1490
1491
    // Invert CDF and add top-p value so that ggml_step yields 1 for values we want to keep
1492
0
    struct ggml_tensor * cdf_scaled = ggml_scale_bias(ctx, cdf, -1.0f, sctx->p);
1493
0
    ggml_set_name(cdf_scaled, "top_p_cdf_scaled");
1494
1495
0
    struct ggml_tensor * mask = ggml_step(ctx, cdf_scaled);
1496
0
    ggml_set_name(mask, "top_p_mask");
1497
1498
    // Taking the sum of the mask gives us the number of elements whose
1499
    // cumulative probability is still below p.
1500
0
    struct ggml_tensor * idxf = ggml_sum(ctx, mask);
1501
0
    ggml_set_name(idxf, "top_p_index_f32");
1502
1503
    // prevent out-of-bounds access
1504
0
    idxf = ggml_clamp(ctx, idxf, 0.0f, mask->ne[0] - 1);
1505
1506
    // construct a ones tensor, used below to set the boundary entry of the mask to 1
1507
0
    struct ggml_tensor * ones = ggml_scale_bias(ctx, idxf, 0.0f, 1.0f);
1508
0
    ggml_set_name(ones, "top_p_ones");
1509
1510
    // Make top-p inclusive (i.e. return all values such that cum_sum/cdf >= p)
1511
0
    struct ggml_tensor * mask_reshaped = ggml_reshape_2d(ctx, mask, 1, mask->ne[0]);
1512
1513
0
    mask_reshaped = ggml_set_rows(ctx, mask_reshaped, ones, ggml_cast(ctx, idxf, GGML_TYPE_I32));
1514
0
    mask = ggml_reshape_1d(ctx, mask_reshaped, mask->ne[0]);
1515
1516
    // Apply -INFINITY bias for masked-out tokens
1517
    // log(1) = 0 (keep), log(0) = -INF (discard)
1518
0
    struct ggml_tensor * top_p_bias = ggml_log(ctx, mask);
1519
0
    ggml_set_name(top_p_bias, "top_p_bias");
1520
1521
0
    data->logits = ggml_add(ctx, sorted_logits, top_p_bias);
1522
0
    ggml_set_name(data->logits, "top_p_logits");
1523
1524
0
    GGML_UNUSED(gf);
1525
0
}
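
// Editorial sketch (not part of the original file): the log-of-mask trick above
// in scalar form. A keep/discard mask of {1, 0} becomes an additive bias of
// {0, -inf} via log, so discarded logits turn into -inf and vanish in any later
// softmax. The helper name is hypothetical.
static float llama_apply_keep_mask_demo(float logit, bool keep) {
    const float bias = logf(keep ? 1.0f : 0.0f); // log(1) = 0, log(0) = -inf

    return logit + bias;
}
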
1526
1527
static struct llama_sampler_i llama_sampler_top_p_i = {
1528
    /* .name              = */ llama_sampler_top_p_name,
1529
    /* .accept            = */ nullptr,
1530
    /* .apply             = */ llama_sampler_top_p_apply,
1531
    /* .reset             = */ nullptr,
1532
    /* .clone             = */ llama_sampler_top_p_clone,
1533
    /* .free              = */ llama_sampler_top_p_free,
1534
    /* .backend_init      = */ llama_sampler_top_p_backend_init,
1535
    /* .backend_accept    = */ nullptr,
1536
    /* .backend_apply     = */ llama_sampler_top_p_backend_apply,
1537
    /* .backend_set_input = */ nullptr,
1538
};
1539
1540
0
struct llama_sampler * llama_sampler_init_top_p(float p, size_t min_keep) {
1541
0
    const bool is_empty = p >= 1.0f;
1542
1543
0
    if (is_empty) {
1544
0
        return llama_sampler_init_empty("?top-p");
1545
0
    }
1546
1547
0
    return llama_sampler_init(
1548
0
        /* .iface = */ &llama_sampler_top_p_i,
1549
0
        /* .ctx   = */ new llama_sampler_top_p {
1550
0
            ("top-p"),
1551
0
            /* .p        = */ p,
1552
0
            /* .min_keep = */ min_keep,
1553
0
            /* .buf_sort = */ {},
1554
0
        }
1555
0
    );
1556
0
}
1557
1558
// min-p
1559
1560
struct llama_sampler_min_p : public llama_sampler_backend {
1561
    const float  p;
1562
    const size_t min_keep;
1563
};
1564
1565
0
static const char * llama_sampler_min_p_name(const struct llama_sampler * smpl) {
1566
0
    auto * sctx = (llama_sampler_min_p *) smpl->ctx;
1567
0
    return sctx->get_name();
1568
0
}
1569
1570
0
static void llama_sampler_min_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1571
0
    auto * ctx = (llama_sampler_min_p *) smpl->ctx;
1572
1573
0
    if (ctx->p <= 0.0f || !cur_p->size) {
1574
0
        return;
1575
0
    }
1576
1577
0
    bool min_p_applied = false;
1578
1579
    // if the cur_p aren't sorted, try the unsorted implementation first
1580
0
    if (!cur_p->sorted) {
1581
0
        std::vector<llama_token_data> filtered_tokens;
1582
1583
0
        float max_logit = -FLT_MAX;
1584
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1585
0
            max_logit = std::max(max_logit, cur_p->data[i].logit);
1586
0
        }
1587
0
        const float min_logit = max_logit + logf(ctx->p); // min logit for p_i >= p * p_max
1588
1589
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1590
0
            if (cur_p->data[i].logit >= min_logit) {
1591
0
                filtered_tokens.push_back(cur_p->data[i]);
1592
0
            }
1593
0
        }
1594
1595
        // if we have enough values, the operation was a success
1596
0
        if (!filtered_tokens.empty() && filtered_tokens.size() >= ctx->min_keep) {
1597
0
            std::copy(filtered_tokens.begin(), filtered_tokens.end(), cur_p->data);
1598
0
            cur_p->size = filtered_tokens.size();
1599
0
            min_p_applied = true;
1600
0
        }
1601
0
    }
1602
1603
    // if the candidates are sorted or the unsorted implementation failed, use this implementation
1604
0
    if (!min_p_applied) {
1605
        // Sort the logits in descending order
1606
0
        if (!cur_p->sorted) {
1607
0
            llama_token_data_array_partial_sort_inplace(cur_p, cur_p->size);
1608
0
        }
1609
1610
0
        const float min_logit = cur_p->data[0].logit + logf(ctx->p); // min logit for p_i >= p * p_max
1611
0
        size_t i = 1; // first token always matches
1612
1613
0
        for (; i < cur_p->size; ++i) {
1614
0
            if (cur_p->data[i].logit < min_logit && i >= ctx->min_keep) {
1615
0
                break; // prob too small
1616
0
            }
1617
0
        }
1618
1619
        // Resize the output vector to keep only the matching tokens
1620
0
        cur_p->size = i;
1621
0
    }
1622
0
}
1623
1624
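
The unsorted fast path works because of a softmax identity: p_i / p_max = exp(logit_i - logit_max), so p_i >= p * p_max holds exactly when logit_i >= logit_max + log(p), which is the min_logit threshold above. A self-contained check of that equivalence (made-up logits, not llama.cpp code):

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const float p = 0.1f;
    std::vector<float> logits = { 3.0f, 1.5f, 0.0f, -2.0f }; // logits[0] is the max

    // softmax
    std::vector<float> probs(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) { probs[i] = std::exp(logits[i]); sum += probs[i]; }
    for (float & q : probs) { q /= sum; }

    const float min_logit = logits[0] + std::log(p);

    for (size_t i = 0; i < logits.size(); ++i) {
        std::printf("token %zu: logit test = %d, prob test = %d\n",
                    i, int(logits[i] >= min_logit), int(probs[i] >= p * probs[0]));
    }
    return 0;
}
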
0
static struct llama_sampler * llama_sampler_min_p_clone(const struct llama_sampler * smpl) {
1625
0
    const auto * ctx = (const llama_sampler_min_p *) smpl->ctx;
1626
0
    return llama_sampler_init_min_p(ctx->p, ctx->min_keep);
1627
0
}
1628
1629
0
static void llama_sampler_min_p_free(struct llama_sampler * smpl) {
1630
0
    delete (llama_sampler_min_p *) smpl->ctx;
1631
0
}
1632
1633
static bool llama_sampler_min_p_backend_init(
1634
        struct llama_sampler       * smpl,
1635
0
        ggml_backend_buffer_type_t   buft) {
1636
0
    auto * sctx = (llama_sampler_min_p *) smpl->ctx;
1637
1638
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1639
1640
0
    sctx->init(res);
1641
1642
0
    return res;
1643
0
}
1644
1645
static void llama_sampler_min_p_backend_apply(
1646
        struct llama_sampler      * smpl,
1647
        struct ggml_context       * ctx,
1648
        struct ggml_cgraph        * gf,
1649
0
        struct llama_sampler_data * data) {
1650
0
    auto * sctx = (llama_sampler_min_p *) smpl->ctx;
1651
1652
0
    struct ggml_tensor * max_idx = ggml_argmax(ctx, data->logits);
1653
0
    ggml_set_name(max_idx, "max_idx");
1654
1655
0
    struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]);
1656
0
    ggml_set_name(logits_rows, "logits_rows");
1657
1658
0
    struct ggml_tensor * max_logit = ggml_get_rows(ctx, logits_rows, max_idx);
1659
0
    ggml_set_name(max_logit, "max_logit");
1660
1661
    // Calculate the threshold value.
1662
0
    struct ggml_tensor * threshold = ggml_scale_bias(ctx, max_logit, 1.0f, logf(sctx->p));
1663
0
    ggml_set_name(threshold, "min_p_threshold");
1664
1665
    // Subtract the threshold from logits.
1666
0
    struct ggml_tensor * sub = ggml_sub(ctx, data->logits, threshold);
1667
1668
    // Create a mask where logits below the threshold are 0 (discard),
1669
    // and others are 1 (keep).
1670
0
    struct ggml_tensor * mask = ggml_step(ctx, sub);
1671
0
    ggml_set_name(mask, "min_p_mask");
1672
1673
    // Apply -INFINITY bias for masked-out tokens
1674
    // log(1) = 0 (keep), log(0) = -INF (discard)
1675
0
    struct ggml_tensor * min_p_bias = ggml_log(ctx, mask);
1676
0
    ggml_set_name(min_p_bias, "min_p_bias");
1677
1678
0
    data->logits = ggml_add(ctx, data->logits, min_p_bias);
1679
0
    ggml_set_name(data->logits, "min_p_logits");
1680
1681
0
    GGML_UNUSED(gf);
1682
0
}
1683
1684
static struct llama_sampler_i llama_sampler_min_p_i = {
1685
    /* .name              = */ llama_sampler_min_p_name,
1686
    /* .accept            = */ nullptr,
1687
    /* .apply             = */ llama_sampler_min_p_apply,
1688
    /* .reset             = */ nullptr,
1689
    /* .clone             = */ llama_sampler_min_p_clone,
1690
    /* .free              = */ llama_sampler_min_p_free,
1691
    /* .backend_init      = */ llama_sampler_min_p_backend_init,
1692
    /* .backend_accept    = */ nullptr,
1693
    /* .backend_apply     = */ llama_sampler_min_p_backend_apply,
1694
    /* .backend_set_input = */ nullptr,
1695
};
1696
1697
0
struct llama_sampler * llama_sampler_init_min_p(float p, size_t min_keep) {
1698
0
    const bool is_empty = (p <= 0.0f);
1699
1700
0
    if (is_empty) {
1701
0
        return llama_sampler_init_empty("?min-p");
1702
0
    }
1703
1704
0
    return llama_sampler_init(
1705
0
        /* .iface = */ &llama_sampler_min_p_i,
1706
0
        /* .ctx   = */ new llama_sampler_min_p {
1707
0
            ("min-p"),
1708
0
            /* .p        = */ p,
1709
0
            /* .min_keep = */ min_keep,
1710
0
        }
1711
0
    );
1712
0
}
1713
1714
// typical
1715
1716
struct llama_sampler_typical {
1717
    const float  p;
1718
    const size_t min_keep;
1719
};
1720
1721
0
static const char * llama_sampler_typical_name(const struct llama_sampler * /*smpl*/) {
1722
0
    return "typical";
1723
0
}
1724
1725
0
static void llama_sampler_typical_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1726
0
    auto * ctx = (llama_sampler_typical *) smpl->ctx;
1727
1728
    // Reference implementation:
1729
    // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
1730
0
    if (ctx->p >= 1.0f) {
1731
0
        return;
1732
0
    }
1733
1734
    // Compute the softmax of logits and calculate entropy
1735
0
    llama_sampler_softmax_impl(cur_p, true);
1736
1737
0
    float entropy = 0.0f;
1738
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1739
0
        entropy += -cur_p->data[i].p * logf(cur_p->data[i].p);
1740
0
    }
1741
1742
    // Compute the absolute difference between negative log probability and entropy for each candidate
1743
0
    std::vector<float> shifted_scores;
1744
0
    for (size_t i = 0; i < cur_p->size; ++i) {
1745
0
        float shifted_score = fabsf(-logf(cur_p->data[i].p) - entropy);
1746
0
        shifted_scores.push_back(shifted_score);
1747
0
    }
1748
1749
    // Sort tokens based on the shifted_scores and their corresponding indices
1750
0
    std::vector<size_t> indices(cur_p->size);
1751
0
    std::iota(indices.begin(), indices.end(), 0);
1752
1753
0
    std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
1754
0
        return shifted_scores[a] < shifted_scores[b];
1755
0
    });
1756
1757
    // Compute the cumulative probabilities
1758
0
    float cum_sum = 0.0f;
1759
0
    size_t last_idx = indices.size();
1760
1761
0
    for (size_t i = 0; i < indices.size(); ++i) {
1762
0
        size_t idx = indices[i];
1763
0
        cum_sum += cur_p->data[idx].p;
1764
1765
        // Stop once the running sum exceeds p and we have kept at least min_keep tokens
1766
0
        if (cum_sum > ctx->p && (ctx->min_keep == 0 || i >= ctx->min_keep - 1)) {
1767
0
            last_idx = i + 1;
1768
0
            break;
1769
0
        }
1770
0
    }
1771
1772
    // Resize the output vector to keep only the locally typical tokens
1773
0
    std::vector<llama_token_data> cur_p_new;
1774
0
    for (size_t i = 0; i < last_idx; ++i) {
1775
0
        size_t idx = indices[i];
1776
0
        cur_p_new.push_back(cur_p->data[idx]);
1777
0
    }
1778
1779
    // Replace the data in cur_p with the cur_p_new data
1780
0
    std::copy(cur_p_new.begin(), cur_p_new.end(), cur_p->data);
1781
0
    cur_p->size = cur_p_new.size();
1782
0
    cur_p->sorted = false;
1783
0
}
1784
1785
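
To make the procedure above concrete, the sketch below (made-up probabilities, not llama.cpp code) ranks tokens by how close their surprise -log(p) is to the entropy of the distribution, then keeps them until the cumulative probability exceeds the target:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    const float target_p = 0.9f;
    std::vector<float> probs = { 0.5f, 0.3f, 0.15f, 0.05f }; // already normalized

    float entropy = 0.0f;
    for (float q : probs) { entropy -= q * std::log(q); }

    // sort indices by |(-log p) - entropy|, most "typical" first
    std::vector<size_t> order(probs.size());
    std::iota(order.begin(), order.end(), 0);
    std::sort(order.begin(), order.end(), [&](size_t a, size_t b) {
        return std::fabs(-std::log(probs[a]) - entropy) < std::fabs(-std::log(probs[b]) - entropy);
    });

    float cum = 0.0f;
    for (size_t idx : order) {
        cum += probs[idx];
        std::printf("keep token %zu (p=%.2f, cum=%.2f)\n", idx, probs[idx], cum);
        if (cum > target_p) break;
    }
    return 0;
}
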
0
static struct llama_sampler * llama_sampler_typical_clone(const struct llama_sampler * smpl) {
1786
0
    const auto * ctx = (const llama_sampler_typical *) smpl->ctx;
1787
0
    return llama_sampler_init_typical(ctx->p, ctx->min_keep);
1788
0
}
1789
1790
0
static void llama_sampler_typical_free(struct llama_sampler * smpl) {
1791
0
    delete (llama_sampler_typical *) smpl->ctx;
1792
0
}
1793
1794
static struct llama_sampler_i llama_sampler_typical_i = {
1795
    /* .name              = */ llama_sampler_typical_name,
1796
    /* .accept            = */ nullptr,
1797
    /* .apply             = */ llama_sampler_typical_apply,
1798
    /* .reset             = */ nullptr,
1799
    /* .clone             = */ llama_sampler_typical_clone,
1800
    /* .free              = */ llama_sampler_typical_free,
1801
    /* .backend_init      = */ nullptr,
1802
    /* .backend_accept    = */ nullptr,
1803
    /* .backend_apply     = */ nullptr,
1804
    /* .backend_set_input = */ nullptr,
1805
};
1806
1807
0
struct llama_sampler * llama_sampler_init_typical(float p, size_t min_keep) {
1808
0
    const bool is_empty = (p >= 1.0f);
1809
1810
0
    if (is_empty) {
1811
0
        return llama_sampler_init_empty("?typical");
1812
0
    }
1813
1814
0
    return llama_sampler_init(
1815
0
        /* .iface = */ &llama_sampler_typical_i,
1816
0
        /* .ctx   = */ new llama_sampler_typical {
1817
0
            /* .p        = */ p,
1818
0
            /* .min_keep = */ min_keep,
1819
0
        }
1820
0
    );
1821
0
}
1822
1823
// temp
1824
1825
struct llama_sampler_temp : public llama_sampler_backend {
1826
    const float temp;
1827
};
1828
1829
0
static const char * llama_sampler_temp_name(const struct llama_sampler * smpl) {
1830
0
    auto * sctx = (llama_sampler_temp *) smpl->ctx;
1831
0
    return sctx->get_name();
1832
0
}
1833
1834
0
static void llama_sampler_temp_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1835
0
    const auto * ctx = (llama_sampler_temp *) smpl->ctx;
1836
1837
0
    llama_sampler_temp_impl(cur_p, ctx->temp);
1838
0
}
1839
1840
0
static struct llama_sampler * llama_sampler_temp_clone(const struct llama_sampler * smpl) {
1841
0
    const auto * ctx = (const llama_sampler_temp *) smpl->ctx;
1842
0
    return llama_sampler_init_temp(ctx->temp);
1843
0
}
1844
1845
0
static void llama_sampler_temp_free(struct llama_sampler * smpl) {
1846
0
    delete (llama_sampler_temp *) smpl->ctx;
1847
0
}
1848
1849
static void llama_sampler_backend_temp_sampling(
1850
        struct ggml_context       * ctx,
1851
        struct ggml_cgraph        * gf,
1852
        struct llama_sampler_data * data,
1853
0
        float                       temp) {
1854
0
    if (temp <= 0.0f) {
1855
        // Find the most probable token index.
1856
0
        struct ggml_tensor * max_idx = ggml_argmax(ctx, data->logits);
1857
0
        ggml_set_name(max_idx, "temp_max_idx");
1858
1859
0
        if (data->candidates) {
1860
0
            struct ggml_tensor * candidates_rows = ggml_reshape_2d(ctx, data->candidates, 1, data->candidates->ne[0]);
1861
0
            data->candidates = ggml_get_rows(ctx, candidates_rows, max_idx);
1862
0
        } else {
1863
0
            data->candidates = max_idx;
1864
0
        }
1865
1866
0
        struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]);
1867
0
        data->logits = ggml_get_rows(ctx, logits_rows, max_idx);
1868
1869
0
        return;
1870
0
    }
1871
1872
0
    data->logits = ggml_scale(ctx, data->logits, 1.0f / temp);
1873
1874
0
    GGML_UNUSED(gf);
1875
0
}
1876
1877
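
A CPU analogue of the two paths above (a sketch, not the backend code): a non-positive temperature collapses to greedy argmax, otherwise every logit is scaled by 1/temp:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> logits = { 1.0f, 3.0f, 2.0f };
    const float temp = 0.0f; // try 0.7f to exercise the scaling path

    if (temp <= 0.0f) {
        const auto it = std::max_element(logits.begin(), logits.end());
        std::printf("greedy pick: token %d\n", (int) (it - logits.begin()));
    } else {
        for (float & l : logits) { l *= 1.0f / temp; }
        std::printf("scaled logits: %f %f %f\n", logits[0], logits[1], logits[2]);
    }
    return 0;
}
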
static bool llama_sampler_temp_backend_init(
1878
        struct llama_sampler       * smpl,
1879
0
        ggml_backend_buffer_type_t   buft) {
1880
0
    auto * sctx = (llama_sampler_temp *) smpl->ctx;
1881
1882
0
    const bool res = llama_sampler_backend_support(smpl, buft);
1883
1884
0
    sctx->init(res);
1885
1886
0
    return res;
1887
0
}
1888
1889
static void llama_sampler_temp_backend_apply(
1890
        struct llama_sampler      * smpl,
1891
        struct ggml_context       * ctx,
1892
        struct ggml_cgraph        * gf,
1893
0
        struct llama_sampler_data * data) {
1894
0
    auto * sctx = (llama_sampler_temp *) smpl->ctx;
1895
0
    llama_sampler_backend_temp_sampling(ctx, gf, data, sctx->temp);
1896
0
}
1897
1898
static struct llama_sampler_i llama_sampler_temp_i = {
1899
    /* .name              = */ llama_sampler_temp_name,
1900
    /* .accept            = */ nullptr,
1901
    /* .apply             = */ llama_sampler_temp_apply,
1902
    /* .reset             = */ nullptr,
1903
    /* .clone             = */ llama_sampler_temp_clone,
1904
    /* .free              = */ llama_sampler_temp_free,
1905
    /* .backend_init      = */ llama_sampler_temp_backend_init,
1906
    /* .backend_accept    = */ nullptr,
1907
    /* .backend_apply     = */ llama_sampler_temp_backend_apply,
1908
    /* .backend_set_input = */ nullptr,
1909
};
1910
1911
0
struct llama_sampler * llama_sampler_init_temp(float temp) {
1912
0
    const bool is_empty = temp == 1.0f;
1913
1914
0
    if (is_empty) {
1915
0
        return llama_sampler_init_empty("?temp");
1916
0
    }
1917
1918
0
    return llama_sampler_init(
1919
0
        /* .iface = */ &llama_sampler_temp_i,
1920
0
        /* .ctx   = */ new llama_sampler_temp {
1921
0
            ("temp"),
1922
0
            /*.temp = */ temp,
1923
0
        }
1924
0
    );
1925
0
}
1926
1927
// temp-ext
1928
1929
struct llama_sampler_temp_ext : public llama_sampler_backend {
1930
    const float temp;
1931
    const float delta;
1932
    const float exponent;
1933
};
1934
1935
0
static const char * llama_sampler_temp_ext_name(const struct llama_sampler * smpl) {
1936
0
    auto * sctx = (llama_sampler_temp_ext *) smpl->ctx;
1937
0
    return sctx->get_name();
1938
0
}
1939
1940
0
static void llama_sampler_temp_ext_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
1941
0
    auto * ctx = (llama_sampler_temp_ext *) smpl->ctx;
1942
0
    if (ctx->delta > 0) {
1943
0
        const float min_temp = std::max(0.0f, ctx->temp - ctx->delta);
1944
0
        const float max_temp = ctx->temp + ctx->delta;
1945
1946
0
        float exponent_val = ctx->exponent;
1947
1948
        // no need to do anything if there is only one (or zero) candidates
1949
0
        if (cur_p->size <= 1) {
1950
0
            return;
1951
0
        }
1952
1953
        // Calculate maximum possible entropy
1954
0
        float max_entropy = -logf(1.0f / cur_p->size);
1955
1956
0
        llama_sampler_softmax_impl(cur_p, true);
1957
1958
        // Calculate entropy of the softmax probabilities
1959
0
        float entropy = 0.0f;
1960
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1961
0
            float prob = cur_p->data[i].p;
1962
0
            if (prob > 0.0f) { // Ensure no log(0)
1963
0
                entropy -= prob * logf(prob);
1964
0
            }
1965
0
        }
1966
1967
        // Normalize the entropy (max_entropy cannot be 0 here because we checked cur_p->size != 1 above)
1968
0
        float normalized_entropy = entropy / max_entropy;
1969
1970
        // Map the normalized entropy to the desired temperature range using the power function
1971
0
        float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val);
1972
1973
    #ifdef DEBUG
1974
        LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp);
1975
        LLAMA_LOG_INFO("Entropy: %f\n", entropy);
1976
        LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy);
1977
        LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy);
1978
        LLAMA_LOG_INFO("Exponent: %f\n", exponent_val);
1979
        LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp);
1980
    #endif
1981
1982
        // Apply the dynamically calculated temperature scaling
1983
0
        llama_sampler_temp_impl(cur_p, dyn_temp);
1984
1985
        // Re-compute softmax probabilities after scaling logits with dynamic temperature
1986
0
        const double max_l_double = cur_p->data[0].logit;
1987
1988
0
        double cum_sum_double = 0.0;
1989
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1990
0
            double p = exp(cur_p->data[i].logit - max_l_double);
1991
0
            cur_p->data[i].p = p; // Store the scaled probability
1992
0
            cum_sum_double += p;
1993
0
        }
1994
1995
0
        for (size_t i = 0; i < cur_p->size; ++i) {
1996
0
            cur_p->data[i].p /= cum_sum_double; // Re-normalize the probabilities
1997
0
        }
1998
1999
    #ifdef DEBUG
2000
        // Print the updated top 25 probabilities after temperature scaling
2001
        LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n");
2002
        for (size_t i = 0; i < 25 && i < cur_p->size; ++i) {
2003
            LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, cur_p->data[i].p * 100.0f);
2004
        }
2005
    #endif
2006
0
    } else {
2007
0
        llama_sampler_temp_impl(cur_p, ctx->temp);
2008
0
    }
2009
0
}
2010
2011
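
A worked example (made-up numbers) of the entropy-to-temperature mapping used above, dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent):

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const float temp = 0.8f, delta = 0.5f, exponent = 1.0f;
    const float min_temp = std::max(0.0f, temp - delta); // 0.3
    const float max_temp = temp + delta;                 // 1.3

    // suppose softmax over 4 candidates yielded an entropy of 1.0 nat;
    // the maximum possible entropy for 4 candidates is log(4)
    const float entropy      = 1.0f;
    const float max_entropy  = std::log(4.0f);
    const float norm_entropy = entropy / max_entropy; // ~0.72

    const float dyn_temp = min_temp + (max_temp - min_temp) * std::pow(norm_entropy, exponent);
    std::printf("dyn_temp = %f\n", dyn_temp); // ~1.02: flat distribution -> hotter sampling
    return 0;
}
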
0
static struct llama_sampler * llama_sampler_temp_ext_clone(const struct llama_sampler * smpl) {
2012
0
    const auto * ctx = (const llama_sampler_temp_ext *) smpl->ctx;
2013
0
    return llama_sampler_init_temp_ext(ctx->temp, ctx->delta, ctx->exponent);
2014
0
}
2015
2016
0
static void llama_sampler_temp_ext_free(struct llama_sampler * smpl) {
2017
0
    delete (llama_sampler_temp_ext *) smpl->ctx;
2018
0
}
2019
2020
static bool llama_sampler_temp_ext_backend_init(
2021
        struct llama_sampler       * smpl,
2022
0
        ggml_backend_buffer_type_t   buft) {
2023
0
    auto * sctx = (llama_sampler_temp_ext *) smpl->ctx;
2024
2025
0
    const bool res = llama_sampler_backend_support(smpl, buft);
2026
2027
0
    sctx->init(res);
2028
2029
0
    return res;
2030
0
}
2031
2032
static void llama_sampler_temp_ext_backend_apply(
2033
        struct llama_sampler      * smpl,
2034
        struct ggml_context       * ctx,
2035
        struct ggml_cgraph        * gf,
2036
0
        struct llama_sampler_data * data) {
2037
0
    auto * sctx = (llama_sampler_temp_ext *) smpl->ctx;
2038
2039
    // Revert to standard temperature scaling if delta or temp is non-positive.
2040
0
    if (sctx->delta <= 0.0f || sctx->temp <= 0.0f) {
2041
0
        llama_sampler_backend_temp_sampling(ctx, gf, data, sctx->temp);
2042
0
        return;
2043
0
    }
2044
2045
    // Calculate min_temp, max_temp, and max_entropy.
2046
0
    const float min_temp    = std::max(0.0f, sctx->temp - sctx->delta);
2047
0
    const float max_temp    = sctx->temp + sctx->delta;
2048
0
    const float max_entropy = logf(data->logits->ne[0]);
2049
2050
    // Calculate the probabilities.
2051
0
    struct ggml_tensor * probs = ggml_soft_max(ctx, data->logits);
2052
0
    ggml_set_name(probs, "temp_ext_softmax_probs");
2053
2054
    // Clamp probabilities to avoid log(0) which would give -inf
2055
0
    struct ggml_tensor * probs_clamped = ggml_clamp(ctx, probs, 1e-10f, 1.0f);
2056
0
    ggml_set_name(probs_clamped, "temp_ext_probs_clamped");
2057
2058
    // Calculate the entropy, entropy = -Σ(p * log(p)).
2059
0
    struct ggml_tensor * log_probs   = ggml_log(ctx, probs_clamped);
2060
0
    struct ggml_tensor * p_log_p     = ggml_mul(ctx, probs_clamped, log_probs);
2061
0
    struct ggml_tensor * sum_p_log_p = ggml_sum(ctx, p_log_p);
2062
0
    struct ggml_tensor * entropy     = ggml_scale(ctx, sum_p_log_p, -1.0f);
2063
0
    ggml_set_name(log_probs,   "temp_ext_log_probs");
2064
0
    ggml_set_name(p_log_p,     "temp_ext_p_log_p");
2065
0
    ggml_set_name(sum_p_log_p, "temp_ext_sum_p_log_p");
2066
0
    ggml_set_name(entropy,     "temp_ext_entropy");
2067
2068
    // Normalize the entropy, norm_entropy = entropy / max_entropy
2069
0
    struct ggml_tensor * norm_entropy = ggml_scale(ctx, entropy, 1.0f / max_entropy);
2070
0
    ggml_set_name(norm_entropy, "temp_ext_norm_entropy");
2071
2072
    // Calculate the dynamic temperature:
2073
    // dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent);
2074
    //
2075
    // Calculate powf(normalized_entropy, exponent) as
2076
    // norm_entropy^exponent = exp(exponent * log(norm_entropy))
2077
0
    struct ggml_tensor * log_norm_entropy = ggml_log(ctx, norm_entropy);
2078
0
    struct ggml_tensor * scaled_log       = ggml_scale(ctx, log_norm_entropy, sctx->exponent);
2079
0
    struct ggml_tensor * pow_entropy      = ggml_exp(ctx, scaled_log);
2080
    // With pow_entropy computed we can now compute dyn_temp, scaling by
2081
    // (max_temp - min_temp) and then adding min_temp.
2082
0
    struct ggml_tensor * dyn_temp         = ggml_scale_bias(ctx, pow_entropy, max_temp - min_temp, min_temp);
2083
0
    ggml_set_name(log_norm_entropy, "temp_ext_log_norm_entropy");
2084
0
    ggml_set_name(scaled_log,       "temp_ext_scaled_log");
2085
0
    ggml_set_name(pow_entropy,      "temp_ext_pow_entropy");
2086
0
    ggml_set_name(dyn_temp,         "temp_ext_dyn_temp");
2087
2088
    // Scale the logits by the dynamic temperature
2089
0
    struct ggml_tensor * scaled_logits = ggml_div(ctx, data->logits, dyn_temp);
2090
0
    ggml_set_name(scaled_logits, "temp_ext_scaled_logits");
2091
2092
0
    data->logits = scaled_logits;
2093
0
}
2094
2095
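
Rather than a dedicated pow operator, the graph above computes the power through the identity x^e = exp(e * log(x)), which is valid for x > 0 (the probabilities were clamped away from zero beforehand). A quick standalone check:

#include <cmath>
#include <cstdio>

int main() {
    const float x = 0.42f, e = 1.7f;
    std::printf("pow: %f  exp(e*log(x)): %f\n", std::pow(x, e), std::exp(e * std::log(x)));
    return 0;
}
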
static struct llama_sampler_i llama_sampler_temp_ext_i = {
2096
    /* .name              = */ llama_sampler_temp_ext_name,
2097
    /* .accept            = */ nullptr,
2098
    /* .apply             = */ llama_sampler_temp_ext_apply,
2099
    /* .reset             = */ nullptr,
2100
    /* .clone             = */ llama_sampler_temp_ext_clone,
2101
    /* .free              = */ llama_sampler_temp_ext_free,
2102
    /* .backend_init      = */ llama_sampler_temp_ext_backend_init,
2103
    /* .backend_accept    = */ nullptr,
2104
    /* .backend_apply     = */ llama_sampler_temp_ext_backend_apply,
2105
    /* .backend_set_input = */ nullptr,
2106
};
2107
2108
0
struct llama_sampler * llama_sampler_init_temp_ext(float temp, float delta, float exponent) {
2109
0
    const bool is_empty = temp == 1.0f && delta <= 0.0f;
2110
2111
0
    if (is_empty) {
2112
0
        return llama_sampler_init_empty("?temp-ext");
2113
0
    }
2114
2115
0
    auto * res = llama_sampler_init(
2116
0
        /* .iface = */ &llama_sampler_temp_ext_i,
2117
0
        /* .ctx   = */ new llama_sampler_temp_ext {
2118
0
            ("temp-ext"),
2119
0
            /* .temp     = */ temp,
2120
0
            /* .delta    = */ delta,
2121
0
            /* .exponent = */ exponent,
2122
0
        }
2123
0
    );
2124
2125
0
    return res;
2126
0
}
2127
2128
// xtc
2129
2130
struct llama_sampler_xtc {
2131
    const float    probability;
2132
    const float    threshold;
2133
    const size_t   min_keep;
2134
2135
    const uint32_t seed;
2136
    uint32_t       seed_cur;
2137
2138
    std::mt19937   rng;
2139
};
2140
2141
0
static const char * llama_sampler_xtc_name(const struct llama_sampler * /*smpl*/) {
2142
0
    return "xtc";
2143
0
}
2144
2145
0
static void llama_sampler_xtc_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2146
0
    auto * ctx = (llama_sampler_xtc *) smpl->ctx;
2147
2148
0
    if (ctx->probability <= 0.0f
2149
0
        || ctx->threshold > 0.5f
2150
0
        || cur_p->size < 2) {
2151
0
        return;
2152
0
    }
2153
2154
0
    std::uniform_real_distribution<float> distribution(0.0f, 1.0f);
2155
0
    float chance = distribution(ctx->rng);
2156
0
    if (chance > ctx->probability) {
2157
0
        return;
2158
0
    }
2159
2160
0
    llama_sampler_softmax_impl(cur_p, true);
2161
2162
0
    int pos_last = 0;
2163
2164
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2165
0
        if (cur_p->data[i].p >= ctx->threshold) {
2166
0
            pos_last = i;
2167
0
        } else {
2168
0
            break;
2169
0
        }
2170
0
    }
2171
2172
0
    if (cur_p->size - pos_last >= ctx->min_keep && pos_last > 0) {
2173
0
        cur_p->data += pos_last;
2174
0
        cur_p->size -= pos_last;
2175
0
    }
2176
0
}
2177
2178
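
In other words, XTC probabilistically "excludes top choices": among the probability-sorted candidates it finds the last index whose probability still meets the threshold and drops everything before it, keeping the least likely of those top tokens plus the entire tail. A standalone sketch with made-up probabilities:

#include <cstdio>
#include <vector>

int main() {
    const float threshold = 0.2f;
    std::vector<float> probs = { 0.45f, 0.30f, 0.15f, 0.10f }; // sorted descending

    size_t pos_last = 0;
    for (size_t i = 0; i < probs.size(); ++i) {
        if (probs[i] >= threshold) { pos_last = i; } else { break; }
    }

    // keep probs[pos_last..]: here tokens 1..3 survive and the top choice is cut
    for (size_t i = pos_last; i < probs.size(); ++i) {
        std::printf("kept p=%.2f\n", probs[i]);
    }
    return 0;
}
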
0
static struct llama_sampler * llama_sampler_xtc_clone(const struct llama_sampler * smpl) {
2179
0
    const auto * ctx = (const llama_sampler_xtc *) smpl->ctx;
2180
0
    auto * result = llama_sampler_init_xtc(ctx->probability, ctx->threshold, ctx->min_keep, ctx->seed);
2181
2182
    // copy the state
2183
0
    {
2184
0
        auto * result_ctx = (llama_sampler_xtc *) result->ctx;
2185
2186
0
        result_ctx->rng = ctx->rng;
2187
0
    }
2188
2189
0
    return result;
2190
0
}
2191
2192
0
static void llama_sampler_xtc_free(struct llama_sampler * smpl) {
2193
0
    delete (llama_sampler_xtc *) smpl->ctx;
2194
0
}
2195
2196
0
static void llama_sampler_xtc_reset(struct llama_sampler * smpl) {
2197
0
    auto * ctx = (llama_sampler_xtc *) smpl->ctx;
2198
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
2199
0
    ctx->rng.seed(ctx->seed_cur);
2200
0
}
2201
2202
static struct llama_sampler_i llama_sampler_xtc_i = {
2203
    /* .name              = */ llama_sampler_xtc_name,
2204
    /* .accept            = */ nullptr,
2205
    /* .apply             = */ llama_sampler_xtc_apply,
2206
    /* .reset             = */ llama_sampler_xtc_reset,
2207
    /* .clone             = */ llama_sampler_xtc_clone,
2208
    /* .free              = */ llama_sampler_xtc_free,
2209
    /* .backend_init      = */ nullptr,
2210
    /* .backend_accept    = */ nullptr,
2211
    /* .backend_apply     = */ nullptr,
2212
    /* .backend_set_input = */ nullptr,
2213
};
2214
2215
0
struct llama_sampler * llama_sampler_init_xtc(float p, float t, size_t min_keep, uint32_t seed) {
2216
0
    const bool is_empty = (p <= 0.0f || t > 0.5f);
2217
2218
0
    if (is_empty) {
2219
0
        return llama_sampler_init_empty("?xtc");
2220
0
    }
2221
2222
0
    const auto seed_cur = get_rng_seed(seed);
2223
2224
0
    return llama_sampler_init(
2225
0
        /* .iface = */ &llama_sampler_xtc_i,
2226
0
        /* .ctx   = */ new llama_sampler_xtc {
2227
0
            /* .probability   = */ p,
2228
0
            /* .threshold     = */ t,
2229
0
            /* .min_keep      = */ min_keep,
2230
0
            /* .seed          = */ seed,
2231
0
            /* .seed_cur      = */ seed_cur,
2232
0
            /* .rng           = */ std::mt19937(seed_cur),
2233
0
        }
2234
0
    );
2235
0
}
2236
2237
// mirostat
2238
2239
struct llama_sampler_mirostat {
2240
    const int32_t n_vocab;
2241
2242
    const uint32_t seed;
2243
          uint32_t seed_cur;
2244
2245
    const float tau;
2246
    const float eta;
2247
2248
    const int32_t m;
2249
2250
    float mu;
2251
2252
    std::mt19937    rng;
2253
};
2254
2255
0
static const char * llama_sampler_mirostat_name(const struct llama_sampler * /*smpl*/) {
2256
0
    return "mirostat";
2257
0
}
2258
2259
0
static void llama_sampler_mirostat_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2260
0
    auto * ctx = (llama_sampler_mirostat *) smpl->ctx;
2261
2262
0
    llama_sampler_softmax_impl(cur_p, true);
2263
2264
    // Estimate s_hat using the most probable m tokens
2265
0
    float s_hat = 0.0;
2266
0
    float sum_ti_bi = 0.0;
2267
0
    float sum_ti_sq = 0.0;
2268
0
    for (size_t i = 0; i < size_t(ctx->m - 1) && i < cur_p->size - 1; ++i) {
2269
0
        float t_i = logf(float(i + 2) / float(i + 1));
2270
0
        float b_i = logf(cur_p->data[i].p / cur_p->data[i + 1].p);
2271
0
        sum_ti_bi += t_i * b_i;
2272
0
        sum_ti_sq += t_i * t_i;
2273
0
    }
2274
0
    s_hat = sum_ti_bi / sum_ti_sq;
2275
2276
    // Compute k from the estimated s_hat and target surprise value
2277
0
    float epsilon_hat = s_hat - 1;
2278
0
    float k = powf((epsilon_hat * powf(2, ctx->mu)) / (1 - powf(ctx->n_vocab, -epsilon_hat)), 1 / s_hat);
2279
2280
0
    llama_sampler_top_k_impl(cur_p, std::max(int(k), 1));
2281
2282
0
    llama_sampler_softmax_impl(cur_p, true);
2283
2284
0
    const int idx = llama_sample_dist(cur_p, ctx->rng);
2285
2286
0
    cur_p->selected = idx;
2287
2288
0
    float observed_surprise = -log2f(cur_p->data[idx].p);
2289
0
    float e = observed_surprise - ctx->tau;
2290
2291
    // Update mu using the learning rate and error
2292
0
    ctx->mu = ctx->mu - ctx->eta * e;
2293
0
}
2294
2295
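
Stripped of the top-k machinery, Mirostat is a proportional feedback loop: the observed surprise -log2(p) of each sampled token is compared against the target tau, and mu is nudged by eta times the error. A minimal sketch (made-up sampled probabilities, not llama.cpp code):

#include <cmath>
#include <cstdio>

int main() {
    const float tau = 5.0f, eta = 0.1f;
    float mu = 2.0f * tau; // same initialization as above

    const float sampled_p[] = { 0.02f, 0.10f, 0.30f };
    for (float p : sampled_p) {
        const float surprise = -std::log2(p);
        mu -= eta * (surprise - tau);
        std::printf("p=%.2f surprise=%.2f mu=%.2f\n", p, surprise, mu);
    }
    return 0;
}
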
0
static struct llama_sampler * llama_sampler_mirostat_clone(const struct llama_sampler * smpl) {
2296
0
    const auto * ctx = (const llama_sampler_mirostat *) smpl->ctx;
2297
0
    auto * result = llama_sampler_init_mirostat(ctx->n_vocab, ctx->seed, ctx->tau, ctx->eta, ctx->m);
2298
2299
    // copy the state
2300
0
    {
2301
0
        auto * result_ctx = (llama_sampler_mirostat *) result->ctx;
2302
2303
0
        result_ctx->mu  = ctx->mu;
2304
0
        result_ctx->rng = ctx->rng;
2305
0
    }
2306
2307
0
    return result;
2308
0
}
2309
2310
0
static void llama_sampler_mirostat_reset(struct llama_sampler * smpl) {
2311
0
    auto * ctx = (llama_sampler_mirostat *) smpl->ctx;
2312
0
    ctx->mu = 2.0f*ctx->tau;
2313
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
2314
0
    ctx->rng.seed(ctx->seed_cur);
2315
0
}
2316
2317
0
static void llama_sampler_mirostat_free(struct llama_sampler * smpl) {
2318
0
    delete (llama_sampler_mirostat *) smpl->ctx;
2319
0
}
2320
2321
static struct llama_sampler_i llama_sampler_mirostat_i = {
2322
    /* .name              = */ llama_sampler_mirostat_name,
2323
    /* .accept            = */ nullptr,
2324
    /* .apply             = */ llama_sampler_mirostat_apply,
2325
    /* .reset             = */ llama_sampler_mirostat_reset,
2326
    /* .clone             = */ llama_sampler_mirostat_clone,
2327
    /* .free              = */ llama_sampler_mirostat_free,
2328
    /* .backend_init      = */ nullptr,
2329
    /* .backend_accept    = */ nullptr,
2330
    /* .backend_apply     = */ nullptr,
2331
    /* .backend_set_input = */ nullptr,
2332
};
2333
2334
0
struct llama_sampler * llama_sampler_init_mirostat(int32_t n_vocab, uint32_t seed, float tau, float eta, int32_t m) {
2335
0
    const auto seed_cur = get_rng_seed(seed);
2336
2337
0
    return llama_sampler_init(
2338
0
        /* .iface = */ &llama_sampler_mirostat_i,
2339
0
        /* .ctx   = */ new llama_sampler_mirostat {
2340
0
            /* .n_vocab  = */ n_vocab,
2341
0
            /* .seed     = */ seed,
2342
0
            /* .seed_cur = */ seed_cur,
2343
0
            /* .tau      = */ tau,
2344
0
            /* .eta      = */ eta,
2345
0
            /* .m        = */ m,
2346
0
            /* .mu       = */ 2.0f*tau,
2347
0
            /* .rng      = */ std::mt19937(seed_cur),
2348
0
        }
2349
0
    );
2350
0
}
2351
2352
// mirostat v2
2353
2354
struct llama_sampler_mirostat_v2 {
2355
    const uint32_t seed;
2356
          uint32_t seed_cur;
2357
2358
    const float tau;
2359
    const float eta;
2360
2361
    float mu;
2362
2363
    std::mt19937 rng;
2364
};
2365
2366
0
static const char * llama_sampler_mirostat_v2_name(const struct llama_sampler * /*smpl*/) {
2367
0
    return "mirostat-v2";
2368
0
}
2369
2370
0
static void llama_sampler_mirostat_v2_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2371
0
    auto * ctx = (llama_sampler_mirostat_v2 *) smpl->ctx;
2372
2373
0
    llama_sampler_softmax_impl(cur_p, true);
2374
2375
    // Truncate the words with surprise values greater than mu
2376
0
    cur_p->size = std::distance(cur_p->data, std::find_if(cur_p->data, cur_p->data + cur_p->size, [&](const llama_token_data & candidate) {
2377
0
        return -log2f(candidate.p) > ctx->mu;
2378
0
    }));
2379
2380
0
    if (cur_p->size == 0) {
2381
0
        cur_p->size = 1;
2382
0
    }
2383
2384
    // Normalize the probabilities of the remaining words
2385
0
    llama_sampler_softmax_impl(cur_p, true);
2386
2387
0
    const int idx = llama_sample_dist(cur_p, ctx->rng);
2388
2389
0
    cur_p->selected = idx;
2390
2391
0
    float observed_surprise = -log2f(cur_p->data[idx].p);
2392
0
    float e = observed_surprise - ctx->tau;
2393
2394
    // Update mu using the learning rate and error
2395
0
    ctx->mu = ctx->mu - ctx->eta * e;
2396
0
}
2397
2398
0
static void llama_sampler_mirostat_v2_reset(struct llama_sampler * smpl) {
2399
0
    auto * ctx = (llama_sampler_mirostat_v2 *) smpl->ctx;
2400
0
    ctx->mu = 2.0f*ctx->tau;
2401
0
    ctx->seed_cur = get_rng_seed(ctx->seed);
2402
0
    ctx->rng.seed(ctx->seed_cur);
2403
0
}
2404
2405
0
static struct llama_sampler * llama_sampler_mirostat_v2_clone(const struct llama_sampler * smpl) {
2406
0
    const auto * ctx = (const llama_sampler_mirostat_v2 *) smpl->ctx;
2407
2408
0
    auto * result = llama_sampler_init_mirostat_v2(ctx->seed, ctx->tau, ctx->eta);
2409
2410
    // copy the state
2411
0
    {
2412
0
        auto * result_ctx = (llama_sampler_mirostat_v2 *) result->ctx;
2413
2414
0
        result_ctx->mu  = ctx->mu;
2415
0
        result_ctx->rng = ctx->rng;
2416
0
    }
2417
2418
0
    return result;
2419
0
}
2420
2421
0
static void llama_sampler_mirostat_v2_free(struct llama_sampler * smpl) {
2422
0
    delete (llama_sampler_mirostat_v2 *) smpl->ctx;
2423
0
}
2424
2425
static struct llama_sampler_i llama_sampler_mirostat_v2_i = {
2426
    /* .name              = */ llama_sampler_mirostat_v2_name,
2427
    /* .accept            = */ nullptr,
2428
    /* .apply             = */ llama_sampler_mirostat_v2_apply,
2429
    /* .reset             = */ llama_sampler_mirostat_v2_reset,
2430
    /* .clone             = */ llama_sampler_mirostat_v2_clone,
2431
    /* .free              = */ llama_sampler_mirostat_v2_free,
2432
    /* .backend_init      = */ nullptr,
2433
    /* .backend_accept    = */ nullptr,
2434
    /* .backend_apply     = */ nullptr,
2435
    /* .backend_set_input = */ nullptr,
2436
};
2437
2438
0
struct llama_sampler * llama_sampler_init_mirostat_v2(uint32_t seed, float tau, float eta) {
2439
0
    auto seed_cur = get_rng_seed(seed);
2440
0
    return llama_sampler_init(
2441
0
        /* .iface = */ &llama_sampler_mirostat_v2_i,
2442
0
        /* .ctx   = */ new llama_sampler_mirostat_v2 {
2443
0
            /* .seed     = */ seed,
2444
0
            /* .seed_cur = */ seed_cur,
2445
0
            /* .tau      = */ tau,
2446
0
            /* .eta      = */ eta,
2447
0
            /* .mu       = */ 2.0f*tau,
2448
0
            /* .rng      = */ std::mt19937(seed_cur),
2449
0
        }
2450
0
    );
2451
0
}
2452
2453
// grammar
2454
2455
struct llama_sampler_grammar {
2456
    const struct llama_vocab * vocab;
2457
2458
    std::string grammar_str;
2459
    std::string grammar_root;
2460
2461
    struct llama_grammar * grammar;
2462
};
2463
2464
0
static const char * llama_sampler_grammar_name(const struct llama_sampler * /*smpl*/) {
2465
0
    return "grammar";
2466
0
}
2467
2468
0
static void llama_sampler_grammar_accept_impl(struct llama_sampler * smpl, llama_token token) {
2469
0
    auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2470
0
    if (ctx->grammar) {
2471
0
        llama_grammar_accept_impl(*ctx->grammar, token);
2472
0
    }
2473
0
}
2474
2475
0
static void llama_sampler_grammar_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2476
0
    auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2477
0
    if (ctx->grammar) {
2478
0
        llama_grammar_apply_impl(*ctx->grammar, cur_p);
2479
0
    }
2480
0
}
2481
2482
// Fwd declare to break reset --> init_impl --> llama_sampler_grammar_i --> reset cycle.
2483
static struct llama_sampler * llama_sampler_init_grammar_impl(
2484
        const struct llama_vocab * vocab,
2485
                      const char * grammar_str,
2486
                      const char * grammar_root,
2487
                              bool lazy,
2488
                     const char ** trigger_words,
2489
                            size_t num_trigger_words,
2490
               const llama_token * trigger_tokens,
2491
                            size_t num_trigger_tokens,
2492
                     const char ** trigger_patterns,
2493
                            size_t num_trigger_patterns);
2494
2495
0
static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
2496
0
    auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2497
0
    if (!ctx->grammar) {
2498
0
        return;
2499
0
    }
2500
2501
0
    std::vector<const char *>  trigger_patterns_c;
2502
0
    trigger_patterns_c.reserve(ctx->grammar->trigger_patterns.size());
2503
0
    for (auto & trigger_pattern : ctx->grammar->trigger_patterns) {
2504
0
        trigger_patterns_c.push_back(trigger_pattern.pattern.c_str());
2505
0
    }
2506
2507
0
    auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(),
2508
0
                                                 ctx->grammar->lazy, trigger_patterns_c.data(), trigger_patterns_c.size(),
2509
0
                                                 ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size());
2510
2511
0
    llama_grammar_free_impl(ctx->grammar);
2512
0
    ctx->grammar = grammar_new;
2513
0
}
2514
2515
0
static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) {
2516
0
    const auto * ctx = (const llama_sampler_grammar *) smpl->ctx;
2517
2518
0
    auto * result = llama_sampler_init_grammar_impl(ctx->vocab, nullptr, nullptr, false, nullptr, 0, nullptr, 0, nullptr, 0);
2519
0
    GGML_ASSERT(result);
2520
2521
    // copy the state
2522
0
    {
2523
0
        auto * result_ctx = (llama_sampler_grammar *) result->ctx;
2524
2525
0
        if (ctx->grammar) {
2526
0
            result_ctx->grammar_str  = ctx->grammar_str;
2527
0
            result_ctx->grammar_root = ctx->grammar_root;
2528
2529
0
            result_ctx->grammar = llama_grammar_clone_impl(*ctx->grammar);
2530
0
        }
2531
0
    }
2532
2533
0
    return result;
2534
0
}
2535
2536
0
static void llama_sampler_grammar_free(struct llama_sampler * smpl) {
2537
0
    const auto * ctx = (llama_sampler_grammar *) smpl->ctx;
2538
2539
0
    if (ctx->grammar) {
2540
0
        llama_grammar_free_impl(ctx->grammar);
2541
0
    }
2542
2543
0
    delete ctx;
2544
0
}
2545
2546
static struct llama_sampler_i llama_sampler_grammar_i = {
2547
    /* .name              = */ llama_sampler_grammar_name,
2548
    /* .accept            = */ llama_sampler_grammar_accept_impl,
2549
    /* .apply             = */ llama_sampler_grammar_apply,
2550
    /* .reset             = */ llama_sampler_grammar_reset,
2551
    /* .clone             = */ llama_sampler_grammar_clone,
2552
    /* .free              = */ llama_sampler_grammar_free,
2553
    /* .backend_init      = */ nullptr,
2554
    /* .backend_accept    = */ nullptr,
2555
    /* .backend_apply     = */ nullptr,
2556
    /* .backend_set_input = */ nullptr,
2557
};
2558
2559
static struct llama_sampler * llama_sampler_init_grammar_impl(
2560
        const struct llama_vocab * vocab,
2561
                      const char * grammar_str,
2562
                      const char * grammar_root,
2563
                              bool lazy,
2564
                     const char ** trigger_words,
2565
                            size_t num_trigger_words,
2566
               const llama_token * trigger_tokens,
2567
                            size_t num_trigger_tokens,
2568
                     const char ** trigger_patterns,
2569
0
                            size_t num_trigger_patterns) {
2570
0
    auto * ctx = new llama_sampler_grammar;
2571
2572
0
    if (grammar_str != nullptr && grammar_str[0] != '\0') {
2573
0
        std::string trigger_pattern;
2574
0
        llama_grammar * grammar = nullptr;
2575
        // TODO: remove trigger_words support.
2576
0
        if (trigger_words != nullptr && num_trigger_words > 0) {
2577
0
            GGML_ASSERT(trigger_patterns == nullptr && num_trigger_patterns == 0);
2578
0
            trigger_pattern = "[\\s\\S]*?(";
2579
0
            for (size_t i = 0; i < num_trigger_words; ++i) {
2580
0
                static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
2581
0
                if (i > 0) {
2582
0
                    trigger_pattern += "|";
2583
0
                }
2584
0
                trigger_pattern += std::regex_replace(trigger_words[i], special_chars, "\\$0");
2585
0
            }
2586
0
            trigger_pattern += ")[\\s\\S]*";
2587
2588
0
            std::array<const char *, 1> tmp_trigger_patterns = { trigger_pattern.c_str() };
2589
0
            grammar = llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, tmp_trigger_patterns.data(), tmp_trigger_patterns.size(), trigger_tokens, num_trigger_tokens);
2590
0
        } else {
2591
0
            grammar = llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens);
2592
0
        }
2593
0
        *ctx = {
2594
0
            /* .vocab        = */ vocab,
2595
0
            /* .grammar_str  = */ grammar_str,
2596
0
            /* .grammar_root = */ grammar_root,
2597
0
            /* .grammar      = */ grammar,
2598
0
        };
2599
0
        if (!ctx->grammar) {
2600
0
            delete ctx;
2601
0
            return nullptr;
2602
0
        }
2603
0
    } else {
2604
0
        *ctx = {
2605
0
            /* .vocab        = */ vocab,
2606
0
            /* .grammar_str  = */ {},
2607
0
            /* .grammar_root = */ {},
2608
0
            /* .grammar      = */ nullptr,
2609
0
        };
2610
0
    }
2611
2612
0
    return llama_sampler_init(
2613
0
        /* .iface = */ &llama_sampler_grammar_i,
2614
0
        /* .ctx   = */ ctx
2615
0
    );
2616
0
}
2617
2618
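
The escaping step above turns each trigger word into a literal regex fragment: every metacharacter matched by special_chars is replaced by itself prefixed with a backslash ($0 is the matched text). A standalone demonstration with hypothetical trigger words:

#include <cstdio>
#include <regex>
#include <string>

int main() {
    static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
    const char * trigger_words[] = { "<tool_call>", "a+b" };

    std::string pattern = "[\\s\\S]*?(";
    for (size_t i = 0; i < 2; ++i) {
        if (i > 0) { pattern += "|"; }
        pattern += std::regex_replace(trigger_words[i], special_chars, "\\$0");
    }
    pattern += ")[\\s\\S]*";

    std::printf("%s\n", pattern.c_str()); // "a+b" comes out as "a\+b"
    return 0;
}
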
struct llama_sampler * llama_sampler_init_grammar(
2619
        const struct llama_vocab * vocab,
2620
                      const char * grammar_str,
2621
0
                      const char * grammar_root) {
2622
0
    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ false, nullptr, 0, nullptr, 0, nullptr, 0);
2623
0
}
2624
2625
struct llama_sampler * llama_sampler_init_grammar_lazy(
2626
        const struct llama_vocab * vocab,
2627
                      const char * grammar_str,
2628
                      const char * grammar_root,
2629
                     const char ** trigger_words,
2630
                            size_t num_trigger_words,
2631
               const llama_token * trigger_tokens,
2632
0
                            size_t num_trigger_tokens) {
2633
0
    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens, nullptr, 0);
2634
0
}
2635
2636
struct llama_sampler * llama_sampler_init_grammar_lazy_patterns(
2637
        const struct llama_vocab * vocab,
2638
                      const char * grammar_str,
2639
                      const char * grammar_root,
2640
                     const char ** trigger_patterns,
2641
                            size_t num_trigger_patterns,
2642
               const llama_token * trigger_tokens,
2643
0
                            size_t num_trigger_tokens) {
2644
0
    return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, nullptr, 0, trigger_tokens, num_trigger_tokens, trigger_patterns, num_trigger_patterns);
2645
0
}
2646
2647
// penalties
2648
2649
struct llama_sampler_penalties {
2650
    const int32_t penalty_last_n;
2651
    const float   penalty_repeat;
2652
    const float   penalty_freq;
2653
    const float   penalty_present;
2654
2655
    ring_buffer<llama_token> prev;
2656
2657
    // a frequency map to count token occurrences
2658
    std::unordered_map<llama_token, int> token_count;
2659
};
2660
2661
0
static const char * llama_sampler_penalties_name(const struct llama_sampler * /*smpl*/) {
2662
0
    return "penalties";
2663
0
}
2664
2665
0
static void llama_sampler_penalties_accept(struct llama_sampler * smpl, llama_token token) {
2666
0
    auto * ctx = (llama_sampler_penalties *) smpl->ctx;
2667
0
    if (ctx->penalty_last_n == 0) {
2668
0
        return;
2669
0
    }
2670
2671
0
    ctx->token_count[token]++;
2672
2673
    // if the ring buffer is full, remove the oldest token
2674
0
    if (ctx->prev.size() >= (size_t) ctx->penalty_last_n) {
2675
0
        const auto old = ctx->prev.front();
2676
2677
0
        ctx->token_count[old]--;
2678
0
        if (ctx->token_count[old] == 0) {
2679
0
            ctx->token_count.erase(old);
2680
0
        }
2681
0
    }
2682
2683
0
    ctx->prev.push_back(token);
2684
2685
#if 0
2686
    // sanity check
2687
    std::unordered_map<llama_token, int> tmp;
2688
    for (int i = 0; i < std::min<int>(ctx->penalty_last_n, ctx->prev.size()); ++i) {
2689
        tmp[ctx->prev.rat(i)]++;
2690
    }
2691
2692
    assert(ctx->token_count == tmp);
2693
#endif
2694
0
}
2695
2696
0
static void llama_sampler_penalties_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2697
0
    auto * ctx = (llama_sampler_penalties *) smpl->ctx;
2698
2699
0
    if ((ctx->penalty_last_n == 0) ||
2700
0
        (ctx->penalty_repeat == 1.0f && ctx->penalty_freq == 0.0f && ctx->penalty_present == 0.0f)) {
2701
0
        return;
2702
0
    }
2703
2704
    // Apply frequency and presence penalties to the cur_p
2705
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2706
0
        const auto token_iter = ctx->token_count.find(cur_p->data[i].id);
2707
0
        if (token_iter == ctx->token_count.end()) {
2708
0
            continue;
2709
0
        }
2710
2711
0
        const int count = token_iter->second;
2712
2713
0
        assert(count > 0 && count <= ctx->penalty_last_n);
2714
2715
        // The academic publication that described this technique only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong.
2716
        // The common fix for this problem is to multiply by the penalty instead of dividing when the logit is negative.
2717
0
        if (cur_p->data[i].logit <= 0) {
2718
0
            cur_p->data[i].logit *= ctx->penalty_repeat;
2719
0
        } else {
2720
0
            cur_p->data[i].logit /= ctx->penalty_repeat;
2721
0
        }
2722
2723
0
        cur_p->data[i].logit -= float(count) * ctx->penalty_freq + float(count > 0) * ctx->penalty_present;
2724
0
    }
2725
2726
0
    cur_p->sorted = false;
2727
0
}
2728
2729
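
For a single candidate that occurred count times in the penalty window, the update above amounts to the following (a standalone numeric sketch):

#include <cstdio>

int main() {
    float logit = 1.2f;
    const int   count           = 3;
    const float penalty_repeat  = 1.1f;
    const float penalty_freq    = 0.2f;
    const float penalty_present = 0.5f;

    // repeat penalty: multiply when the logit is negative, divide when positive
    if (logit <= 0) { logit *= penalty_repeat; } else { logit /= penalty_repeat; }

    // frequency penalty scales with the count, presence penalty is flat
    logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;

    std::printf("penalized logit = %f\n", logit); // 1.2/1.1 - 0.6 - 0.5
    return 0;
}
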
0
static void llama_sampler_penalties_reset(struct llama_sampler * smpl) {
2730
0
    auto * ctx = (llama_sampler_penalties *) smpl->ctx;
2731
0
    ctx->prev.clear();
2732
0
    ctx->token_count.clear();
2733
0
}
2734
2735
0
static struct llama_sampler * llama_sampler_penalties_clone(const struct llama_sampler * smpl) {
2736
0
    const auto * ctx = (const llama_sampler_penalties *) smpl->ctx;
2737
0
    auto * result = llama_sampler_init_penalties(
2738
0
            ctx->penalty_last_n,
2739
0
            ctx->penalty_repeat,
2740
0
            ctx->penalty_freq,
2741
0
            ctx->penalty_present);
2742
2743
    // copy the state
2744
0
    {
2745
0
        auto * result_ctx = (llama_sampler_penalties *) result->ctx;
2746
2747
0
        result_ctx->prev = ctx->prev;
2748
0
    }
2749
2750
0
    return result;
2751
0
}
2752
2753
0
static void llama_sampler_penalties_free(struct llama_sampler * smpl) {
2754
0
    delete (llama_sampler_penalties *) smpl->ctx;
2755
0
}
2756
2757
static struct llama_sampler_i llama_sampler_penalties_i = {
2758
    /* .name              = */ llama_sampler_penalties_name,
2759
    /* .accept            = */ llama_sampler_penalties_accept,
2760
    /* .apply             = */ llama_sampler_penalties_apply,
2761
    /* .reset             = */ llama_sampler_penalties_reset,
2762
    /* .clone             = */ llama_sampler_penalties_clone,
2763
    /* .free              = */ llama_sampler_penalties_free,
2764
    /* .backend_init      = */ nullptr,
2765
    /* .backend_accept    = */ nullptr,
2766
    /* .backend_apply     = */ nullptr,
2767
    /* .backend_set_input = */ nullptr,
2768
};
2769
2770
struct llama_sampler * llama_sampler_init_penalties(
2771
        int32_t penalty_last_n,
2772
        float penalty_repeat,
2773
        float penalty_freq,
2774
0
        float penalty_present) {
2775
0
    penalty_last_n = std::max(penalty_last_n, 0);
2776
2777
0
    const bool is_empty = (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f));
2778
2779
0
    if (is_empty) {
2780
0
        return llama_sampler_init_empty("?penalties");
2781
0
    }
2782
2783
0
    return llama_sampler_init(
2784
0
        /* .iface = */ &llama_sampler_penalties_i,
2785
0
        /* .ctx   = */ new llama_sampler_penalties {
2786
0
            /* .penalty_last_n  = */ penalty_last_n,
2787
0
            /* .penalty_repeat  = */ penalty_repeat,
2788
0
            /* .penalty_freq    = */ penalty_freq,
2789
0
            /* .penalty_present = */ penalty_present,
2790
0
            /* .prev            = */ ring_buffer<llama_token>(penalty_last_n),
2791
0
            /* .token_count     = */ {},
2792
0
        }
2793
0
    );
2794
0
}
2795
2796
// top-n-sigma
2797
2798
struct llama_sampler_top_n_sigma {
2799
    const float n;
2800
};
2801
2802
0
static const char * llama_sampler_top_n_sigma_name(const struct llama_sampler * /*smpl*/) {
2803
0
    return "top-n-sigma";
2804
0
}
2805
2806
0
static void llama_sampler_top_n_sigma_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2807
0
    auto * ctx = (llama_sampler_top_n_sigma *) smpl->ctx;
2808
2809
0
    if (ctx->n <= 0.0f || cur_p->size <= 1) {
2810
0
        return;
2811
0
    }
2812
2813
    // find max logit and calculate mean
2814
0
    float max = cur_p->data[0].logit;
2815
0
    float logits_sum = 0;
2816
0
    size_t valid_count = 0;
2817
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2818
        // Only count values that are not -infinity
2819
0
        if (cur_p->data[i].logit != -INFINITY) {
2820
0
            max = std::max(max, cur_p->data[i].logit);
2821
0
            logits_sum += cur_p->data[i].logit;
2822
0
            valid_count++;
2823
0
        }
2824
0
    }
2825
0
    float mean = valid_count > 0 ? logits_sum/valid_count : 0;
2826
2827
    // calculate standard deviation
2828
0
    float acc = 0;
2829
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2830
        // Skip -infinity in std calculation
2831
0
        if (cur_p->data[i].logit != -INFINITY) {
2832
0
            acc += pow(cur_p->data[i].logit - mean, 2);
2833
0
        }
2834
0
    }
2835
0
    float std = valid_count > 0 ? sqrt(acc/valid_count) : 0;
2836
2837
    // apply mask
2838
0
    for (size_t i = 0; i < cur_p->size; ++i) {
2839
0
        if (cur_p->data[i].logit < max - (ctx->n * std)) {
2840
0
            cur_p->data[i].logit = -INFINITY;
2841
0
        }
2842
0
    }
2843
2844
0
    llama_sampler_softmax_impl(cur_p, true);
2845
0
}
2846
2847
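
A compact standalone sketch of the same cut (made-up logits with no -inf entries, so valid_count equals the full size): logits more than n standard deviations below the maximum are masked to -inf:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const float n = 1.0f;
    std::vector<float> logits = { 4.0f, 3.5f, 1.0f, 0.0f };

    float max = logits[0], sum = 0.0f;
    for (float l : logits) { max = std::max(max, l); sum += l; }
    const float mean = sum / (float) logits.size();

    float acc = 0.0f;
    for (float l : logits) { acc += (l - mean) * (l - mean); }
    const float sigma = std::sqrt(acc / (float) logits.size());

    for (float & l : logits) {
        if (l < max - n * sigma) { l = -INFINITY; }
        std::printf("%f\n", l);
    }
    return 0;
}
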
0
static struct llama_sampler * llama_sampler_top_n_sigma_clone(const struct llama_sampler * smpl) {
2848
0
    const auto * ctx = (const llama_sampler_top_n_sigma *) smpl->ctx;
2849
0
    return llama_sampler_init_top_n_sigma(ctx->n);
2850
0
}
2851
2852
0
static void llama_sampler_top_n_sigma_free(struct llama_sampler * smpl) {
2853
0
    delete (llama_sampler_top_n_sigma *) smpl->ctx;
2854
0
}
2855
2856
static struct llama_sampler_i llama_sampler_top_n_sigma_i = {
2857
    /* .name              = */ llama_sampler_top_n_sigma_name,
2858
    /* .accept            = */ nullptr,
2859
    /* .apply             = */ llama_sampler_top_n_sigma_apply,
2860
    /* .reset             = */ nullptr,
2861
    /* .clone             = */ llama_sampler_top_n_sigma_clone,
2862
    /* .free              = */ llama_sampler_top_n_sigma_free,
2863
    /* .backend_init      = */ nullptr,
2864
    /* .backend_accept    = */ nullptr,
2865
    /* .backend_apply     = */ nullptr,
2866
    /* .backend_set_input = */ nullptr,
2867
};
2868
2869
0
struct llama_sampler * llama_sampler_init_top_n_sigma(float n) {
2870
0
    const bool is_empty = (n <= 0.0f);
2871
2872
0
    if (is_empty) {
2873
0
        return llama_sampler_init_empty("?top-n-sigma");
2874
0
    }
2875
2876
0
    return llama_sampler_init(
2877
0
        /* .iface = */ &llama_sampler_top_n_sigma_i,
2878
0
        /* .ctx   = */ new llama_sampler_top_n_sigma {
2879
0
            /* .n = */ n,
2880
0
        }
2881
0
    );
2882
0
}
2883
2884
// DRY
2885
2886
struct llama_sampler_dry {
2887
    int32_t total_context_size;
2888
2889
    const float   dry_multiplier;
2890
    const float   dry_base;
2891
    const int32_t dry_allowed_length;
2892
    const int32_t dry_penalty_last_n;
2893
2894
    std::unordered_multimap<llama_token, std::vector<llama_token>> dry_processed_breakers;
2895
    std::vector<int> dry_repeat_count;
2896
    std::unordered_map<llama_token, int> dry_max_token_repeat;
2897
    ring_buffer<llama_token> last_tokens;
2898
};
2899
2900
// Ported from Koboldcpp, original PR: https://github.com/LostRuins/koboldcpp/pull/982 (Original author: pi6am)
2901
0
static void get_overlapping_token_sequences(const llama_vocab & vocab, const std::string& str, std::unordered_multimap<llama_token, std::vector<llama_token>>& token_sequences, int max_tail_len = -1) {
2902
0
    for (llama_token token_id = 0; token_id < (llama_token) vocab.n_tokens(); token_id++) {
2903
0
        std::string word = vocab.detokenize({token_id}, true);
2904
0
        if (word.find(str) != std::string::npos) {
2905
0
            token_sequences.emplace(token_id, std::vector<llama_token>());
2906
0
        } else {
2907
0
            size_t word_len = word.size();
2908
0
            size_t str_len = str.size();
2909
0
            size_t pos = (size_t) -1; // wraps to 0 on the first pos + 1 below
2910
0
            while ((pos = word.find(str[0], pos + 1)) != std::string::npos) {
2911
0
                bool match = true;
2912
0
                size_t i;
2913
0
                for (i = 1; i < str_len && i + pos < word_len; ++i) {
2914
0
                    if (word[pos + i] != str[i]) {
2915
0
                        match = false;
2916
0
                        break;
2917
0
                    }
2918
0
                }
2919
0
                if (match) {
2920
0
                    std::vector<llama_token> tokenization = vocab.tokenize(str.substr(i), false, false);
2921
0
                    if (max_tail_len >= 0 && tokenization.size() > (size_t)max_tail_len) {
2922
0
                        tokenization.resize(max_tail_len);
2923
0
                    }
2924
2925
                    // Ensure we don't already have a duplicate matching tokenization
2926
0
                    auto its = token_sequences.equal_range(token_id);
2927
0
                    bool found = false;
2928
0
                    for (auto it = its.first; it != its.second; ++it) {
2929
0
                        if (tokenization == it->second) {
2930
0
                            found = true;
2931
0
                            break;
2932
0
                        }
2933
0
                    }
2934
0
                    if (!found) {
2935
0
                        token_sequences.emplace(token_id, tokenization);
2936
0
                    }
2937
0
                }
2938
0
            }
2939
0
        }
2940
0
    }
2941
0
}
2942
2943
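
A string-level analogue of the scan above (no vocabulary involved; word stands in for a token's text, str for a sequence breaker): it locates the suffixes of the token that are prefixes of the breaker, i.e. the positions where a breaker could straddle a token boundary:

#include <cstdio>
#include <string>

int main() {
    const std::string word = "xxa";   // hypothetical token text
    const std::string str  = "aaaq1"; // hypothetical sequence breaker

    size_t pos = std::string::npos;   // wraps to 0 on the first pos + 1
    while ((pos = word.find(str[0], pos + 1)) != std::string::npos) {
        size_t i = 1;
        while (i < str.size() && pos + i < word.size() && word[pos + i] == str[i]) { ++i; }
        if (pos + i == word.size()) { // matched all the way to the end of the token
            std::printf("overlap at pos %zu, breaker tail to tokenize: \"%s\"\n",
                        pos, str.substr(i).c_str());
        }
    }
    return 0;
}
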
0
static const char * llama_sampler_dry_name(const struct llama_sampler * /*smpl*/) {
2944
0
    return "dry";
2945
0
}
2946
2947
0
static void llama_sampler_dry_accept(struct llama_sampler * smpl, llama_token token) {
2948
0
    auto * ctx = (llama_sampler_dry *) smpl->ctx;
2949
0
    if (ctx->dry_multiplier == 0.0f || ctx->dry_base < 1.0f || ctx->dry_penalty_last_n == 0) {
2950
0
        return;
2951
0
    }
2952
2953
0
    ctx->last_tokens.push_back(token);
2954
0
}
2955
2956
// Ported from Koboldcpp, original PR: https://github.com/LostRuins/koboldcpp/pull/982 (Original author: pi6am)
2957
0
static void llama_sampler_dry_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2958
0
    auto * ctx = (llama_sampler_dry *) smpl->ctx;
2959
2960
0
    if (ctx->dry_multiplier == 0.0f || ctx->dry_base < 1.0f || ctx->dry_penalty_last_n == 0) {
2961
0
        return;
2962
0
    }
2963
2964
0
    int32_t effective_dry_penalty_last_n = (ctx->dry_penalty_last_n == -1) ? ctx->total_context_size : std::max(ctx->dry_penalty_last_n, 0);
2965
0
    int last_n_repeat = std::min(std::min((int)ctx->last_tokens.size(), effective_dry_penalty_last_n), ctx->total_context_size);
2966
2967
0
    if (last_n_repeat <= ctx->dry_allowed_length) {
2968
0
        return;
2969
0
    }
2970
2971
0
    ctx->dry_repeat_count.assign(last_n_repeat, 0);
2972
0
    ctx->dry_max_token_repeat.clear();
2973
2974
    // Step 1: Look for restart sequences to limit the maximum repetition length.
2975
    // Work backwards through the context looking for any token that begins a restart sequence.
2976
    //
2977
    // The collection `restart_sequences` is a mapping from a "head" token to all "tail"
2978
    // sequences that together comprise a restart sequence. This allows us to quickly check
2979
    // whether each token is the head of a complete sequence. Most restart sequences are actually
2980
    // a single token, and for these the "tail" is an empty vector.
2981
    //
2982
    // If the token is a "head", test all restart sequences that begin with this token
2983
    // (there will often only be one sequence for each token, but if sequences like 'aaaq1' and
2984
    // 'aaa1' are used as restart strings, both could start with 'aaa' when tokenized). The
2985
    // longest matching sequence (if any) is used to limit the maximum repetition length.
2986
    //
2987
    // Note that in the case of a short sequence contained in a longer one, this might fail to
2988
    // find the smallest value for `rep_limit`. For example, if 'amniotic' and 'ni' are both used as
2989
    // restart sequences, 'ni' will be found first, and since it's shorter it will fail to suppress
2990
    // 'otic'. This is a minor issue since fully contained restart sequences are likely to be rare.
2991
    //
2992
    // This is theoretically worst-case O(N^2) for arbitrary restart sequences, which is why we
2993
    // have already clamped the maximum tail sequence length when generating `restart_sequences`.
2994
    // With clamping, this scan is O(N) in the context length.
2995
2996
0
    int rep_limit = last_n_repeat;
2997
0
    for (int i = 0; i < last_n_repeat; ++i) {
2998
0
        llama_token token = ctx->last_tokens.rat(i);
2999
0
        auto its = ctx->dry_processed_breakers.equal_range(token);
3000
0
        if (its.first == ctx->dry_processed_breakers.end()) {
3001
0
            continue;
3002
0
        }
3003
0
        int longest_match = -1;
3004
0
        for (auto it = its.first; it != its.second; ++it) {
3005
            // Note that (*it) does not contain the head character, so seq_len will be
3006
            // the restart sequence length minus 1.
3007
            // In the common case of a single-token restart sequence, (*it) will be empty
3008
            // and we will trivially match.
3009
0
            int seq_len = (int)it->second.size();
3010
0
            if (seq_len > longest_match && seq_len <= (int)i) {
3011
0
                bool match = true;
3012
0
                for (int offset = 0; offset < seq_len; ++offset) {
3013
                    // The -1 when indexing `last_tokens` is because we already matched the head.
3014
0
                    if (it->second[offset] != ctx->last_tokens.rat(i - offset - 1)) {
3015
0
                        match = false;
3016
0
                        break;
3017
0
                    }
3018
0
                }
3019
0
                if (match) {
3020
0
                    longest_match = seq_len;
3021
0
                }
3022
0
            }
3023
0
        }
3024
0
        if (longest_match >= 0) {
3025
            // We found a restart sequence starting `i` tokens from the end and continuing for
3026
            // `longest_match` tokens.
3027
0
            rep_limit = i - longest_match;
3028
0
            break;
3029
0
        }
3030
0
    }
3031
0
    if (rep_limit < ctx->dry_allowed_length) {
3032
0
        return;
3033
0
    }
3034
3035
    // Step 2: Iterate in reverse over the last N tokens of the context, using the "Z-algorithm" (in
3036
    // the reverse direction) to efficiently compute the positions and lengths of suffixes appearing
3037
    // elsewhere in the context. We limit the suffix length to `rep_limit` to respect restart sequences.
3038
    //
3039
    // This algorithm is not currently documented on Wikipedia, but there is a clear description here:
3040
    // https://ivanyu.me/blog/2014/10/15/z-algorithm/
3041
    //
3042
    // The code below is adapted from the public domain implementation by the same author here:
3043
    // https://github.com/ivanyu/string-algorithms/blob/master/z_algorithm.py
3044
    //
3045
    // Example:
3046
    // Last N tokens: a b c c b c y a b c
3047
    // Repeat counts: 0 0 3 1 0 2 0 0 0 0
3048
    //                    ^
3049
    //   This `3` means that the last three tokens of the context (a b c) also appear here.
3050
    //
3051
    // This step is worst case O(N) since the Z-algorithm is linear, despite the appearance of nested
3052
    // for/while loops. This can be seen by observing that the `lt` and `rt` bounds are set after each
3053
    // repeated suffix is detected (i.e. after each while loop when n > 0). These bound variables
3054
    // ensure that the inner while loops only examine each token in the context once as the outer
3055
    // for loop iterates over the context.
3056
3057
0
    {
3058
0
        const int last = last_n_repeat - 1;
3059
3060
0
        int rt = 0;
3061
0
        int lt = 0;
3062
3063
0
        for (int k = 1; k < last_n_repeat; ++k) {
3064
0
            if (k > rt) {
3065
                // If k is outside the current Z-box, do naive computation.
3066
0
                int n = 0;
3067
0
                while (n + k < last_n_repeat && ctx->last_tokens.rat(n) == ctx->last_tokens.rat(n+k)) {
3068
0
                    ++n;
3069
0
                }
3070
0
                ctx->dry_repeat_count[last - k] = std::min(n, rep_limit);
3071
0
                if (n > 0) {
3072
0
                    lt = k;
3073
0
                    rt = k + n - 1;
3074
0
                }
3075
0
            } else {
3076
                // If k is inside the current Z-box, consider two cases.
3077
3078
0
                int p = k - lt; // Pair index.
3079
0
                int right_part_len = rt - k + 1;
3080
3081
0
                if (ctx->dry_repeat_count[last - p] < right_part_len) {
3082
0
                    int n = std::min(ctx->dry_repeat_count[last - p], rep_limit);
3083
0
                    ctx->dry_repeat_count[last - k] = n;
3084
0
                } else {
3085
0
                    int i = rt + 1;
3086
0
                    while (i < last_n_repeat && ctx->last_tokens.rat(i) == ctx->last_tokens.rat(i - k)) {
3087
0
                        i += 1;
3088
0
                    }
3089
3090
0
                    int n = std::min(i - k, rep_limit);
3091
0
                    ctx->dry_repeat_count[last - k] = n;
3092
0
                    lt = k;
3093
0
                    rt = i - 1;
3094
0
                }
3095
0
            }
3096
0
        }
3097
0
    }
3098
3099
    // Step 3: Iterate over dry_repeat_count and last_tokens, examining the maximum repeat length
3100
    // that would be generated by emitting each new token that would extend a sequence.
3101
    //
3102
    // Following the same example as above:
3103
    // Last N tokens: a b c c b c y a b c
3104
    // Repeat counts: 0 0 3 1 0 2 0 0 0 0
3105
    //
3106
    // For each non-zero, look ahead one token. This token, if emitted, would extend the repetition.
3107
    // c: 3 -> 4 (from `a b c` to `a b c c`)
3108
    // b: 1 -> 2 (from `c` to `c b`)
3109
    // y: 2 -> 3 (from `b c` to `b c y`)
3110
3111
0
    for (int i = 0; i < last_n_repeat - 1; ++i) {
3112
0
        int repeat_len = ctx->dry_repeat_count[i];
3113
0
        if (repeat_len >= ctx->dry_allowed_length) {
3114
            // This token ends a repeat, so the next token would continue one.
3115
            // By convention, the value of `repeat_len` only includes the tokens currently
3116
            // in the context, not the new token that would be added.
3117
0
            llama_token token = ctx->last_tokens.rat(last_n_repeat - 2 - i);
3118
            // Track the maximum sequence ending in this token.
3119
0
            const auto& it = ctx->dry_max_token_repeat.find(token);
3120
0
            if (it == ctx->dry_max_token_repeat.end() || it->second < repeat_len) {
3121
0
                ctx->dry_max_token_repeat[token] = repeat_len;
3122
0
            }
3123
0
        }
3124
0
    }
3125
3126
    // Step 4: Apply logit penalties based on the maximum repeat length for relevant tokens.
3127
3128
    // Prevent floating point overflow in `pow(dry_base, exponent)` by clamping to `max_exponent`.
3129
    // Compute it from `dry_base` and the approximate log of `std::numeric_limits<float>::max()`
3130
0
    const float FLOAT_MAX_LOG = 88.7228391f;
3131
0
    int max_exponent = 0;
3132
0
    if (ctx->dry_base > 1.000001f) {
3133
0
        max_exponent = FLOAT_MAX_LOG / std::log(ctx->dry_base);
3134
0
    }
3135
3136
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3137
0
        const auto& af_kvp = ctx->dry_max_token_repeat.find(cur_p->data[i].id);
3138
0
        if (af_kvp != ctx->dry_max_token_repeat.end()) {
3139
            // Check all sequence breakers starting with this token
3140
0
            auto range = ctx->dry_processed_breakers.equal_range(cur_p->data[i].id);
3141
0
            bool is_single_token_breaker = false;
3142
3143
0
            for (auto it = range.first; it != range.second; ++it) {
3144
0
                if (it->second.empty()) {
3145
0
                    is_single_token_breaker = true;
3146
0
                    break;
3147
0
                }
3148
0
            }
3149
3150
            // Apply penalty only if it's not a single-token sequence breaker
3151
0
            if (!is_single_token_breaker) {
3152
0
                int repeat_exp = af_kvp->second - ctx->dry_allowed_length;
3153
0
                if (max_exponent > 0 && repeat_exp > max_exponent) {
3154
0
                    repeat_exp = max_exponent;
3155
0
                }
3156
0
                float penalty = ctx->dry_multiplier * std::pow(ctx->dry_base, repeat_exp);
3157
0
                cur_p->data[i].logit -= penalty;
3158
0
            }
3159
0
        }
3160
0
    }
3161
3162
0
    cur_p->sorted = false;
3163
0
}
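
// Illustrative sketch (not part of llama-sampling.cpp): a naive O(N^2) reference for the
// reverse Z-scan of step 2, reproducing the repeat counts from the worked example in the
// comments ("a b c c b c y a b c" -> "0 0 3 1 0 2 0 0 0 0"); chars stand in for llama_token
// values and the rep_limit handling is omitted. For step 4, the resulting penalty is
// dry_multiplier * dry_base^(repeat_len - dry_allowed_length): e.g. with multiplier 0.8,
// base 1.75 and allowed_length 2, a repeat of length 5 subtracts 0.8 * 1.75^3 ~= 4.29 from
// the token's logit.

#include <cstdio>
#include <vector>

int main() {
    const std::vector<char> toks = {'a','b','c','c','b','c','y','a','b','c'};
    const int n = (int) toks.size();

    std::vector<int> repeat(n, 0);

    auto rat = [&](int i) { return toks[n - 1 - i]; }; // i-th token from the end

    for (int k = 1; k < n; ++k) {
        int len = 0;
        while (len + k < n && rat(len) == rat(len + k)) {
            ++len;
        }
        repeat[n - 1 - k] = len; // longest suffix of the context that also ends k tokens earlier
    }

    for (int v : repeat) {
        printf("%d ", v); // prints: 0 0 3 1 0 2 0 0 0 0
    }
    printf("\n");
    return 0;
}
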
3164
3165
0
static void llama_sampler_dry_reset(struct llama_sampler * smpl) {
3166
0
    auto * ctx = (llama_sampler_dry *) smpl->ctx;
3167
0
    ctx->last_tokens.clear();
3168
0
    ctx->dry_repeat_count.clear();
3169
0
    ctx->dry_max_token_repeat.clear();
3170
0
}
3171
3172
0
static struct llama_sampler * llama_sampler_dry_clone(const struct llama_sampler * smpl) {
3173
0
    const auto * ctx = (llama_sampler_dry *) smpl->ctx;
3174
3175
0
    llama_vocab dummy_vocab;
3176
3177
    // a dummy vocab is passed because it is only needed for raw sequence-breaker processing, which has already been done; we simply copy the processed breakers below
3178
0
    auto * result = llama_sampler_init_dry(&dummy_vocab, ctx->total_context_size, ctx->dry_multiplier, ctx->dry_base, ctx->dry_allowed_length, ctx->dry_penalty_last_n, NULL, 0);
3179
3180
    // Copy the state, including the processed breakers
3181
0
    {
3182
0
        auto * result_ctx = (llama_sampler_dry *) result->ctx;
3183
0
        result_ctx->dry_processed_breakers = ctx->dry_processed_breakers;
3184
0
        result_ctx->dry_repeat_count = ctx->dry_repeat_count;
3185
0
        result_ctx->dry_max_token_repeat = ctx->dry_max_token_repeat;
3186
0
        result_ctx->last_tokens = ctx->last_tokens;
3187
0
    }
3188
3189
0
    return result;
3190
0
}
3191
3192
0
static void llama_sampler_dry_free(struct llama_sampler * smpl) {
3193
0
    delete (llama_sampler_dry *) smpl->ctx;
3194
0
}
3195
3196
static struct llama_sampler_i llama_sampler_dry_i = {
3197
    /* .name              = */ llama_sampler_dry_name,
3198
    /* .accept            = */ llama_sampler_dry_accept,
3199
    /* .apply             = */ llama_sampler_dry_apply,
3200
    /* .reset             = */ llama_sampler_dry_reset,
3201
    /* .clone             = */ llama_sampler_dry_clone,
3202
    /* .free              = */ llama_sampler_dry_free,
3203
    /* .backend_init      = */ nullptr,
3204
    /* .backend_accept    = */ nullptr,
3205
    /* .backend_apply     = */ nullptr,
3206
    /* .backend_set_input = */ nullptr,
3207
};
3208
3209
0
struct llama_sampler * llama_sampler_init_dry(const struct llama_vocab * vocab, int32_t n_ctx_train, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const char** seq_breakers, size_t num_breakers) {
3210
0
    int32_t effective_dry_penalty_last_n = (dry_penalty_last_n == -1) ? n_ctx_train : std::max(dry_penalty_last_n, 0);
3211
0
    std::unordered_multimap<llama_token, std::vector<llama_token>> processed_breakers;
3212
0
    const int MAX_CHAR_LEN = 40;
3213
0
    const int MAX_SEQ_LEN = 20;
3214
3215
0
    const bool dry_enabled = (dry_multiplier != 0.0f && dry_base >= 1.0f && dry_penalty_last_n != 0);
3216
3217
0
    if (!dry_enabled) {
3218
0
        return llama_sampler_init_empty("?dry");
3219
0
    }
3220
3221
0
    if (seq_breakers != nullptr && num_breakers > 0) {
3222
        // Process sequence breakers
3223
0
        for (size_t i = 0; i < num_breakers; ++i) {
3224
0
            if (seq_breakers[i] == nullptr || std::strlen(seq_breakers[i]) == 0) {
3225
0
                LLAMA_LOG_WARN("skipping null or empty DRY sequence breaker at index %zu\n", i);
3226
0
                continue;
3227
0
            }
3228
3229
0
            std::string sequence_break(seq_breakers[i]);
3230
0
            if (sequence_break.empty()) {
3231
0
                LLAMA_LOG_WARN("skipping empty DRY sequence breaker\n");
3232
0
                continue;
3233
0
            }
3234
3235
0
            if (sequence_break.size() > MAX_CHAR_LEN) {
3236
0
                LLAMA_LOG_WARN("truncating DRY sequence breaker to %d characters\n", MAX_CHAR_LEN);
3237
0
                sequence_break.resize(MAX_CHAR_LEN);
3238
0
            }
3239
3240
0
            get_overlapping_token_sequences(*vocab, sequence_break, processed_breakers, MAX_SEQ_LEN);
3241
0
        }
3242
0
    }
3243
3244
0
    return llama_sampler_init(
3245
0
        /* .iface = */ &llama_sampler_dry_i,
3246
0
        /* .ctx   = */ new llama_sampler_dry {
3247
0
            /* .total_context_size     = */ n_ctx_train,
3248
0
            /* .dry_multiplier         = */ dry_multiplier,
3249
0
            /* .dry_base               = */ dry_base,
3250
0
            /* .dry_allowed_length     = */ dry_allowed_length,
3251
0
            /* .dry_penalty_last_n     = */ dry_penalty_last_n,
3252
0
            /* .dry_processed_breakers = */ std::move(processed_breakers),
3253
0
            /* .dry_repeat_count       = */ std::vector<int>(effective_dry_penalty_last_n, 0),
3254
0
            /* .dry_max_token_repeat   = */ {},
3255
0
            /* .last_tokens            = */ ring_buffer<llama_token>(effective_dry_penalty_last_n),
3256
0
        }
3257
0
    );
3258
0
}
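
// Illustrative sketch (not part of llama-sampling.cpp): hypothetical wiring through the
// public llama.h chain API; `model` is assumed to be a loaded llama_model, and the
// parameter values below are commonly used choices, not defaults taken from this file:

const char * breakers[] = { "\n", ":", "\"", "*" };

llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

llama_sampler_chain_add(chain, llama_sampler_init_dry(
    llama_model_get_vocab(model),
    llama_model_n_ctx_train(model),
    /* dry_multiplier     = */ 0.8f,
    /* dry_base           = */ 1.75f,
    /* dry_allowed_length = */ 2,
    /* dry_penalty_last_n = */ -1, // -1 = use the whole training context window
    breakers, 4));
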
3259
3260
// wrapper for test-sampling.cpp
3261
0
struct llama_sampler * llama_sampler_init_dry_testing(int32_t context_size, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const std::vector<std::vector<llama_token>>& seq_breakers) {
3262
0
    llama_vocab dummy_vocab;
3263
0
    auto * result = llama_sampler_init_dry(&dummy_vocab, context_size, dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, NULL, 0);
3264
0
    auto * ctx = (llama_sampler_dry *) result->ctx;
3265
3266
    // Process the token-based sequence breakers
3267
0
    ctx->dry_processed_breakers.clear();
3268
0
    if (seq_breakers.empty()) {
3269
0
        LLAMA_LOG_WARN("empty DRY sequence breakers list in llama_sampler_init_dry_testing\n");
3270
0
    } else {
3271
0
        for (const auto& breaker : seq_breakers) {
3272
0
            if (breaker.empty()) {
3273
0
                LLAMA_LOG_WARN("skipping DRY empty sequence breaker\n");
3274
0
                continue;
3275
0
            }
3276
0
            llama_token head_token = breaker[0];
3277
0
            std::vector<llama_token> tail_tokens(breaker.begin() + 1, breaker.end());
3278
0
            ctx->dry_processed_breakers.emplace(head_token, std::move(tail_tokens));
3279
0
        }
3280
3281
0
        if (ctx->dry_processed_breakers.empty()) {
3282
0
            LLAMA_LOG_WARN("no valid DRY sequence breakers processed in llama_sampler_init_dry_testing\n");
3283
0
        }
3284
0
    }
3285
3286
0
    return result;
3287
0
}
3288
3289
// adaptive-p sampler state
3290
//
3291
// maintains an exponential moving average of the *ORIGINAL* probabilities
3292
// of selected tokens, used to compute an adapted target at each sampling step.
3293
//
3294
// see llama.h for a full description of the sampler
3295
//
3296
// ref: https://github.com/ggml-org/llama.cpp/pull/17927
3297
//
3298
struct llama_sampler_adaptive_p {
3299
    const float        target;            // target probability (0.0 - 1.0; negative = disabled)
3300
    const float        decay;             // EMA decay; history ~= 1/(1-decay) tokens (0.0 - 0.99)
3301
    const uint32_t     seed;              // original RNG seed
3302
    uint32_t           seed_cur;          // actual RNG seed
3303
    std::mt19937       rng;               // RNG state
3304
    float              weighted_sum;      // sum(p_i * decay^i)
3305
    float              total_weight;      // sum(decay^i), converges to 1/(1-decay)
3306
    std::vector<float> original_probs;    // pre-transform probs, cached for EMA update
3307
    llama_token        pending_token_id;  // token ID of selected token
3308
    int32_t            pending_token_idx; // index of orig. prob. of selected token in original_probs
3309
};
3310
3311
// adaptive probability transformation constants
3312
static constexpr float DISTRIBUTION_WIDTH =  0.3f;
3313
static constexpr float PEAK_LOGIT_VALUE   =  5.0f;
3314
static constexpr float SHARPNESS          = 10.0f;
3315
static constexpr float INV_WIDTH          =  1.0f / DISTRIBUTION_WIDTH;
3316
3317
0
static const char * llama_sampler_adaptive_p_name(const struct llama_sampler * /*smpl*/) {
3318
0
    return "adaptive-p";
3319
0
}
3320
3321
0
static void llama_sampler_adaptive_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
3322
0
    auto * ctx = (llama_sampler_adaptive_p *) smpl->ctx;
3323
3324
0
    llama_sampler_softmax_impl(cur_p, false);
3325
3326
0
    if (ctx->target < 0.0f) {
3327
        // at negative target values, adaptive-p is a no-op:
3328
        // we simply sample from the existing distribution
3329
0
        cur_p->selected = llama_sample_dist(cur_p, ctx->rng);
3330
0
        return;
3331
0
    }
3332
3333
    // store the original probabilities
3334
0
    ctx->original_probs.resize(cur_p->size);
3335
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3336
0
        ctx->original_probs[i] = cur_p->data[i].p;
3337
0
    }
3338
3339
    // using the EMA, compute the adapted target probability for the current sampling step
3340
0
    auto target = std::clamp(ctx->target, 0.0f, 1.0f);
3341
0
    float adapted_target = std::clamp(
3342
0
        ctx->total_weight == 0.0f ? target : 2.0f * target - (ctx->weighted_sum / ctx->total_weight),
3343
0
        0.0f, 1.0f
3344
0
    );
3345
3346
    // adaptive probability transform
3347
    //
3348
    // quadratic near target for fine differentiation, transitioning to linear decay in the
3349
    // tails. unbounded negative logits ensure proper suppression of far-from-target tokens
3350
    // after the softmax.
3351
    //
3352
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3353
0
        if (cur_p->data[i].logit == -INFINITY) {
3354
            // don't transform logits that are -INFINITY
3355
            // (as masked out by e.g. min-p and top-p when using backend sampling)
3356
0
            continue;
3357
0
        }
3358
0
        float dist = std::abs((cur_p->data[i].p - adapted_target) * INV_WIDTH);
3359
0
        cur_p->data[i].logit = PEAK_LOGIT_VALUE - SHARPNESS * dist * dist / (1.0f + dist);
3360
0
    }
3361
3362
    // softmax and sample from the transformed distribution
3363
0
    llama_sampler_softmax_impl(cur_p, false);
3364
0
    const int idx   = llama_sample_dist(cur_p, ctx->rng);
3365
0
    cur_p->selected = idx;
3366
3367
    // store the selected token ID for acceptance later
3368
0
    ctx->pending_token_id  = cur_p->data[idx].id;
3369
0
    ctx->pending_token_idx = idx;
3370
0
}
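
// Illustrative sketch (not part of llama-sampling.cpp): a self-contained numeric walk
// through the adapted target and the logit transform above. The EMA value 0.65 is made
// up; in the sampler it is weighted_sum / total_weight over the original probabilities
// of previously accepted tokens.

#include <cmath>
#include <cstdio>

int main() {
    const float WIDTH = 0.3f, PEAK = 5.0f, SHARPNESS = 10.0f; // mirrors the constants above

    const float target  = 0.5f;
    const float ema     = 0.65f;               // recent picks ran "hotter" than the target
    const float adapted = 2.0f*target - ema;   // = 0.35: steer the next pick back toward 0.5

    const float probs[] = {0.05f, 0.35f, 0.90f};
    for (float p : probs) {
        const float d = std::fabs((p - adapted) / WIDTH);
        printf("p=%.2f -> logit=%+.3f\n", p, PEAK - SHARPNESS*d*d/(1.0f + d));
    }
    // prints: p=0.05 -> +0.000, p=0.35 -> +5.000, p=0.90 -> -6.863
    return 0;
}
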
3371
3372
0
static void llama_sampler_adaptive_p_accept(struct llama_sampler * smpl, llama_token token) {
3373
0
    auto * ctx = (llama_sampler_adaptive_p *) smpl->ctx;
3374
0
    if (ctx->pending_token_id == token) {
3375
0
        GGML_ASSERT(ctx->pending_token_id != LLAMA_TOKEN_NULL);
3376
0
        GGML_ASSERT(ctx->pending_token_idx != -1);
3377
        // update EMA with the original probability of the selected token
3378
0
        ctx->weighted_sum = ctx->original_probs[ctx->pending_token_idx] + ctx->decay * ctx->weighted_sum;
3379
0
        ctx->total_weight = 1.0f + ctx->decay * ctx->total_weight;
3380
0
    }
3381
0
    ctx->pending_token_id = LLAMA_TOKEN_NULL;
3382
0
    ctx->pending_token_idx = -1;
3383
0
}
3384
3385
0
static void llama_sampler_adaptive_p_reset(struct llama_sampler * smpl) {
3386
0
    auto * ctx = (llama_sampler_adaptive_p *) smpl->ctx;
3387
    // ctx->target and ctx->decay never change after init, so it's safe to keep them as is.
3388
    // original_probs is completely overwritten on every call to _apply.
3389
    // so we only need to reset the EMA state and pending token.
3390
0
    ctx->weighted_sum      = ctx->target / (1.0f - ctx->decay);
3391
0
    ctx->total_weight      = 1.0f / (1.0f - ctx->decay);
3392
0
    ctx->pending_token_id  = LLAMA_TOKEN_NULL;
3393
0
    ctx->pending_token_idx = -1;
3394
0
    ctx->seed_cur          = get_rng_seed(ctx->seed);
3395
0
    ctx->rng.seed(ctx->seed_cur);
3396
0
}
3397
3398
0
static struct llama_sampler * llama_sampler_adaptive_p_clone(const struct llama_sampler * smpl) {
3399
0
    const auto * ctx  = (const llama_sampler_adaptive_p *) smpl->ctx;
3400
0
    auto * result     = llama_sampler_init_adaptive_p(ctx->target, ctx->decay, ctx->seed);
3401
0
    auto * result_ctx = (llama_sampler_adaptive_p *) result->ctx;
3402
3403
    // copy the remaining state (target, decay and seed were set by init; note the RNG is re-seeded rather than state-copied)
3404
0
    result_ctx->weighted_sum      = ctx->weighted_sum;
3405
0
    result_ctx->total_weight      = ctx->total_weight;
3406
0
    result_ctx->pending_token_id  = ctx->pending_token_id;
3407
0
    result_ctx->pending_token_idx = ctx->pending_token_idx;
3408
3409
0
    return result;
3410
0
}
3411
3412
0
static void llama_sampler_adaptive_p_free(struct llama_sampler * smpl) {
3413
0
    delete (llama_sampler_adaptive_p *) smpl->ctx;
3414
0
}
3415
3416
static struct llama_sampler_i llama_sampler_adaptive_p_i = {
3417
    /* .name              = */ llama_sampler_adaptive_p_name,
3418
    /* .accept            = */ llama_sampler_adaptive_p_accept,
3419
    /* .apply             = */ llama_sampler_adaptive_p_apply,
3420
    /* .reset             = */ llama_sampler_adaptive_p_reset,
3421
    /* .clone             = */ llama_sampler_adaptive_p_clone,
3422
    /* .free              = */ llama_sampler_adaptive_p_free,
3423
    /* .backend_init      = */ nullptr,
3424
    /* .backend_accept    = */ nullptr,
3425
    /* .backend_apply     = */ nullptr,
3426
    /* .backend_set_input = */ nullptr,
3427
};
3428
3429
struct llama_sampler * llama_sampler_init_adaptive_p(
3430
    float    target,
3431
    float    decay,
3432
    uint32_t seed
3433
0
) {
3434
0
    auto seed_cur = get_rng_seed(seed);
3435
0
    float clamped_decay = std::clamp(decay, 0.0f, 0.99f);
3436
0
    return llama_sampler_init(
3437
0
        /* .iface = */ &llama_sampler_adaptive_p_i,
3438
0
        /* .ctx   = */ new llama_sampler_adaptive_p {
3439
0
            /* .target            = */ target,
3440
0
            /* .decay             = */ clamped_decay,
3441
0
            /* .seed              = */ seed,
3442
0
            /* .seed_cur          = */ seed_cur,
3443
0
            /* .rng               = */ std::mt19937(seed_cur),
3444
0
            /* .weighted_sum      = */ target / (1.0f - clamped_decay),
3445
0
            /* .total_weight      = */ 1.0f / (1.0f - clamped_decay),
3446
0
            /* .original_probs    = */ {},
3447
0
            /* .pending_token_id  = */ LLAMA_TOKEN_NULL,
3448
0
            /* .pending_token_idx = */ -1
3449
0
        }
3450
0
    );
3451
0
}
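
// Illustrative sketch (not part of llama-sampling.cpp): since _apply selects a token
// itself (it sets cur_p->selected), adaptive-p plays the role of the final, dist-style
// sampler in a chain. Hypothetical wiring with made-up parameter values:

llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

llama_sampler_chain_add(chain, llama_sampler_init_min_p(0.05f, 1));
llama_sampler_chain_add(chain, llama_sampler_init_adaptive_p(
    /* target = */ 0.5f,
    /* decay  = */ 0.9f,   // EMA history of roughly 1/(1-0.9) = 10 tokens
    /* seed   = */ LLAMA_DEFAULT_SEED));
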
3452
3453
// logit-bias
3454
3455
struct llama_sampler_logit_bias : public llama_sampler_backend {
3456
    const int32_t n_vocab;
3457
3458
    const std::vector<llama_logit_bias> logit_bias;
3459
3460
    std::vector<llama_logit_bias> to_search;
3461
3462
    struct ggml_tensor * inp_logit_bias;
3463
    struct ggml_tensor * inp_logit_idxs;
3464
3465
    ggml_context_ptr        inp_ctx;
3466
    ggml_backend_buffer_ptr inp_buf;
3467
};
3468
3469
0
static const char * llama_sampler_logit_bias_name(const struct llama_sampler * smpl) {
3470
0
    auto * ctx = (llama_sampler_logit_bias *) smpl->ctx;
3471
0
    return ctx->get_name();
3472
0
}
3473
3474
0
static void llama_sampler_logit_bias_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
3475
0
    auto * ctx = (llama_sampler_logit_bias *) smpl->ctx;
3476
3477
0
    if (ctx->logit_bias.empty()) {
3478
0
        return;
3479
0
    }
3480
3481
0
    ctx->to_search.clear();
3482
3483
    // fast path: update candidates that are still at their original position in the vocabulary order (i.e. idx == id)
3484
0
    for (const auto & lb : ctx->logit_bias) {
3485
0
        if (lb.token >= 0 && cur_p->size > (size_t) lb.token && cur_p->data[lb.token].id == lb.token) {
3486
0
            cur_p->data[lb.token].logit += lb.bias;
3487
0
        } else {
3488
0
            ctx->to_search.push_back(lb);
3489
0
        }
3490
0
    }
3491
3492
0
    if (ctx->to_search.empty()) {
3493
0
        return;
3494
0
    }
3495
3496
    // search for the remaining candidates that were not found in the previous step
3497
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3498
0
        for (const auto & lb : ctx->to_search) {
3499
0
            if (cur_p->data[i].id == lb.token) {
3500
0
                cur_p->data[i].logit += lb.bias;
3501
0
                break;
3502
0
            }
3503
0
        }
3504
0
    }
3505
0
}
3506
3507
0
static struct llama_sampler * llama_sampler_logit_bias_clone(const struct llama_sampler * smpl) {
3508
0
    const auto * ctx = (const llama_sampler_logit_bias *) smpl->ctx;
3509
0
    return llama_sampler_init_logit_bias(ctx->n_vocab, ctx->logit_bias.size(), ctx->logit_bias.data());
3510
0
}
3511
3512
0
static void llama_sampler_logit_bias_free(struct llama_sampler * smpl) {
3513
0
    delete (llama_sampler_logit_bias *) smpl->ctx;
3514
0
}
3515
3516
static void llama_sampler_logit_bias_backend_apply(
3517
        struct llama_sampler      * smpl,
3518
        struct ggml_context       * ctx,
3519
        struct ggml_cgraph        * gf,
3520
0
        struct llama_sampler_data * data) {
3521
0
    GGML_UNUSED(gf);
3522
0
    GGML_UNUSED(ctx);
3523
3524
0
    auto * sctx = (llama_sampler_logit_bias *) smpl->ctx;
3525
0
    if (sctx->logit_bias.empty()) {
3526
0
        return;
3527
0
    }
3528
3529
0
    ggml_tensor * cur = ggml_fill(ctx, data->logits, 0.0f);
3530
3531
0
    cur = ggml_reshape_2d(ctx, cur, 1, ggml_nelements(cur));
3532
0
    cur = ggml_set_rows(ctx, cur, sctx->inp_logit_bias, sctx->inp_logit_idxs);
3533
0
    cur = ggml_reshape_1d(ctx, cur, ggml_nelements(cur));
3534
3535
0
    data->logits = ggml_add(ctx, data->logits, cur);
3536
0
}
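
// Illustrative sketch (not part of llama-sampling.cpp): the same computation as the graph
// above, restated as plain CPU code: scatter each bias into a zero vector at its token
// index, then add element-wise to the logits. toy_bias and all values are made up.

#include <cstdint>
#include <cstdio>
#include <vector>

struct toy_bias { int32_t token; float bias; };

int main() {
    std::vector<float>    logits = {1.0f, 2.0f, 3.0f, 4.0f}; // toy vocab of 4 tokens
    std::vector<toy_bias> biases = {{1, -0.5f}, {3, 2.0f}};

    std::vector<float> scatter(logits.size(), 0.0f);          // ggml_fill(..., 0.0f)
    for (const auto & b : biases) {
        scatter[b.token] = b.bias;                            // ggml_set_rows on the (1, n_vocab) view
    }
    for (size_t i = 0; i < logits.size(); ++i) {
        logits[i] += scatter[i];                              // ggml_add
    }

    for (float l : logits) {
        printf("%.2f ", l); // prints: 1.00 1.50 3.00 6.00
    }
    printf("\n");
    return 0;
}
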
3537
3538
0
static void llama_sampler_logit_bias_backend_set_input(struct llama_sampler * smpl) {
3539
0
    auto * sctx = (llama_sampler_logit_bias *) smpl->ctx;
3540
0
    if (sctx->logit_bias.empty()) {
3541
0
        return;
3542
0
    }
3543
3544
0
    GGML_ASSERT(sctx->inp_logit_bias != nullptr);
3545
0
    GGML_ASSERT(sctx->inp_logit_idxs != nullptr);
3546
3547
0
    const size_t n = sctx->logit_bias.size();
3548
3549
0
    std::vector<float>   data_logit_bias(n, 0.0f);
3550
0
    std::vector<int32_t> data_logit_idxs(n, 0);
3551
0
    for (size_t i = 0; i < n; ++i) {
3552
0
        const auto & lb = sctx->logit_bias[i];
3553
0
        GGML_ASSERT(lb.token >= 0 && lb.token < (int32_t) sctx->n_vocab);
3554
0
        data_logit_bias[i] = lb.bias;
3555
0
        data_logit_idxs[i] = lb.token;
3556
0
    }
3557
3558
0
    ggml_backend_tensor_set(sctx->inp_logit_bias, data_logit_bias.data(), 0, ggml_nbytes(sctx->inp_logit_bias));
3559
0
    ggml_backend_tensor_set(sctx->inp_logit_idxs, data_logit_idxs.data(), 0, ggml_nbytes(sctx->inp_logit_idxs));
3560
0
}
3561
3562
static bool llama_sampler_logit_bias_backend_init(
3563
        struct llama_sampler       * smpl,
3564
0
        ggml_backend_buffer_type_t   buft) {
3565
0
    auto * sctx = (llama_sampler_logit_bias *) smpl->ctx;
3566
3567
0
    sctx->init(true);
3568
3569
0
    if (sctx->logit_bias.empty()) {
3570
0
        return true;
3571
0
    }
3572
3573
0
    ggml_init_params params = {
3574
0
        /*.mem_size   =*/ 2*ggml_tensor_overhead(),
3575
0
        /*.mem_buffer =*/ nullptr,
3576
0
        /*.no_alloc   =*/ true,
3577
0
    };
3578
3579
0
    sctx->inp_ctx.reset(ggml_init(params));
3580
3581
0
    const size_t n = sctx->logit_bias.size();
3582
3583
0
    sctx->inp_logit_bias = ggml_new_tensor_2d(sctx->inp_ctx.get(), GGML_TYPE_F32, 1, n);
3584
0
    ggml_set_name(sctx->inp_logit_bias, "logit_bias");
3585
0
    ggml_set_input(sctx->inp_logit_bias);
3586
3587
0
    sctx->inp_logit_idxs = ggml_new_tensor_1d(sctx->inp_ctx.get(), GGML_TYPE_I32, n);
3588
0
    ggml_set_name(sctx->inp_logit_idxs, "logit_idxs");
3589
0
    ggml_set_input(sctx->inp_logit_idxs);
3590
3591
    // Allocate all tensors from our context to the backend
3592
0
    sctx->inp_buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(sctx->inp_ctx.get(), buft));
3593
3594
0
    ggml_backend_buffer_clear(sctx->inp_buf.get(), 0);
3595
3596
0
    return true;
3597
0
}
3598
3599
static struct llama_sampler_i llama_sampler_logit_bias_i = {
3600
    /* .name              = */ llama_sampler_logit_bias_name,
3601
    /* .accept            = */ nullptr,
3602
    /* .apply             = */ llama_sampler_logit_bias_apply,
3603
    /* .reset             = */ nullptr,
3604
    /* .clone             = */ llama_sampler_logit_bias_clone,
3605
    /* .free              = */ llama_sampler_logit_bias_free,
3606
    /* .backend_init      = */ llama_sampler_logit_bias_backend_init,
3607
    /* .backend_accept    = */ nullptr,
3608
    /* .backend_apply     = */ llama_sampler_logit_bias_backend_apply,
3609
    /* .backend_set_input = */ llama_sampler_logit_bias_backend_set_input,
3610
};
3611
3612
struct llama_sampler * llama_sampler_init_logit_bias(
3613
                         int32_t   n_vocab,
3614
                         int32_t   n_logit_bias,
3615
0
          const llama_logit_bias * logit_bias) {
3616
0
    const bool is_empty = n_logit_bias <= 0;
3617
3618
0
    if (is_empty) {
3619
0
        return llama_sampler_init_empty("?logit-bias");
3620
0
    }
3621
3622
0
    return llama_sampler_init(
3623
0
        /* .iface = */ &llama_sampler_logit_bias_i,
3624
0
        /* .ctx   = */ new llama_sampler_logit_bias {
3625
0
            ("logit-bias"),
3626
0
            /* .n_vocab        = */ n_vocab,
3627
0
            /* .logit_bias     = */ std::vector<llama_logit_bias>(logit_bias, logit_bias + n_logit_bias),
3628
0
            /* .to_search      = */ {},
3629
0
            /* .inp_logit_bias = */ nullptr,
3630
0
            /* .inp_logit_idxs = */ nullptr,
3631
0
            /* .inp_ctx        = */ nullptr,
3632
0
            /* .inp_buf        = */ nullptr,
3633
0
        }
3634
0
    );
3635
0
}
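
// Illustrative sketch (not part of llama-sampling.cpp): hypothetical usage; the token ids
// are made up, n_vocab would come from e.g. llama_vocab_n_tokens(vocab), and INFINITY
// requires <cmath>:

llama_logit_bias biases[] = {
    { /* token = */  42, /* bias = */ -INFINITY }, // never sample token 42
    { /* token = */ 100, /* bias = */  1.5f     }, // nudge token 100 upward
};

llama_sampler * smpl = llama_sampler_init_logit_bias(n_vocab, 2, biases);
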
3636
3637
// infill
3638
3639
//#define GGML_DEBUG_SAMPLER_INFILL
3640
3641
struct llama_sampler_infill {
3642
    const struct llama_vocab * vocab;
3643
3644
    std::vector<char> buf0;
3645
    std::vector<char> buf1;
3646
};
3647
3648
0
static const char * llama_sampler_infill_name(const struct llama_sampler * /*smpl*/) {
3649
0
    return "infill";
3650
0
}
3651
3652
0
static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
3653
0
    auto * ctx = (llama_sampler_infill *) smpl->ctx;
3654
3655
0
    llama_sampler_softmax_impl(cur_p, true);
3656
3657
#if defined(GGML_DEBUG_SAMPLER_INFILL)
3658
#define LOG_DBG_CUR LLAMA_LOG_DEBUG
3659
#else
3660
0
#define LOG_DBG_CUR(...)
3661
0
#endif
3662
3663
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3664
0
        LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit);
3665
0
    }
3666
3667
0
    float p_txt_sum = 0.0f;
3668
0
    float p_eog_sum = 0.0f;
3669
3670
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3671
0
        if (ctx->vocab->is_eog(cur_p->data[i].id)) {
3672
0
            p_eog_sum += cur_p->data[i].p;
3673
0
        } else {
3674
0
            p_txt_sum += cur_p->data[i].p;
3675
0
        }
3676
0
    }
3677
3678
0
    const float rat = p_eog_sum == 0.0 ? INFINITY : p_txt_sum / p_eog_sum; GGML_UNUSED(rat);
3679
3680
0
    LOG_DBG_CUR("%s: p_txt_sum = %.2f, p_eog_sum = %.2f, rat = %.2f, n = %zu\n", __func__, p_txt_sum, p_eog_sum, rat, cur_p->size);
3681
3682
0
    if (3*p_eog_sum*cur_p->size > p_txt_sum) {
3683
0
        LOG_DBG_CUR("%s: the ratio p_txt/p_eog = %.2f is too low -> sampling EOG\n", __func__, p_txt_sum/p_eog_sum);
3684
3685
        // keep just the EOG tokens
3686
0
        const auto size_org = cur_p->size;
3687
3688
0
        cur_p->size = 0;
3689
3690
0
        float p_sum = 0.0f;
3691
3692
0
        for (size_t i = 0; i < size_org; ++i) {
3693
0
            if (ctx->vocab->is_eog(cur_p->data[i].id)) {
3694
0
                p_sum += cur_p->data[i].p;
3695
3696
0
                cur_p->data[cur_p->size++] = cur_p->data[i];
3697
0
            }
3698
0
        }
3699
3700
        // normalize probs
3701
0
        for (size_t i = 0; i < cur_p->size; ++i) {
3702
0
            cur_p->data[i].p /= p_sum;
3703
0
        }
3704
3705
0
        return;
3706
0
    }
3707
3708
0
    size_t n_combined = 0; GGML_UNUSED(n_combined);
3709
3710
    // combine tokens with common prefix
3711
0
    for (size_t i0 = 0; i0 < cur_p->size; ++i0) {
3712
0
        for (size_t i1 = 0; i1 < cur_p->size; ++i1) {
3713
0
            if (cur_p->data[i0].logit == -INFINITY) {
3714
0
                break;
3715
0
            }
3716
3717
0
            if (i0 == i1 || cur_p->data[i1].logit == -INFINITY) {
3718
0
                continue;
3719
0
            }
3720
3721
0
            int len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
3722
0
            if (len0 < 0) {
3723
0
                ctx->buf0.resize(-len0); // a negative return value is the required buffer size
3724
0
                len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
3725
0
                assert(len0 > 0);
3726
0
            }
3727
3728
0
            int len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
3729
0
            if (len1 < 0) {
3730
0
                ctx->buf1.resize(-len1); // a negative return value is the required buffer size
3731
0
                len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
3732
0
                assert(len1 > 0);
3733
0
            }
3734
3735
            // token i0 is a prefix of token i1
3736
0
            if (len0 > 0 && len0 <= len1 && memcmp(ctx->buf0.data(), ctx->buf1.data(), len0) == 0) {
3737
0
                int dst = i0;
3738
0
                int src = i1;
3739
3740
                // merge into the token with higher probability
3741
0
                if (cur_p->data[i1].p > cur_p->data[i0].p) {
3742
0
                    std::swap(dst, src);
3743
0
                }
3744
3745
0
                cur_p->data[dst].p += cur_p->data[src].p;
3746
0
                cur_p->data[src].logit = -INFINITY;
3747
0
                cur_p->data[src].p     = 0.0f;
3748
3749
0
                n_combined++;
3750
0
            }
3751
0
        }
3752
0
    }
3753
3754
0
    size_t n_non_eog = 0;
3755
3756
0
    size_t size_org = cur_p->size;
3757
3758
0
    float p_sum = 0.0f;
3759
0
    float thold = 0.2f;
3760
3761
0
    cur_p->size = 0;
3762
3763
0
    LOG_DBG_CUR("%s: n_combined = %zu, applying thold = %.3f\n", __func__, n_combined, thold);
3764
3765
0
    for (size_t i = 0; i < size_org; ++i) {
3766
0
        const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id);
3767
3768
0
        if (cur_p->data[i].p < thold && !is_eog) {
3769
0
            continue;
3770
0
        }
3771
3772
0
        if (!is_eog) {
3773
0
            ++n_non_eog;
3774
0
        }
3775
3776
0
        p_sum += cur_p->data[i].p;
3777
3778
        // keep this token
3779
0
        cur_p->data[cur_p->size++] = cur_p->data[i];
3780
0
    }
3781
3782
0
    LOG_DBG_CUR("%s: n_non_eog = %zu\n", __func__, n_non_eog);
3783
3784
    // if no non-EOG tokens are left -> reduce cur_p to single EOT token
3785
0
    if (n_non_eog == 0) {
3786
0
        cur_p->size = 1;
3787
0
        cur_p->data[0].id = ctx->vocab->token_eot();
3788
0
        if (cur_p->data[0].id == LLAMA_TOKEN_NULL) {
3789
0
            cur_p->data[0].id = ctx->vocab->token_eos();
3790
0
        }
3791
0
        cur_p->data[0].logit = 1.0f;
3792
3793
0
        GGML_ASSERT(cur_p->data[0].id != LLAMA_TOKEN_NULL);
3794
3795
0
        return;
3796
0
    }
3797
3798
    // normalize probs
3799
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3800
0
        cur_p->data[i].p /= p_sum;
3801
3802
0
        LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit);
3803
0
    }
3804
3805
0
    size_org = cur_p->size;
3806
0
    p_sum = 0.0f;
3807
0
    thold = 1.0/(n_non_eog + 1);
3808
3809
0
    cur_p->size = 0;
3810
3811
0
    LOG_DBG_CUR("%s: applying thold = %.3f\n", __func__, thold);
3812
3813
0
    for (size_t i = 0; i < size_org; ++i) {
3814
0
        const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id);
3815
3816
0
        if (cur_p->data[i].p < thold && !is_eog) {
3817
0
            continue;
3818
0
        }
3819
3820
0
        p_sum += cur_p->data[i].p;
3821
3822
0
        cur_p->data[cur_p->size++] = cur_p->data[i];
3823
0
    }
3824
3825
    // normalize probs
3826
0
    for (size_t i = 0; i < cur_p->size; ++i) {
3827
0
        cur_p->data[i].p /= p_sum;
3828
3829
0
        LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit);
3830
0
    }
3831
3832
0
#undef LOG_DBG_CUR
3833
0
}
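
// Illustrative sketch (not part of llama-sampling.cpp): the prefix-merge loop above,
// restated on strings: when one candidate's text is a prefix of another's, their
// probability mass is combined into the likelier one. `cand` and the toy pieces are
// made up; the real code compares detokenized pieces and marks merged-away entries
// with a -INFINITY logit (here: p == 0).

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct cand { std::string piece; float p; };

int main() {
    std::vector<cand> cur = {{"in", 0.30f}, {"int", 0.45f}, {"if", 0.25f}};

    for (size_t i0 = 0; i0 < cur.size(); ++i0) {
        for (size_t i1 = 0; i1 < cur.size(); ++i1) {
            if (i0 == i1 || cur[i0].p == 0.0f || cur[i1].p == 0.0f) {
                continue;
            }
            const std::string & s0 = cur[i0].piece;
            const std::string & s1 = cur[i1].piece;

            // piece i0 is a prefix of piece i1
            if (s0.size() <= s1.size() && s1.compare(0, s0.size(), s0) == 0) {
                size_t dst = i0;
                size_t src = i1;
                if (cur[src].p > cur[dst].p) {
                    std::swap(dst, src); // merge into the piece with higher probability
                }
                cur[dst].p += cur[src].p;
                cur[src].p  = 0.0f;
            }
        }
    }

    for (const auto & c : cur) {
        printf("%-3s %.2f\n", c.piece.c_str(), c.p); // prints: in 0.00 / int 0.75 / if 0.25
    }
    return 0;
}
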
3834
3835
0
static struct llama_sampler * llama_sampler_infill_clone(const struct llama_sampler * smpl) {
3836
0
    const auto * ctx = (const llama_sampler_infill *) smpl->ctx;
3837
0
    return llama_sampler_init_infill(ctx->vocab);
3838
0
}
3839
3840
0
static void llama_sampler_infill_free(struct llama_sampler * smpl) {
3841
0
    delete (llama_sampler_infill *) smpl->ctx;
3842
0
}
3843
3844
static struct llama_sampler_i llama_sampler_infill_i = {
3845
    /* .name              = */ llama_sampler_infill_name,
3846
    /* .accept            = */ nullptr,
3847
    /* .apply             = */ llama_sampler_infill_apply,
3848
    /* .reset             = */ nullptr,
3849
    /* .clone             = */ llama_sampler_infill_clone,
3850
    /* .free              = */ llama_sampler_infill_free,
3851
    /* .backend_init      = */ nullptr,
3852
    /* .backend_accept    = */ nullptr,
3853
    /* .backend_apply     = */ nullptr,
3854
    /* .backend_set_input = */ nullptr,
3855
};
3856
3857
0
struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab) {
3858
0
    return llama_sampler_init(
3859
0
        /* .iface = */ &llama_sampler_infill_i,
3860
0
        /* .ctx   = */ new llama_sampler_infill {
3861
0
            /* .vocab = */ vocab,
3862
0
            /* .buf0  = */ std::vector<char>(512),
3863
0
            /* .buf1  = */ std::vector<char>(512),
3864
0
        }
3865
0
    );
3866
0
}
3867
3868
// utils
3869
3870
0
uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl) {
3871
0
    if (smpl->iface == &llama_sampler_dist_i) {
3872
0
        return ((const llama_sampler_dist *) smpl->ctx)->seed_cur;
3873
0
    }
3874
3875
0
    if (smpl->iface == &llama_sampler_mirostat_i) {
3876
0
        return ((const llama_sampler_mirostat *) smpl->ctx)->seed_cur;
3877
0
    }
3878
3879
0
    if (smpl->iface == &llama_sampler_mirostat_v2_i) {
3880
0
        return ((const llama_sampler_mirostat_v2 *) smpl->ctx)->seed_cur;
3881
0
    }
3882
3883
0
    if (smpl->iface == &llama_sampler_chain_i) {
3884
0
        const auto * ctx = (const llama_sampler_chain *) smpl->ctx;
3885
0
        for (auto it = ctx->samplers.rbegin(); it != ctx->samplers.rend(); ++it) {
3886
0
            const uint32_t seed = llama_sampler_get_seed(it->ptr);
3887
0
            if (seed != LLAMA_DEFAULT_SEED) {
3888
0
                return seed;
3889
0
            }
3890
0
        }
3891
0
    }
3892
3893
0
    return LLAMA_DEFAULT_SEED;
3894
0
}
3895
3896
// perf
3897
3898
0
struct llama_perf_sampler_data llama_perf_sampler(const struct llama_sampler * chain) {
3899
0
    struct llama_perf_sampler_data data = {};
3900
3901
0
    if (chain == nullptr || chain->iface != &llama_sampler_chain_i) {
3902
0
        GGML_ABORT("%s: invalid sampler passed - requires a sampler created with llama_sampler_chain_init()\n", __func__);
3903
0
    }
3904
3905
0
    const auto * ctx = (const struct llama_sampler_chain *) chain->ctx;
3906
3907
0
    data.t_sample_ms = 1e-3 * ctx->t_sample_us;
3908
0
    data.n_sample    = std::max(0, ctx->n_sample);
3909
3910
0
    return data;
3911
0
}
3912
3913
0
void llama_perf_sampler_print(const struct llama_sampler * chain) {
3914
0
    const auto data = llama_perf_sampler(chain);
3915
3916
0
    LLAMA_LOG_INFO("%s:    samplers time = %10.2f ms / %5d runs\n", __func__, data.t_sample_ms, data.n_sample);
3917
0
}
3918
3919
0
void llama_perf_sampler_reset(struct llama_sampler * chain) {
3920
0
    if (chain == nullptr || chain->iface != &llama_sampler_chain_i) {
3921
0
        GGML_ABORT("%s: invalid sampler passed - requires a sampler created with llama_sampler_chain_init()\n", __func__);
3922
0
    }
3923
3924
0
    auto * ctx = (struct llama_sampler_chain *) chain->ctx;
3925
3926
0
    ctx->t_sample_us = 0;
3927
0
    ctx->n_sample    = 0;
3928
0
}