Coverage Report

Created: 2026-03-21 06:50

/src/llama.cpp/common/sampling.cpp

#include "sampling.h"

#include "common.h"
#include "ggml.h"
#include "log.h"
#include "reasoning-budget.h"

#include <algorithm>
#include <cctype>
#include <cmath>
#include <cstring>
#include <unordered_map>
#include <vector>

// the ring buffer works similarly to std::deque, but with a fixed capacity
// TODO: deduplicate with llama-impl.h
template<typename T>
struct ring_buffer {
    ring_buffer(size_t cap) : capacity(cap), data(cap) {}

    T & front() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[first];
    }

    const T & front() const {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[first];
    }

    T & back() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        // pos is the next write slot, so the most recently pushed element is one position behind it
        return data[(pos + capacity - 1) % capacity];
    }

    const T & back() const {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[(pos + capacity - 1) % capacity];
    }

    void push_back(const T & value) {
        if (sz == capacity) {
            // advance the start when the buffer is full
            first = (first + 1) % capacity;
        } else {
            sz++;
        }
        data[pos] = value;
        pos = (pos + 1) % capacity;
    }

    T pop_front() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        T value = data[first];
        first = (first + 1) % capacity;
        sz--;
        return value;
    }

    // reverse-indexed access: rat(0) is the most recently pushed element
    const T & rat(size_t i) const {
        if (i >= sz) {
            throw std::runtime_error("ring buffer: index out of bounds");
        }
        return data[(first + sz - i - 1) % capacity];
    }

    std::vector<T> to_vector() const {
        std::vector<T> result;
        result.reserve(sz);
        for (size_t i = 0; i < sz; i++) {
            result.push_back(data[(first + i) % capacity]);
        }
        return result;
    }

    void clear() {
        // only reset the buffer state; the underlying storage stays allocated
        sz = 0;
        first = 0;
        pos = 0;
    }

    bool empty() const {
        return sz == 0;
    }

    size_t size() const {
        return sz;
    }

    size_t capacity = 0;
    size_t sz = 0;
    size_t first = 0;
    size_t pos = 0;
    std::vector<T> data;
};
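
// Illustrative check (not part of the original file): exercises the fixed-capacity
// semantics above. Once full, push_back overwrites the oldest element, and rat()
// indexes from newest to oldest.
static void example_ring_buffer_semantics() {
    ring_buffer<int> rb(3);
    for (int i = 0; i < 5; i++) {
        rb.push_back(i); // pushes 0..4 into a capacity-3 buffer
    }
    GGML_ASSERT(rb.size()  == 3);
    GGML_ASSERT(rb.front() == 2); // oldest surviving element
    GGML_ASSERT(rb.back()  == 4); // most recently pushed element
    GGML_ASSERT(rb.rat(0)  == 4); // rat(0) == back()
    GGML_ASSERT(rb.rat(2)  == 2); // rat(sz - 1) == front()
}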

struct common_sampler {
    common_params_sampling params;

    struct llama_sampler * grmr; // may be null when no grammar is configured
    struct llama_sampler * chain;

    ring_buffer<llama_token> prev;

    std::vector<llama_token_data> cur;

    llama_token_data_array cur_p;

    void reset() {
        prev.clear();

        llama_sampler_reset(chain);
    }

    // build cur/cur_p for position idx, preferring backend-sampled probabilities,
    // then backend-sampled logits, and falling back to the full-vocab logits
    void set_logits(struct llama_context * ctx, int idx) {
        const float *       sampled_probs  = llama_get_sampled_probs_ith     (ctx, idx);
        const float *       sampled_logits = llama_get_sampled_logits_ith    (ctx, idx);
        const llama_token * sampled_ids    = llama_get_sampled_candidates_ith(ctx, idx);

        const llama_model * model = llama_get_model(ctx);
        const llama_vocab * vocab = llama_model_get_vocab(model);

        const int n_vocab = llama_vocab_n_tokens(vocab);

        if (sampled_probs) {
            const uint32_t sampled_probs_count = llama_get_sampled_probs_count_ith(ctx, idx);
            cur.resize(sampled_probs_count);
            for (uint32_t i = 0; i < sampled_probs_count; ++i) {
                cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], sampled_probs[i]};
            }
        } else if (sampled_logits) {
            const uint32_t sampled_logits_count = llama_get_sampled_logits_count_ith(ctx, idx);
            cur.resize(sampled_logits_count);
            for (uint32_t i = 0; i < sampled_logits_count; i++) {
                cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], 0.0f};
            }
        } else {
            const auto * logits = llama_get_logits_ith(ctx, idx);
            GGML_ASSERT(logits != nullptr);
            cur.resize(n_vocab);
            for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
                cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
            }
        }

        cur_p = { cur.data(), cur.size(), -1, false };
    }

    common_time_meas tm() {
        return common_time_meas(t_total_us, params.no_perf);
    }

    mutable int64_t t_total_us = 0;
};
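
// Illustrative sketch (not part of the original file): the layout of the
// llama_token_data_array produced by set_logits(). `selected` starts at -1
// (nothing chosen yet) and `sorted` at false; samplers in the chain update
// both as they run.
static void example_token_data_array_layout() {
    std::vector<llama_token_data> cands = {
        { /* id = */ 7,  /* logit = */ 2.5f, /* p = */ 0.0f },
        { /* id = */ 42, /* logit = */ 1.0f, /* p = */ 0.0f },
        { /* id = */ 3,  /* logit = */ 0.5f, /* p = */ 0.0f },
    };

    llama_token_data_array cur_p = { cands.data(), cands.size(), /* selected = */ -1, /* sorted = */ false };

    GGML_ASSERT(cur_p.size == 3);
    GGML_ASSERT(cur_p.selected == -1); // no token chosen until a selector (dist, mirostat, ...) runs
}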

std::string common_params_sampling::print() const {
    char result[1024];

    snprintf(result, sizeof(result),
            "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
            "\tdry_multiplier = %.3f, dry_base = %.3f, dry_allowed_length = %d, dry_penalty_last_n = %d\n"
            "\ttop_k = %d, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, top_n_sigma = %.3f, temp = %.3f\n"
            "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f, adaptive_target = %.3f, adaptive_decay = %.3f",
            penalty_last_n, penalty_repeat, penalty_freq, penalty_present,
            dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n,
            top_k, top_p, min_p, xtc_probability, xtc_threshold, typ_p, top_n_sigma, temp,
            mirostat, mirostat_eta, mirostat_tau, adaptive_target, adaptive_decay);

    return std::string(result);
}

struct common_sampler * common_sampler_init(const struct llama_model * model, struct common_params_sampling & params) {
    const llama_vocab * vocab = llama_model_get_vocab(model);

    llama_sampler_chain_params lparams = llama_sampler_chain_default_params();

    lparams.no_perf = params.no_perf;

    llama_sampler * grmr  = nullptr;
    llama_sampler * chain = llama_sampler_chain_init(lparams);

    std::vector<llama_sampler *> samplers;

    const std::string & grammar_str = common_grammar_value(params.grammar);
    if (grammar_str.compare(0, 11, "%llguidance") == 0) {
#ifdef LLAMA_USE_LLGUIDANCE
        grmr = llama_sampler_init_llg(vocab, "lark", grammar_str.c_str());
#else
        GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
#endif // LLAMA_USE_LLGUIDANCE
    } else {
        std::vector<std::string> trigger_patterns;
        std::vector<llama_token> trigger_tokens;
        for (const auto & trigger : params.grammar_triggers) {
            switch (trigger.type) {
                case COMMON_GRAMMAR_TRIGGER_TYPE_WORD:
                {
                    const auto & word = trigger.value;
                    trigger_patterns.push_back(regex_escape(word));
                    break;
                }
                case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN:
                {
                    trigger_patterns.push_back(trigger.value);
                    break;
                }
                case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL:
                {
                    // anchor the pattern at both ends unless it already is, e.g. "foo" -> "^foo$"
                    const auto & pattern = trigger.value;
                    std::string anchored = "^$";
                    if (!pattern.empty()) {
                        anchored = (pattern.front() != '^' ? "^" : "")
                            + pattern
                            + (pattern.back() != '$' ? "$" : "");
                    }
                    trigger_patterns.push_back(anchored);
                    break;
                }
                case COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN:
                {
                    const auto token = trigger.token;
                    trigger_tokens.push_back(token);
                    break;
                }
                default:
                    GGML_ASSERT(false && "unknown trigger type");
            }
        }

        std::vector<const char *> trigger_patterns_c;
        trigger_patterns_c.reserve(trigger_patterns.size());
        for (const auto & regex : trigger_patterns) {
            trigger_patterns_c.push_back(regex.c_str());
        }

        if (!grammar_str.empty()) {
            if (params.grammar_lazy) {
                grmr = llama_sampler_init_grammar_lazy_patterns(vocab, grammar_str.c_str(), "root",
                        trigger_patterns_c.data(), trigger_patterns_c.size(),
                        trigger_tokens.data(), trigger_tokens.size());
            } else {
                grmr = llama_sampler_init_grammar(vocab, grammar_str.c_str(), "root");
            }
        }
    }

    // Feed generation prompt tokens to the grammar sampler so it advances past
    // tokens the template already placed in the prompt.
    // Only applies to output-format and tool-call grammars; user-supplied grammars must not be prefilled.
    std::vector<llama_token> prefill_tokens;
    if (!params.generation_prompt.empty() && common_grammar_needs_prefill(params.grammar)) {
        GGML_ASSERT(vocab != nullptr);
        prefill_tokens = common_tokenize(vocab, params.generation_prompt, false, true);
        if (!prefill_tokens.empty()) {
            std::string first_token = common_token_to_piece(vocab, prefill_tokens[0], true);
            // cast to unsigned char: passing a negative char to std::isspace is undefined behavior
            if (std::isspace(static_cast<unsigned char>(first_token[0])) &&
                !std::isspace(static_cast<unsigned char>(params.generation_prompt[0]))) {
                // some tokenizers add a space before the first special token; remove it
                prefill_tokens = std::vector<llama_token>(prefill_tokens.begin() + 1, prefill_tokens.end());
            }
        }

        if (grmr) {
            try {
                for (const auto & token : prefill_tokens) {
                    llama_sampler_accept(grmr, token);
                    LOG_DBG("%s: accepted prefill token (%d)\n", __func__, token);
                }
            } catch (const std::exception &) {
                LOG_ERR("%s: error initializing grammar sampler for grammar:\n%s\n\nGeneration prompt:\n'%s'\n", __func__,
                    common_grammar_value(params.grammar).c_str(), params.generation_prompt.c_str());
                throw; // rethrow the original exception rather than a sliced copy
            }
        }
    }

    // the reasoning budget sampler is added first so it can force tokens before the other samplers run
    if (params.reasoning_budget_tokens >= 0 && !params.reasoning_budget_forced.empty()) {
        samplers.push_back(common_reasoning_budget_init(
            vocab,
            params.reasoning_budget_start,
            params.reasoning_budget_end,
            params.reasoning_budget_forced,
            params.reasoning_budget_tokens,
            prefill_tokens));
    }

    if (params.has_logit_bias()) {
        samplers.push_back(llama_sampler_init_logit_bias(llama_vocab_n_tokens(vocab), params.logit_bias.size(), params.logit_bias.data()));
    }

    if (params.mirostat == 0) {

        bool use_adaptive_p = false; // see below

        for (const auto & cnstr : params.samplers) {
            switch (cnstr) {
                case COMMON_SAMPLER_TYPE_DRY:
                    {
                        std::vector<const char *> c_breakers;
                        c_breakers.reserve(params.dry_sequence_breakers.size());
                        for (const auto & str : params.dry_sequence_breakers) {
                            c_breakers.push_back(str.c_str());
                        }
                        samplers.push_back(llama_sampler_init_dry(vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
                    }
                    break;
                case COMMON_SAMPLER_TYPE_TOP_K:
                    samplers.push_back(llama_sampler_init_top_k(params.top_k));
                    break;
                case COMMON_SAMPLER_TYPE_TOP_P:
                    samplers.push_back(llama_sampler_init_top_p(params.top_p, params.min_keep));
                    break;
                case COMMON_SAMPLER_TYPE_TOP_N_SIGMA:
                    samplers.push_back(llama_sampler_init_top_n_sigma(params.top_n_sigma));
                    break;
                case COMMON_SAMPLER_TYPE_MIN_P:
                    samplers.push_back(llama_sampler_init_min_p(params.min_p, params.min_keep));
                    break;
                case COMMON_SAMPLER_TYPE_XTC:
                    samplers.push_back(llama_sampler_init_xtc(params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
                    break;
                case COMMON_SAMPLER_TYPE_TYPICAL_P:
                    samplers.push_back(llama_sampler_init_typical(params.typ_p, params.min_keep));
                    break;
                case COMMON_SAMPLER_TYPE_TEMPERATURE:
                    samplers.push_back(llama_sampler_init_temp_ext(params.temp, params.dynatemp_range, params.dynatemp_exponent));
                    break;
                case COMMON_SAMPLER_TYPE_INFILL:
                    samplers.push_back(llama_sampler_init_infill(vocab));
                    break;
                case COMMON_SAMPLER_TYPE_PENALTIES:
                    samplers.push_back(llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
                    break;
                case COMMON_SAMPLER_TYPE_ADAPTIVE_P:
                    // the `adaptive-p` sampler is like `dist` and `mirostat` in that it selects
                    // a single token, so we add `dist` at the end of the chain by default,
                    // unless the user specifically included `adaptive-p`. we set this flag here
                    // so we know to add the sampler at the very end.
                    use_adaptive_p = true;
                    break;
                default:
                    GGML_ASSERT(false && "unknown sampler type");
            }
        }
        if (use_adaptive_p) {
            // only if the user explicitly included the adaptive-p sampler
            samplers.push_back(llama_sampler_init_adaptive_p(params.adaptive_target, params.adaptive_decay, params.seed));
        } else {
            // default: sample from the distribution
            samplers.push_back(llama_sampler_init_dist(params.seed));
        }
    } else if (params.mirostat == 1) {
        samplers.push_back(llama_sampler_init_temp(params.temp));
        samplers.push_back(llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), params.seed, params.mirostat_tau, params.mirostat_eta, 100));
    } else if (params.mirostat == 2) {
        samplers.push_back(llama_sampler_init_temp(params.temp));
        samplers.push_back(llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta));
    } else {
        GGML_ASSERT(false && "unknown mirostat version");
    }

    for (auto * smpl : samplers) {
        llama_sampler_chain_add(chain, smpl);
    }

    if (grmr && params.backend_sampling) {
        LOG_WRN("%s: backend sampling is not compatible with grammar, disabling\n", __func__);

        params.backend_sampling = false;
    }

    auto * result = new common_sampler {
        /* .params  = */ params,
        /* .grmr    = */ grmr,
        /* .chain   = */ chain,
        /* .prev    = */ ring_buffer<llama_token>(std::max(32, params.n_prev)),
        /* .cur     = */ {},
        /* .cur_p   = */ {},
    };

    return result;
}

void common_sampler_free(struct common_sampler * gsmpl) {
    if (!gsmpl) {
        return;
    }

    llama_sampler_free(gsmpl->grmr);
    llama_sampler_free(gsmpl->chain);

    delete gsmpl;
}
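
// Illustrative usage sketch (not part of the original file): the typical
// lifecycle of a common_sampler when generating tokens. The model and context
// are assumed to come from the caller, and the llama_decode plumbing is elided
// to keep the focus on the sampling API.
static void example_sampler_lifecycle(const llama_model * model, llama_context * ctx, int n_predict) {
    common_params_sampling sparams;
    sparams.top_k = 40;
    sparams.top_p = 0.95f;
    sparams.temp  = 0.8f;

    common_sampler * smpl = common_sampler_init(model, sparams);

    for (int i = 0; i < n_predict; ++i) {
        // sample from the last decoded position, then record the token in the
        // sampler state (history ring buffer, grammar, chain)
        const llama_token id = common_sampler_sample(smpl, ctx, /* idx = */ -1, /* grammar_first = */ false);
        common_sampler_accept(smpl, id, /* accept_grammar = */ true);

        // ... feed `id` back through llama_decode here ...
    }

    common_sampler_free(smpl);
}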

void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar) {
    if (!gsmpl) {
        return;
    }

    const auto tm = gsmpl->tm(); // scoped timer, accumulates into t_total_us

    if (gsmpl->grmr && accept_grammar) {
        llama_sampler_accept(gsmpl->grmr, token);
    }

    llama_sampler_accept(gsmpl->chain, token);

    gsmpl->prev.push_back(token);
}

void common_sampler_reset(struct common_sampler * gsmpl) {
    if (!gsmpl) {
        return;
    }

    gsmpl->reset();
}

struct common_sampler * common_sampler_clone(common_sampler * gsmpl) {
    return new common_sampler {
        /* .params  = */ gsmpl->params,
        /* .grmr    = */ gsmpl->grmr ? llama_sampler_clone(gsmpl->grmr) : nullptr,
        /* .chain   = */ llama_sampler_clone(gsmpl->chain),
        /* .prev    = */ gsmpl->prev,
        /* .cur     = */ gsmpl->cur,
        // note: the copied cur_p still points into the source's cur buffer;
        // it is re-initialized by set_logits() before the clone samples
        /* .cur_p   = */ gsmpl->cur_p,
    };
}
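
// Illustrative sketch (not part of the original file): common_sampler_clone
// gives each parallel sequence its own sampler state (token history, grammar
// state, chain state) while sharing the configuration.
static void example_clone_per_sequence(common_sampler * base, int n_seq) {
    std::vector<common_sampler *> per_seq(n_seq);
    for (int s = 0; s < n_seq; s++) {
        per_seq[s] = common_sampler_clone(base);
    }

    // ... sample each sequence with its own per_seq[s] ...

    for (auto * smpl : per_seq) {
        common_sampler_free(smpl);
    }
}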

void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl) {
    // TODO: measure grammar performance

    const double t_sampling_ms = gsmpl ? 1e-3*gsmpl->t_total_us : 0;

    llama_perf_sampler_data data_smpl;
    llama_perf_context_data data_ctx;

    memset(&data_smpl, 0, sizeof(data_smpl));
    memset(&data_ctx,  0, sizeof(data_ctx));

    if (gsmpl) {
        auto & data = data_smpl;

        data = llama_perf_sampler(gsmpl->chain);

        // note: the sampling time includes the samplers time + extra time spent in common/sampling
        LOG_INF("%s:    sampling time = %10.2f ms\n", __func__, t_sampling_ms);
        LOG_INF("%s:    samplers time = %10.2f ms / %5d tokens\n", __func__, data.t_sample_ms, data.n_sample);
    }

    if (ctx) {
        auto & data = data_ctx;

        data = llama_perf_context(ctx);

        const double t_end_ms = 1e-3 * ggml_time_us();

        const double t_total_ms = t_end_ms - data.t_start_ms;
        const double t_unacc_ms = t_total_ms - (t_sampling_ms + data.t_p_eval_ms + data.t_eval_ms);
        const double t_unacc_pc = 100.0 * t_unacc_ms / t_total_ms;

        LOG_INF("%s:        load time = %10.2f ms\n", __func__, data.t_load_ms);
        LOG_INF("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
                __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval);
        LOG_INF("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
                __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval);
        LOG_INF("%s:       total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
        LOG_INF("%s: unaccounted time = %10.2f ms / %5.1f %%      (total - sampling - prompt eval - eval) / (total)\n", __func__, t_unacc_ms, t_unacc_pc);
        LOG_INF("%s:    graphs reused = %10d\n", __func__, data.n_reused);

        llama_memory_breakdown_print(ctx);
    }
}

struct llama_sampler * common_sampler_get(const struct common_sampler * gsmpl) {
    if (!gsmpl) {
        return nullptr;
    }

    return gsmpl->chain;
}

llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) {
    llama_synchronize(ctx);

    // start measuring sampling time after the llama_context synchronization in order to not measure any ongoing async operations
    const auto tm = gsmpl->tm();

    llama_token id = LLAMA_TOKEN_NULL;

    auto & grmr  = gsmpl->grmr; // may be null when no grammar is configured
    auto & chain = gsmpl->chain;
    auto & cur_p = gsmpl->cur_p; // initialized by set_logits

    // check if a backend sampler has already sampled a token, in which case we
    // return that token id directly
    {
        id = llama_get_sampled_token_ith(ctx, idx);

        if (id != LLAMA_TOKEN_NULL) {
            LOG_DBG("%s: Backend sampler selected token: '%d'. Will not run any CPU samplers\n", __func__, id);

            GGML_ASSERT(!gsmpl->grmr && "using grammar in combination with backend sampling is not supported");

            // TODO: simplify
            gsmpl->cur.resize(1);
            gsmpl->cur[0] = { id, 0.0f, 1.0f };
            cur_p = { gsmpl->cur.data(), gsmpl->cur.size(), 0, true };

            return id;
        }
    }

    gsmpl->set_logits(ctx, idx);

    if (grammar_first && grmr) {
        llama_sampler_apply(grmr, &cur_p);
    }

    llama_sampler_apply(chain, &cur_p);

    id = cur_p.data[cur_p.selected].id;

    if (grammar_first || !grmr) {
        // the grammar (if any) was already applied, or there is none to check against
        return id;
    }

    // check if the sampled token fits the grammar (grammar-based rejection sampling)
    {
        llama_token_data       single_token_data       = { id, 1.0f, 0.0f };
        llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false };

        llama_sampler_apply(grmr, &single_token_data_array);

        const bool is_valid = single_token_data_array.data[0].logit != -INFINITY;
        if (is_valid) {
            return id;
        }
    }

    // resampling:
    // if the token is not valid, sample again, but first apply the grammar sampler and then the sampling chain
    gsmpl->set_logits(ctx, idx);

    llama_sampler_apply(grmr,  &cur_p);
    llama_sampler_apply(chain, &cur_p);

    GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration");

    id = cur_p.data[cur_p.selected].id;

    return id;
}

std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const llama_tokens & draft, bool grammar_first) {
    GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1");

    std::vector<llama_token> result;
    result.reserve(idxs.size());

    // verify the draft token by token, stopping at the first mismatch
    size_t i = 0;
    for (; i < draft.size(); i++) {
        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);

        common_sampler_accept(gsmpl, id, true);

        result.push_back(id);

        if (draft[i] != id) {
            break;
        }
    }

    // if the entire draft matched, sample one token past it
    if (i == draft.size()) {
        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);

        common_sampler_accept(gsmpl, id, true);

        result.push_back(id);
    }

    return result;
}

std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first) {
    std::vector<int> idxs(draft.size() + 1);
    for (size_t i = 0; i < idxs.size(); ++i) {
        idxs[i] = (int) i;
    }

    return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft, grammar_first);
}
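
// Illustrative sketch (not part of the original file): how the verification
// helper above is typically used for speculative decoding. The result contains
// every accepted draft token plus exactly one freshly sampled token (after the
// first mismatch, or after the full draft if everything matched), so
// result.size() - 1 draft tokens were accepted.
static void example_verify_draft(common_sampler * gsmpl, llama_context * ctx, const llama_tokens & draft) {
    // assumes positions 0..draft.size() of the last batch hold the target
    // model's outputs for the drafted tokens
    const std::vector<llama_token> result = common_sampler_sample_and_accept_n(gsmpl, ctx, draft, /* grammar_first = */ false);

    const size_t n_accepted = result.size() - 1;
    LOG_DBG("%s: accepted %zu of %zu draft tokens\n", __func__, n_accepted, draft.size());
}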

uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) {
    return llama_sampler_get_seed(gsmpl->chain);
}

// helpers

llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl, bool do_sort) {
    const auto tm = gsmpl->tm();

    auto * res = &gsmpl->cur_p;

    if (do_sort && !res->sorted) {
        // remember the selected token before sorting
        const llama_token id = res->data[res->selected].id;

        std::sort(res->data, res->data + res->size, [](const llama_token_data & a, const llama_token_data & b) {
            return a.p > b.p;
        });

        // restore the selected token after sorting
        for (size_t i = 0; i < res->size; ++i) {
            if (res->data[i].id == id) {
                res->selected = i;
                break;
            }
        }

        res->sorted = true;
    }

    return res;
}
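
// Illustrative sketch (not part of the original file): after sampling, the
// candidate array can be fetched sorted by probability to report the top-N
// token probabilities (e.g. for logprobs output). do_sort keeps the selected
// token stable across the sort.
static void example_report_top_candidates(common_sampler * gsmpl, int n_top) {
    const llama_token_data_array * cur_p = common_sampler_get_candidates(gsmpl, /* do_sort = */ true);

    const int n = std::min(n_top, (int) cur_p->size);
    for (int i = 0; i < n; i++) {
        LOG_DBG("cand %2d: token = %6d, p = %.4f\n", i, cur_p->data[i].id, cur_p->data[i].p);
    }
}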

llama_token common_sampler_last(const struct common_sampler * gsmpl) {
    return gsmpl->prev.rat(0);
}

std::string common_sampler_print(const struct common_sampler * gsmpl) {
    std::string result = "logits ";

    for (int i = 0; i < llama_sampler_chain_n(gsmpl->chain); i++) {
        const auto * smpl = llama_sampler_chain_get(gsmpl->chain, i);
        result += std::string("-> ");
        result += std::string(llama_sampler_name(smpl)) + " ";
    }

    return result;
}

std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx_main, int n) {
    n = std::min(n, (int) gsmpl->prev.size());

    if (n <= 0) {
        return "";
    }

    std::string result;
    result.reserve(8*n); // 8 is the average length of a token [citation needed], TODO: compute this from the vocab

    for (int i = n - 1; i >= 0; i--) {
        const llama_token id = gsmpl->prev.rat(i);

        GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - should not happen");

        result += common_token_to_piece(ctx_main, id);
    }

    return result;
}

char common_sampler_type_to_chr(enum common_sampler_type cnstr) {
    switch (cnstr) {
        case COMMON_SAMPLER_TYPE_DRY:         return 'd';
        case COMMON_SAMPLER_TYPE_TOP_K:       return 'k';
        case COMMON_SAMPLER_TYPE_TYPICAL_P:   return 'y';
        case COMMON_SAMPLER_TYPE_TOP_P:       return 'p';
        case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return 's';
        case COMMON_SAMPLER_TYPE_MIN_P:       return 'm';
        case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
        case COMMON_SAMPLER_TYPE_XTC:         return 'x';
        case COMMON_SAMPLER_TYPE_INFILL:      return 'i';
        case COMMON_SAMPLER_TYPE_PENALTIES:   return 'e';
        case COMMON_SAMPLER_TYPE_ADAPTIVE_P:  return 'a';
        default: return '?';
    }
}

std::string common_sampler_type_to_str(enum common_sampler_type cnstr) {
    switch (cnstr) {
        case COMMON_SAMPLER_TYPE_DRY:         return "dry";
        case COMMON_SAMPLER_TYPE_TOP_K:       return "top_k";
        case COMMON_SAMPLER_TYPE_TYPICAL_P:   return "typ_p";
        case COMMON_SAMPLER_TYPE_TOP_P:       return "top_p";
        case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return "top_n_sigma";
        case COMMON_SAMPLER_TYPE_MIN_P:       return "min_p";
        case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
        case COMMON_SAMPLER_TYPE_XTC:         return "xtc";
        case COMMON_SAMPLER_TYPE_INFILL:      return "infill";
        case COMMON_SAMPLER_TYPE_PENALTIES:   return "penalties";
        case COMMON_SAMPLER_TYPE_ADAPTIVE_P:  return "adaptive_p";
        default: return "";
    }
}

std::vector<common_sampler_type> common_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
    std::unordered_map<std::string, common_sampler_type> sampler_canonical_name_map {
        { "dry",         COMMON_SAMPLER_TYPE_DRY },
        { "top_k",       COMMON_SAMPLER_TYPE_TOP_K },
        { "top_p",       COMMON_SAMPLER_TYPE_TOP_P },
        { "top_n_sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
        { "typ_p",       COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "min_p",       COMMON_SAMPLER_TYPE_MIN_P },
        { "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
        { "xtc",         COMMON_SAMPLER_TYPE_XTC },
        { "infill",      COMMON_SAMPLER_TYPE_INFILL },
        { "penalties",   COMMON_SAMPLER_TYPE_PENALTIES },
        { "adaptive_p",  COMMON_SAMPLER_TYPE_ADAPTIVE_P },
    };

    // sampler names are written in multiple ways, so also accept the common
    // alternative spellings in addition to the canonical ones
    std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map {
        { "top-k",       COMMON_SAMPLER_TYPE_TOP_K },
        { "top-p",       COMMON_SAMPLER_TYPE_TOP_P },
        { "top-n-sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
        { "nucleus",     COMMON_SAMPLER_TYPE_TOP_P },
        { "typical-p",   COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "typical",     COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "typ-p",       COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "typ",         COMMON_SAMPLER_TYPE_TYPICAL_P },
        { "min-p",       COMMON_SAMPLER_TYPE_MIN_P },
        { "temp",        COMMON_SAMPLER_TYPE_TEMPERATURE },
        { "adaptive-p",  COMMON_SAMPLER_TYPE_ADAPTIVE_P },
    };

    std::vector<common_sampler_type> samplers;
    samplers.reserve(names.size());

    for (const auto & name : names) {
        auto sampler = sampler_canonical_name_map.find(name);
        if (sampler != sampler_canonical_name_map.end()) {
            samplers.push_back(sampler->second);
            continue;
        }
        if (allow_alt_names) {
            sampler = sampler_alt_name_map.find(name);
            if (sampler != sampler_alt_name_map.end()) {
                samplers.push_back(sampler->second);
                continue;
            }
        }
        LOG_WRN("%s: unable to match sampler by name '%s'\n", __func__, name.c_str());
    }

    return samplers;
}

std::vector<common_sampler_type> common_sampler_types_from_chars(const std::string & chars) {
    std::unordered_map<char, common_sampler_type> sampler_name_map = {
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_DRY),         COMMON_SAMPLER_TYPE_DRY },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K),       COMMON_SAMPLER_TYPE_TOP_K },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P),   COMMON_SAMPLER_TYPE_TYPICAL_P },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P),       COMMON_SAMPLER_TYPE_TOP_P },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_N_SIGMA), COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P),       COMMON_SAMPLER_TYPE_MIN_P },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC),         COMMON_SAMPLER_TYPE_XTC },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_INFILL),      COMMON_SAMPLER_TYPE_INFILL },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_PENALTIES),   COMMON_SAMPLER_TYPE_PENALTIES },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_ADAPTIVE_P),  COMMON_SAMPLER_TYPE_ADAPTIVE_P },
    };

    std::vector<common_sampler_type> samplers;
    samplers.reserve(chars.size());

    for (const auto & c : chars) {
        const auto sampler = sampler_name_map.find(c);
        if (sampler != sampler_name_map.end()) {
            samplers.push_back(sampler->second);
        } else {
            LOG_WRN("%s: unable to match sampler by char '%c'\n", __func__, c);
        }
    }

    return samplers;
}
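
// Illustrative sketch (not part of the original file): the two parsers above
// accept full names (with alternative spellings) or the single-character
// shorthand. Both calls below yield the same chain:
// top_k -> typ_p -> top_p -> min_p -> xtc -> temperature.
static void example_parse_sampler_chain() {
    const auto by_name = common_sampler_types_from_names(
        { "top-k", "typical", "top_p", "min-p", "xtc", "temp" }, /* allow_alt_names = */ true);
    const auto by_chr = common_sampler_types_from_chars("kypmxt");

    GGML_ASSERT(by_name == by_chr);
}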