Coverage Report

Created: 2026-04-12 06:40

/src/llama.cpp/common/sampling.cpp
Line | Count | Source
-----+-------+---------------------------------------------------------------------------
   1 |       | #include "sampling.h"
   2 |       |
   3 |       | #include "common.h"
   4 |       | #include "ggml.h"
   5 |       | #include "log.h"
   6 |       | #include "reasoning-budget.h"
   7 |       |
   8 |       | #include <algorithm>
   9 |       | #include <cctype>
  10 |       | #include <climits>
  11 |       | #include <cmath>
  12 |       | #include <cstring>
  13 |       | #include <unordered_map>
  14 |       | #include <vector>
  15 |       |
  16 |       | // the ring buffer works similarly to std::deque, but with a fixed capacity
  17 |       | // TODO: deduplicate with llama-impl.h
  18 |       | template<typename T>
  19 |       | struct ring_buffer {
  20 |     0 |     ring_buffer(size_t cap) : capacity(cap), data(cap) {}
  21 |       |
  22 |       |     T & front() {
  23 |       |         if (sz == 0) {
  24 |       |             throw std::runtime_error("ring buffer is empty");
  25 |       |         }
  26 |       |         return data[first];
  27 |       |     }
  28 |       |
  29 |       |     const T & front() const {
  30 |       |         if (sz == 0) {
  31 |       |             throw std::runtime_error("ring buffer is empty");
  32 |       |         }
  33 |       |         return data[first];
  34 |       |     }
  35 |       |
  36 |       |     T & back() {
  37 |       |         if (sz == 0) {
  38 |       |             throw std::runtime_error("ring buffer is empty");
  39 |       |         }
  40 |       |         return data[pos];
  41 |       |     }
  42 |       |
  43 |       |     const T & back() const {
  44 |       |         if (sz == 0) {
  45 |       |             throw std::runtime_error("ring buffer is empty");
  46 |       |         }
  47 |       |         return data[pos];
  48 |       |     }
  49 |       |
  50 |     0 |     void push_back(const T & value) {
  51 |     0 |         if (sz == capacity) {
  52 |       |             // advance the start when the buffer is full
  53 |     0 |             first = (first + 1) % capacity;
  54 |     0 |         } else {
  55 |     0 |             sz++;
  56 |     0 |         }
  57 |     0 |         data[pos] = value;
  58 |     0 |         pos = (pos + 1) % capacity;
  59 |     0 |     }
  60 |       |
  61 |       |     T pop_front() {
  62 |       |         if (sz == 0) {
  63 |       |             throw std::runtime_error("ring buffer is empty");
  64 |       |         }
  65 |       |         T value = data[first];
  66 |       |         first = (first + 1) % capacity;
  67 |       |         sz--;
  68 |       |         return value;
  69 |       |     }
  70 |       |
  71 |     0 |     const T & rat(size_t i) const {
  72 |     0 |         if (i >= sz) {
  73 |     0 |             throw std::runtime_error("ring buffer: index out of bounds");
  74 |     0 |         }
  75 |     0 |         return data[(first + sz - i - 1) % capacity];
  76 |     0 |     }
  77 |       |
  78 |       |     std::vector<T> to_vector() const {
  79 |       |         std::vector<T> result;
  80 |       |         result.reserve(sz);
  81 |       |         for (size_t i = 0; i < sz; i++) {
  82 |       |             result.push_back(data[(first + i) % capacity]);
  83 |       |         }
  84 |       |         return result;
  85 |       |     }
  86 |       |
  87 |     0 |     void clear() {
  88 |       |         // reset only the bookkeeping state of the buffer
  89 |     0 |         sz = 0;
  90 |     0 |         first = 0;
  91 |     0 |         pos = 0;
  92 |     0 |     }
  93 |       |
  94 |       |     bool empty() const {
  95 |       |         return sz == 0;
  96 |       |     }
  97 |       |
  98 |     0 |     size_t size() const {
  99 |     0 |         return sz;
 100 |     0 |     }
 101 |       |
 102 |       |     size_t capacity = 0;
 103 |       |     size_t sz = 0;
 104 |       |     size_t first = 0;
 105 |       |     size_t pos = 0;
 106 |       |     std::vector<T> data;
 107 |       | };
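
This file exercises the ring_buffer only through push_back(), rat(), clear() and size(). Below is a minimal usage sketch (illustrative only, not part of the measured source; it assumes the struct compiles standalone with <stdexcept> available for std::runtime_error). Note that back() returns data[pos], which is the next write slot rather than the most recently pushed element, so the sketch uses rat(0) for the newest entry.

    #include <cassert>

    int main() {
        ring_buffer<int> rb(3);      // capacity 3

        rb.push_back(1);
        rb.push_back(2);
        rb.push_back(3);
        rb.push_back(4);             // full: overwrites the oldest element (1)

        assert(rb.front() == 2);     // oldest surviving element
        assert(rb.rat(0) == 4);      // rat(i) counts back from the newest element
        assert(rb.rat(2) == 2);

        assert(rb.pop_front() == 2); // removes and returns the oldest element
        assert(rb.size() == 2);
        return 0;
    }
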
 108 |       |
 109 |       | struct common_sampler {
 110 |       |     common_params_sampling params;
 111 |       |
 112 |       |     struct llama_sampler * grmr;
 113 |       |     struct llama_sampler * rbudget;
 114 |       |     struct llama_sampler * chain;
 115 |       |
 116 |       |     ring_buffer<llama_token> prev;
 117 |       |
 118 |       |     std::vector<llama_token_data> cur;
 119 |       |
 120 |       |     llama_token_data_array cur_p;
 121 |       |
 122 |     0 |     void reset() {
 123 |     0 |         prev.clear();
 124 |       |
 125 |     0 |         llama_sampler_reset(chain);
 126 |     0 |     }
 127 |       |
 128 |     0 |     void set_logits(struct llama_context * ctx, int idx) {
 129 |     0 |         const float *       sampled_probs  = llama_get_sampled_probs_ith     (ctx, idx);
 130 |     0 |         const float *       sampled_logits = llama_get_sampled_logits_ith    (ctx, idx);
 131 |     0 |         const llama_token * sampled_ids    = llama_get_sampled_candidates_ith(ctx, idx);
 132 |       |
 133 |     0 |         const llama_model * model = llama_get_model(ctx);
 134 |     0 |         const llama_vocab * vocab = llama_model_get_vocab(model);
 135 |       |
 136 |     0 |         const int n_vocab = llama_vocab_n_tokens(vocab);
 137 |       |
 138 |     0 |         if (sampled_probs) {
 139 |     0 |             const uint32_t sampled_probs_count = llama_get_sampled_probs_count_ith(ctx, idx);
 140 |     0 |             cur.resize(sampled_probs_count);
 141 |     0 |             for (uint32_t i = 0; i < sampled_probs_count; ++i) {
 142 |     0 |                 cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], sampled_probs[i]};
 143 |     0 |             }
 144 |     0 |         } else if (sampled_logits) {
 145 |     0 |             const uint32_t sampled_logits_count = llama_get_sampled_logits_count_ith(ctx, idx);
 146 |     0 |             cur.resize(sampled_logits_count);
 147 |     0 |             for (uint32_t i = 0; i < sampled_logits_count; i++) {
 148 |     0 |                 cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], 0.0f};
 149 |     0 |             }
 150 |     0 |         } else {
 151 |     0 |             const auto * logits = llama_get_logits_ith(ctx, idx);
 152 |     0 |             GGML_ASSERT(logits != nullptr);
 153 |     0 |             cur.resize(n_vocab);
 154 |     0 |             for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
 155 |     0 |                 cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
 156 |     0 |             }
 157 |     0 |         }
 158 |       |
 159 |     0 |         cur_p = { cur.data(), cur.size(), -1, false };
 160 |     0 |     }
 161 |       |
 162 |     0 |     common_time_meas tm() {
 163 |     0 |         return common_time_meas(t_total_us, params.no_perf);
 164 |     0 |     }
 165 |       |
 166 |       |     mutable int64_t t_total_us = 0;
 167 |       | };
 168 |       |
 169 |     0 | std::string common_params_sampling::print() const {
 170 |     0 |     char result[1024];
 171 |       |
 172 |     0 |     snprintf(result, sizeof(result),
 173 |     0 |             "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n"
 174 |     0 |             "\tdry_multiplier = %.3f, dry_base = %.3f, dry_allowed_length = %d, dry_penalty_last_n = %d\n"
 175 |     0 |             "\ttop_k = %d, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, top_n_sigma = %.3f, temp = %.3f\n"
 176 |     0 |             "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f, adaptive_target = %.3f, adaptive_decay = %.3f",
 177 |     0 |             penalty_last_n, penalty_repeat, penalty_freq, penalty_present,
 178 |     0 |             dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n,
 179 |     0 |             top_k, top_p, min_p, xtc_probability, xtc_threshold, typ_p, top_n_sigma, temp,
 180 |     0 |             mirostat, mirostat_eta, mirostat_tau, adaptive_target, adaptive_decay);
 181 |       |
 182 |     0 |     return std::string(result);
 183 |     0 | }
 184 |       |
 185 |     0 | struct common_sampler * common_sampler_init(const struct llama_model * model, struct common_params_sampling & params) {
 186 |     0 |     const llama_vocab * vocab = llama_model_get_vocab(model);
 187 |       |
 188 |     0 |     llama_sampler_chain_params lparams = llama_sampler_chain_default_params();
 189 |       |
 190 |     0 |     lparams.no_perf = params.no_perf;
 191 |       |
 192 |     0 |     llama_sampler * grmr = nullptr;
 193 |     0 |     llama_sampler * rbudget = nullptr;
 194 |     0 |     llama_sampler * chain = llama_sampler_chain_init(lparams);
 195 |       |
 196 |     0 |     std::vector<llama_sampler *> samplers;
 197 |       |
 198 |     0 |     const std::string & grammar_str = common_grammar_value(params.grammar);
 199 |     0 |     if (grammar_str.compare(0, 11, "%llguidance") == 0) {
 200 |       | #ifdef LLAMA_USE_LLGUIDANCE
 201 |       |         grmr = llama_sampler_init_llg(vocab, "lark", grammar_str.c_str());
 202 |       | #else
 203 |     0 |         GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
 204 |     0 | #endif // LLAMA_USE_LLGUIDANCE
 205 |     0 |     } else {
 206 |     0 |         std::vector<std::string> trigger_patterns;
 207 |     0 |         std::vector<llama_token> trigger_tokens;
 208 |     0 |         for (const auto & trigger : params.grammar_triggers) {
 209 |     0 |             switch (trigger.type) {
 210 |     0 |                 case COMMON_GRAMMAR_TRIGGER_TYPE_WORD:
 211 |     0 |                 {
 212 |     0 |                     const auto & word = trigger.value;
 213 |     0 |                     trigger_patterns.push_back(regex_escape(word));
 214 |     0 |                     break;
 215 |     0 |                 }
 216 |     0 |                 case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN:
 217 |     0 |                 {
 218 |     0 |                     trigger_patterns.push_back(trigger.value);
 219 |     0 |                     break;
 220 |     0 |                 }
 221 |     0 |                 case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL:
 222 |     0 |                 {
 223 |     0 |                     const auto & pattern = trigger.value;
 224 |     0 |                     std::string anchored = "^$";
 225 |     0 |                     if (!pattern.empty()) {
 226 |     0 |                         anchored = (pattern.front() != '^' ? "^" : "")
 227 |     0 |                             + pattern
 228 |     0 |                             + (pattern.back() != '$' ? "$" : "");
 229 |     0 |                     }
 230 |     0 |                     trigger_patterns.push_back(anchored);
 231 |     0 |                     break;
 232 |     0 |                 }
 233 |     0 |                 case COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN:
 234 |     0 |                 {
 235 |     0 |                     const auto token = trigger.token;
 236 |     0 |                     trigger_tokens.push_back(token);
 237 |     0 |                     break;
 238 |     0 |                 }
 239 |     0 |                 default:
 240 |     0 |                     GGML_ASSERT(false && "unknown trigger type");
 241 |     0 |             }
 242 |     0 |         }
 243 |       |
 244 |     0 |         std::vector<const char *> trigger_patterns_c;
 245 |     0 |         trigger_patterns_c.reserve(trigger_patterns.size());
 246 |     0 |         for (const auto & regex : trigger_patterns) {
 247 |     0 |             trigger_patterns_c.push_back(regex.c_str());
 248 |     0 |         }
 249 |       |
 250 |     0 |         if (!grammar_str.empty()) {
 251 |     0 |              if (params.grammar_lazy) {
 252 |     0 |                  grmr = llama_sampler_init_grammar_lazy_patterns(vocab, grammar_str.c_str(), "root",
 253 |     0 |                          trigger_patterns_c.data(), trigger_patterns_c.size(),
 254 |     0 |                          trigger_tokens.data(), trigger_tokens.size());
 255 |     0 |              } else {
 256 |     0 |                  grmr = llama_sampler_init_grammar(vocab, grammar_str.c_str(), "root");
 257 |     0 |              }
 258 |     0 |         }
 259 |     0 |     }
 260 |       |
 261 |       |     // Feed generation prompt tokens to the grammar sampler so it advances past
 262 |       |     // tokens the template already placed in the prompt.
 263 |       |     // Only applies to output-format and tool-call grammars; user-supplied grammars must not be prefilled.
 264 |     0 |     std::vector<llama_token> prefill_tokens;
 265 |     0 |     if (!params.generation_prompt.empty() && common_grammar_needs_prefill(params.grammar)) {
 266 |     0 |         GGML_ASSERT(vocab != nullptr);
 267 |     0 |         prefill_tokens = common_tokenize(vocab, params.generation_prompt, false, true);
 268 |     0 |         if (!prefill_tokens.empty()) {
 269 |     0 |             std::string first_token = common_token_to_piece(vocab, prefill_tokens[0], true);
 270 |     0 |             if (std::isspace(first_token[0]) && !std::isspace(params.generation_prompt[0])) {
 271 |       |                 // Some tokenizers will add a space before the first special token; remove it here
 272 |     0 |                 prefill_tokens = std::vector<llama_token>(prefill_tokens.begin() + 1, prefill_tokens.end());
 273 |     0 |             }
 274 |     0 |         }
 275 |       |
 276 |     0 |         if (grmr && !params.grammar_lazy) {
 277 |     0 |             try {
 278 |     0 |                 for (const auto & token : prefill_tokens) {
 279 |     0 |                     llama_sampler_accept(grmr, token);
 280 |     0 |                     LOG_DBG("%s: accepted prefill token (%d)\n", __func__, token);
 281 |     0 |                 }
 282 |     0 |             } catch (std::exception &e) {
 283 |     0 |                 LOG_ERR("%s: error initializing grammar sampler for grammar:\n%s\n\nGeneration prompt:\n'%s'\n", __func__,
 284 |     0 |                     common_grammar_value(params.grammar).c_str(), params.generation_prompt.c_str());
 285 |     0 |                 throw e;
 286 |     0 |             }
 287 |     0 |         }
 288 |     0 |     }
 289 |       |
 290 |       |     // reasoning budget sampler
 291 |     0 |     if (!params.reasoning_budget_start.empty() && !params.reasoning_budget_end.empty()) {
 292 |     0 |         rbudget = common_reasoning_budget_init(
 293 |     0 |             vocab,
 294 |     0 |             params.reasoning_budget_start,
 295 |     0 |             params.reasoning_budget_end,
 296 |     0 |             params.reasoning_budget_forced,
 297 |     0 |             params.reasoning_budget_tokens < 0 ? INT_MAX : params.reasoning_budget_tokens,
 298 |     0 |             prefill_tokens);
 299 |     0 |     }
 300 |       |
 301 |     0 |     if (params.has_logit_bias()) {
 302 |     0 |         samplers.push_back(llama_sampler_init_logit_bias(llama_vocab_n_tokens(vocab), params.logit_bias.size(), params.logit_bias.data()));
 303 |     0 |     }
 304 |       |
 305 |     0 |     if (params.mirostat == 0) {
 306 |       |
 307 |     0 |         bool use_adaptive_p = false; // see below
 308 |       |
 309 |     0 |         for (const auto & cnstr : params.samplers) {
 310 |     0 |             switch (cnstr) {
 311 |     0 |                 case COMMON_SAMPLER_TYPE_DRY:
 312 |     0 |                     {
 313 |     0 |                         std::vector<const char *> c_breakers;
 314 |     0 |                         c_breakers.reserve(params.dry_sequence_breakers.size());
 315 |     0 |                         for (const auto & str : params.dry_sequence_breakers) {
 316 |     0 |                             c_breakers.push_back(str.c_str());
 317 |     0 |                         }
 318 |     0 |                         samplers.push_back(llama_sampler_init_dry(vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
 319 |     0 |                     }
 320 |     0 |                     break;
 321 |     0 |                 case COMMON_SAMPLER_TYPE_TOP_K:
 322 |     0 |                     samplers.push_back(llama_sampler_init_top_k(params.top_k));
 323 |     0 |                     break;
 324 |     0 |                 case COMMON_SAMPLER_TYPE_TOP_P:
 325 |     0 |                     samplers.push_back(llama_sampler_init_top_p(params.top_p, params.min_keep));
 326 |     0 |                     break;
 327 |     0 |                 case COMMON_SAMPLER_TYPE_TOP_N_SIGMA:
 328 |     0 |                     samplers.push_back(llama_sampler_init_top_n_sigma(params.top_n_sigma));
 329 |     0 |                     break;
 330 |     0 |                 case COMMON_SAMPLER_TYPE_MIN_P:
 331 |     0 |                     samplers.push_back(llama_sampler_init_min_p(params.min_p, params.min_keep));
 332 |     0 |                     break;
 333 |     0 |                 case COMMON_SAMPLER_TYPE_XTC:
 334 |     0 |                     samplers.push_back(llama_sampler_init_xtc(params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
 335 |     0 |                     break;
 336 |     0 |                 case COMMON_SAMPLER_TYPE_TYPICAL_P:
 337 |     0 |                     samplers.push_back(llama_sampler_init_typical(params.typ_p, params.min_keep));
 338 |     0 |                     break;
 339 |     0 |                 case COMMON_SAMPLER_TYPE_TEMPERATURE:
 340 |     0 |                     samplers.push_back(llama_sampler_init_temp_ext(params.temp, params.dynatemp_range, params.dynatemp_exponent));
 341 |     0 |                     break;
 342 |     0 |                 case COMMON_SAMPLER_TYPE_INFILL:
 343 |     0 |                     samplers.push_back(llama_sampler_init_infill(vocab));
 344 |     0 |                     break;
 345 |     0 |                 case COMMON_SAMPLER_TYPE_PENALTIES:
 346 |     0 |                     samplers.push_back(llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
 347 |     0 |                     break;
 348 |     0 |                 case COMMON_SAMPLER_TYPE_ADAPTIVE_P:
 349 |       |                     // the `adaptive-p` sampler is like `dist` and `mirostat` in that it selects
 350 |       |                     // a single token, so we will add `dist` at the end of the chain by default,
 351 |       |                     // unless the user specifically included `adaptive-p`. we set this flag here
 352 |       |                     // so we know to add the sampler at the very end.
 353 |     0 |                     use_adaptive_p = true;
 354 |     0 |                     break;
 355 |     0 |                 default:
 356 |     0 |                     GGML_ASSERT(false && "unknown sampler type");
 357 |     0 |             }
 358 |     0 |         }
 359 |     0 |         if (use_adaptive_p) {
 360 |       |             // only if the user explicitly included the adaptive-p sampler
 361 |     0 |             samplers.push_back(llama_sampler_init_adaptive_p(params.adaptive_target, params.adaptive_decay, params.seed));
 362 |     0 |         } else {
 363 |       |             // default: sample from the distribution
 364 |     0 |             samplers.push_back(llama_sampler_init_dist(params.seed));
 365 |     0 |         }
 366 |     0 |     } else if (params.mirostat == 1) {
 367 |     0 |         samplers.push_back(llama_sampler_init_temp(params.temp));
 368 |     0 |         samplers.push_back(llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), params.seed, params.mirostat_tau, params.mirostat_eta, 100));
 369 |     0 |     } else if (params.mirostat == 2) {
 370 |     0 |         samplers.push_back(llama_sampler_init_temp(params.temp));
 371 |     0 |         samplers.push_back(llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta));
 372 |     0 |     } else {
 373 |     0 |         GGML_ASSERT(false && "unknown mirostat version");
 374 |     0 |     }
 375 |       |
 376 |     0 |     for (auto * smpl : samplers) {
 377 |     0 |         llama_sampler_chain_add(chain, smpl);
 378 |     0 |     }
 379 |       |
 380 |     0 |     if (grmr && params.backend_sampling) {
 381 |     0 |         LOG_WRN("%s: backend sampling is not compatible with grammar, disabling\n", __func__);
 382 |       |
 383 |     0 |         params.backend_sampling = false;
 384 |     0 |     }
 385 |       |
 386 |     0 |     if (rbudget && params.backend_sampling) {
 387 |     0 |         LOG_WRN("%s: backend sampling is not compatible with reasoning budget, disabling\n", __func__);
 388 |       |
 389 |     0 |         params.backend_sampling = false;
 390 |     0 |     }
 391 |       |
 392 |     0 |     auto * result = new common_sampler {
 393 |     0 |         /* .params  = */ params,
 394 |     0 |         /* .grmr    = */ grmr,
 395 |     0 |         /* .rbudget = */ rbudget,
 396 |     0 |         /* .chain   = */ chain,
 397 |     0 |         /* .prev    = */ ring_buffer<llama_token>(std::max(32, params.n_prev)),
 398 |     0 |         /* .cur     = */ {},
 399 |     0 |         /* .cur_p   = */ {},
 400 |     0 |     };
 401 |       |
 402 |     0 |     return result;
 403 |     0 | }
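
A sketch of the lifecycle implied by common_sampler_init() and common_sampler_free() (hypothetical usage, not part of the measured source; it assumes `model` is a loaded llama_model and uses the common_params_sampling fields referenced by print() above).

    common_params_sampling sparams;
    sparams.top_k = 40;
    sparams.top_p = 0.95f;
    sparams.temp  = 0.8f;
    // with mirostat == 0 the chain ends in `dist` by default,
    // unless `adaptive_p` is listed explicitly in sparams.samplers

    struct common_sampler * smpl = common_sampler_init(model, sparams);
    LOG_INF("chain: %s\n", common_sampler_print(smpl).c_str());
    // ... generate ...
    common_sampler_free(smpl); // also frees grmr, rbudget and the chain
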
 404 |       |
 405 |     0 | void common_sampler_free(struct common_sampler * gsmpl) {
 406 |     0 |     if (!gsmpl) {
 407 |     0 |         return;
 408 |     0 |     }
 409 |       |
 410 |     0 |     llama_sampler_free(gsmpl->grmr);
 411 |     0 |     llama_sampler_free(gsmpl->rbudget);
 412 |     0 |     llama_sampler_free(gsmpl->chain);
 413 |       |
 414 |     0 |     delete gsmpl;
 415 |     0 | }
 416 |       |
 417 |     0 | static bool grammar_should_apply(struct common_sampler * gsmpl) {
 418 |     0 |     if (!gsmpl->grmr) {
 419 |     0 |         return false;
 420 |     0 |     }
 421 |     0 |     if (!gsmpl->rbudget) {
 422 |     0 |         return true;
 423 |     0 |     }
 424 |     0 |     if (gsmpl->params.grammar_lazy) {
 425 |       |         // if the grammar is lazy, only apply it when the reasoning budget is not active
 426 |     0 |         const auto state = common_reasoning_budget_get_state(gsmpl->rbudget);
 427 |     0 |         return state == REASONING_BUDGET_IDLE || state == REASONING_BUDGET_DONE;
 428 |     0 |     }
 429 |     0 |     return true;
 430 |     0 | }
 431 |       |
 432 |     0 | void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar) {
 433 |     0 |     if (!gsmpl) {
 434 |     0 |         return;
 435 |     0 |     }
 436 |       |
 437 |     0 |     const auto tm = gsmpl->tm();
 438 |       |
 439 |       |     // grammar_should_apply() checks the reasoning budget state, so calculate this before we accept
 440 |     0 |     accept_grammar = accept_grammar && grammar_should_apply(gsmpl);
 441 |       |
 442 |     0 |     llama_sampler_accept(gsmpl->rbudget, token);
 443 |       |
 444 |     0 |     if (gsmpl->grmr && accept_grammar) {
 445 |     0 |         llama_sampler_accept(gsmpl->grmr, token);
 446 |     0 |     }
 447 |       |
 448 |     0 |     llama_sampler_accept(gsmpl->chain, token);
 449 |       |
 450 |     0 |     gsmpl->prev.push_back(token);
 451 |     0 | }
 452 |       |
 453 |     0 | void common_sampler_reset(struct common_sampler * gsmpl) {
 454 |     0 |     if (!gsmpl) {
 455 |     0 |         return;
 456 |     0 |     }
 457 |       |
 458 |     0 |     gsmpl->reset();
 459 |     0 | }
 460 |       |
 461 |     0 | struct common_sampler * common_sampler_clone(common_sampler * gsmpl) {
 462 |     0 |     return new common_sampler {
 463 |     0 |         /* .params  = */ gsmpl->params,
 464 |     0 |         /* .grmr    = */ llama_sampler_clone(gsmpl->grmr),
 465 |     0 |         /* .rbudget = */ llama_sampler_clone(gsmpl->rbudget),
 466 |     0 |         /* .chain   = */ llama_sampler_clone(gsmpl->chain),
 467 |     0 |         /* .prev    = */ gsmpl->prev,
 468 |     0 |         /* .cur     = */ gsmpl->cur,
 469 |     0 |         /* .cur_p   = */ gsmpl->cur_p,
 470 |     0 |     };
 471 |     0 | }
 472 |       |
 473 |     0 | void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl) {
 474 |       |     // TODO: measure grammar performance
 475 |       |
 476 |     0 |     const double t_sampling_ms = gsmpl ? 1e-3*gsmpl->t_total_us : 0;
 477 |       |
 478 |     0 |     llama_perf_sampler_data data_smpl;
 479 |     0 |     llama_perf_context_data data_ctx;
 480 |       |
 481 |     0 |     memset(&data_smpl, 0, sizeof(data_smpl));
 482 |     0 |     memset(&data_ctx,  0, sizeof(data_ctx));
 483 |       |
 484 |     0 |     if (gsmpl) {
 485 |     0 |         auto & data = data_smpl;
 486 |       |
 487 |     0 |         data = llama_perf_sampler(gsmpl->chain);
 488 |       |
 489 |       |         // note: the sampling time includes the samplers time + extra time spent in common/sampling
 490 |     0 |         LOG_INF("%s:    sampling time = %10.2f ms\n", __func__, t_sampling_ms);
 491 |     0 |         LOG_INF("%s:    samplers time = %10.2f ms / %5d tokens\n", __func__, data.t_sample_ms, data.n_sample);
 492 |     0 |     }
 493 |       |
 494 |     0 |     if (ctx) {
 495 |     0 |         auto & data = data_ctx;
 496 |       |
 497 |     0 |         data = llama_perf_context(ctx);
 498 |       |
 499 |     0 |         const double t_end_ms = 1e-3 * ggml_time_us();
 500 |       |
 501 |     0 |         const double t_total_ms = t_end_ms - data.t_start_ms;
 502 |     0 |         const double t_unacc_ms = t_total_ms - (t_sampling_ms + data.t_p_eval_ms + data.t_eval_ms);
 503 |     0 |         const double t_unacc_pc = 100.0 * t_unacc_ms /  t_total_ms;
 504 |       |
 505 |     0 |         LOG_INF("%s:        load time = %10.2f ms\n", __func__, data.t_load_ms);
 506 |     0 |         LOG_INF("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
 507 |     0 |                 __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval);
 508 |     0 |         LOG_INF("%s:        eval time = %10.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
 509 |     0 |                 __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval);
 510 |     0 |         LOG_INF("%s:       total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval));
 511 |     0 |         LOG_INF("%s: unaccounted time = %10.2f ms / %5.1f %%      (total - sampling - prompt eval - eval) / (total)\n", __func__, t_unacc_ms, t_unacc_pc);
 512 |     0 |         LOG_INF("%s:    graphs reused = %10d\n", __func__, data.n_reused);
 513 |       |
 514 |     0 |         llama_memory_breakdown_print(ctx);
 515 |     0 |     }
 516 |     0 | }
 517 |       |
 518 |     0 | struct llama_sampler * common_sampler_get(const struct common_sampler * gsmpl) {
 519 |     0 |     if (!gsmpl) {
 520 |     0 |         return nullptr;
 521 |     0 |     }
 522 |       |
 523 |     0 |     return gsmpl->chain;
 524 |     0 | }
 525 |       |
 526 |     0 | llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) {
 527 |     0 |     llama_synchronize(ctx);
 528 |       |
 529 |       |     // start measuring sampling time after the llama_context synchronization in order to not measure any ongoing async operations
 530 |     0 |     const auto tm = gsmpl->tm();
 531 |       |
 532 |     0 |     llama_token id = LLAMA_TOKEN_NULL;
 533 |       |
 534 |     0 |     auto & grmr  = gsmpl->grmr;
 535 |     0 |     auto & rbudget = gsmpl->rbudget;
 536 |     0 |     auto & chain = gsmpl->chain;
 537 |     0 |     auto & cur_p = gsmpl->cur_p; // initialized by set_logits
 538 |       |
 539 |       |     // Check if a backend sampler has already sampled a token, in which case we
 540 |       |     // return that token id directly.
 541 |     0 |     {
 542 |     0 |         id = llama_get_sampled_token_ith(ctx, idx);
 543 |       |
 544 |     0 |         if (id != LLAMA_TOKEN_NULL) {
 545 |     0 |             LOG_DBG("%s: Backend sampler selected token: '%d'. Will not run any CPU samplers\n", __func__, id);
 546 |       |
 547 |     0 |             GGML_ASSERT(!gsmpl->grmr    && "using grammar in combination with backend sampling is not supported");
 548 |     0 |             GGML_ASSERT(!gsmpl->rbudget && "using reasoning budget in combination with backend sampling is not supported");
 549 |       |
 550 |       |             // TODO: simplify
 551 |     0 |             gsmpl->cur.resize(1);
 552 |     0 |             gsmpl->cur[0] = { id, 0.0f, 1.0f };
 553 |     0 |             cur_p = { gsmpl->cur.data(), gsmpl->cur.size(), 0, true };
 554 |       |
 555 |     0 |             return id;
 556 |     0 |         }
 557 |     0 |     }
 558 |       |
 559 |     0 |     gsmpl->set_logits(ctx, idx);
 560 |       |
 561 |       |     // apply the reasoning budget first
 562 |     0 |     llama_sampler_apply(rbudget, &cur_p);
 563 |       |
 564 |     0 |     if (grammar_first && grammar_should_apply(gsmpl)) {
 565 |     0 |         llama_sampler_apply(grmr, &cur_p);
 566 |     0 |     }
 567 |       |
 568 |     0 |     llama_sampler_apply(chain, &cur_p);
 569 |       |
 570 |     0 |     id = cur_p.data[cur_p.selected].id;
 571 |       |
 572 |     0 |     if (grammar_first || !grammar_should_apply(gsmpl)) {
 573 |     0 |         return id;
 574 |     0 |     }
 575 |       |
 576 |       |     // check if the sampled token fits the grammar (grammar-based rejection sampling)
 577 |     0 |     {
 578 |     0 |         llama_token_data       single_token_data       = { id, 1.0f, 0.0f };
 579 |     0 |         llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false };
 580 |       |
 581 |     0 |         llama_sampler_apply(grmr, &single_token_data_array);
 582 |       |
 583 |     0 |         const bool is_valid = single_token_data_array.data[0].logit != -INFINITY;
 584 |     0 |         if (is_valid) {
 585 |     0 |             return id;
 586 |     0 |         }
 587 |     0 |     }
 588 |       |
 589 |       |     // resampling:
 590 |       |     // if the token is not valid, sample again, but first apply the grammar sampler and then the sampling chain
 591 |     0 |     gsmpl->set_logits(ctx, idx);
 592 |       |
 593 |     0 |     llama_sampler_apply(rbudget,  &cur_p);
 594 |       |
 595 |     0 |     if (grammar_should_apply(gsmpl)) {
 596 |     0 |         llama_sampler_apply(grmr,  &cur_p);
 597 |     0 |     }
 598 |       |
 599 |     0 |     llama_sampler_apply(chain, &cur_p);
 600 |       |
 601 |     0 |     GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration");
 602 |       |
 603 |     0 |     id = cur_p.data[cur_p.selected].id;
 604 |       |
 605 |     0 |     return id;
 606 |     0 | }
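
A typical per-token decode loop over common_sampler_sample() and common_sampler_accept() (a sketch under the assumption that `ctx` holds a freshly decoded batch and `smpl` was created as in the earlier sketch; idx = -1 addresses the logits of the last token, and error handling is omitted).

    const llama_vocab * vocab = llama_model_get_vocab(llama_get_model(ctx));

    while (true) {
        // sample from the logits at the last position of the batch
        const llama_token id = common_sampler_sample(smpl, ctx, /* idx */ -1, /* grammar_first */ false);

        // record the token in the grammar, reasoning budget, chain and history
        common_sampler_accept(smpl, id, /* accept_grammar */ true);

        if (llama_vocab_is_eog(vocab, id)) {
            break;
        }
        // ... append `id` to the next batch and call llama_decode(ctx, ...) ...
    }
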
 607 |       |
 608 |     0 | std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const llama_tokens & draft, bool grammar_first) {
 609 |     0 |     GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1");
 610 |       |
 611 |     0 |     std::vector<llama_token> result;
 612 |     0 |     result.reserve(idxs.size());
 613 |       |
 614 |     0 |     size_t i = 0;
 615 |     0 |     for (; i < draft.size(); i++) {
 616 |     0 |         const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);
 617 |       |
 618 |     0 |         common_sampler_accept(gsmpl, id, true);
 619 |       |
 620 |     0 |         result.push_back(id);
 621 |       |
 622 |     0 |         if (draft[i] != id) {
 623 |     0 |             break;
 624 |     0 |         }
 625 |     0 |     }
 626 |       |
 627 |     0 |     if (i == draft.size()) {
 628 |     0 |         const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);
 629 |       |
 630 |     0 |         common_sampler_accept(gsmpl, id, true);
 631 |       |
 632 |     0 |         result.push_back(id);
 633 |     0 |     }
 634 |       |
 635 |     0 |     return result;
 636 |     0 | }
 637 |       |
 638 |     0 | std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first) {
 639 |     0 |     std::vector<int> idxs(draft.size() + 1);
 640 |     0 |     for (size_t i = 0; i < idxs.size(); ++i) {
 641 |     0 |         idxs[i] = i;
 642 |     0 |     }
 643 |       |
 644 |     0 |     return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft, grammar_first);
 645 |     0 | }
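
The draft-aware overloads above verify a speculative draft left-to-right and stop at the first mismatch, so the result contains between 1 and draft.size() + 1 tokens. A sketch of the calling pattern (hypothetical, not part of the measured source; it assumes the draft tokens have already been decoded into `ctx` so that logits exist for positions 0..draft.size()).

    // tokens proposed by a draft model and already decoded into `ctx`
    llama_tokens draft = { /* ... */ };

    const std::vector<llama_token> accepted =
        common_sampler_sample_and_accept_n(smpl, ctx, draft, /* grammar_first */ false);

    // every accepted token except the last matched the draft; the last one is
    // either the first divergence or a bonus token sampled after a full match
    const size_t n_matched = accepted.size() - 1;
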
 646 |       |
 647 |     0 | uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) {
 648 |     0 |     return llama_sampler_get_seed(gsmpl->chain);
 649 |     0 | }
 650 |       |
 651 |       | // helpers
 652 |       |
 653 |     0 | llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl, bool do_sort) {
 654 |     0 |     const auto tm = gsmpl->tm();
 655 |       |
 656 |     0 |     auto * res = &gsmpl->cur_p;
 657 |       |
 658 |     0 |     if (do_sort && !res->sorted) {
 659 |       |         // remember the selected token before sorting
 660 |     0 |         const llama_token id = res->data[res->selected].id;
 661 |       |
 662 |     0 |         std::sort(res->data, res->data + res->size, [](const llama_token_data & a, const llama_token_data & b) {
 663 |     0 |             return a.p > b.p;
 664 |     0 |         });
 665 |       |
 666 |       |         // restore the selected token after sorting
 667 |     0 |         for (size_t i = 0; i < res->size; ++i) {
 668 |     0 |             if (res->data[i].id == id) {
 669 |     0 |                 res->selected = i;
 670 |     0 |                 break;
 671 |     0 |             }
 672 |     0 |         }
 673 |       |
 674 |     0 |         res->sorted = true;
 675 |     0 |     }
 676 |       |
 677 |     0 |     return res;
 678 |     0 | }
 679 |       |
 680 |     0 | llama_token common_sampler_last(const struct common_sampler * gsmpl) {
 681 |     0 |     return gsmpl->prev.rat(0);
 682 |     0 | }
 683 |       |
 684 |     0 | std::string common_sampler_print(const struct common_sampler * gsmpl) {
 685 |     0 |     std::string result = "logits ";
 686 |       |
 687 |     0 |     for (int i = 0; i < llama_sampler_chain_n(gsmpl->chain); i++) {
 688 |     0 |         const auto * smpl = llama_sampler_chain_get(gsmpl->chain, i);
 689 |     0 |         result += std::string("-> ");
 690 |     0 |         result += std::string(llama_sampler_name(smpl)) + " ";
 691 |     0 |     }
 692 |       |
 693 |     0 |     return result;
 694 |     0 | }
 695 |       |
 696 |     0 | std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx_main, int n) {
 697 |     0 |     n = std::min(n, (int) gsmpl->prev.size());
 698 |       |
 699 |     0 |     if (n <= 0) {
 700 |     0 |         return "";
 701 |     0 |     }
 702 |       |
 703 |     0 |     std::string result;
 704 |     0 |     result.reserve(8*n); // 8 is the average length of a token [citation needed], TODO: compute this from the vocab
 705 |       |
 706 |     0 |     for (int i = n - 1; i >= 0; i--) {
 707 |     0 |         const llama_token id = gsmpl->prev.rat(i);
 708 |       |
 709 |     0 |         GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - should not happen");
 710 |       |
 711 |     0 |         result += common_token_to_piece(ctx_main, id);
 712 |     0 |     }
 713 |       |
 714 |     0 |     return result;
 715 |     0 | }
 716 |       |
 717 |     0 | char common_sampler_type_to_chr(enum common_sampler_type cnstr) {
 718 |     0 |     switch (cnstr) {
 719 |     0 |         case COMMON_SAMPLER_TYPE_DRY:         return 'd';
 720 |     0 |         case COMMON_SAMPLER_TYPE_TOP_K:       return 'k';
 721 |     0 |         case COMMON_SAMPLER_TYPE_TYPICAL_P:   return 'y';
 722 |     0 |         case COMMON_SAMPLER_TYPE_TOP_P:       return 'p';
 723 |     0 |         case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return 's';
 724 |     0 |         case COMMON_SAMPLER_TYPE_MIN_P:       return 'm';
 725 |     0 |         case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
 726 |     0 |         case COMMON_SAMPLER_TYPE_XTC:         return 'x';
 727 |     0 |         case COMMON_SAMPLER_TYPE_INFILL:      return 'i';
 728 |     0 |         case COMMON_SAMPLER_TYPE_PENALTIES:   return 'e';
 729 |     0 |         case COMMON_SAMPLER_TYPE_ADAPTIVE_P:  return 'a';
 730 |     0 |         default : return '?';
 731 |     0 |     }
 732 |     0 | }
 733 |       |
 734 |     0 | std::string common_sampler_type_to_str(enum common_sampler_type cnstr) {
 735 |     0 |     switch (cnstr) {
 736 |     0 |         case COMMON_SAMPLER_TYPE_DRY:         return "dry";
 737 |     0 |         case COMMON_SAMPLER_TYPE_TOP_K:       return "top_k";
 738 |     0 |         case COMMON_SAMPLER_TYPE_TYPICAL_P:   return "typ_p";
 739 |     0 |         case COMMON_SAMPLER_TYPE_TOP_P:       return "top_p";
 740 |     0 |         case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return "top_n_sigma";
 741 |     0 |         case COMMON_SAMPLER_TYPE_MIN_P:       return "min_p";
 742 |     0 |         case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
 743 |     0 |         case COMMON_SAMPLER_TYPE_XTC:         return "xtc";
 744 |     0 |         case COMMON_SAMPLER_TYPE_INFILL:      return "infill";
 745 |     0 |         case COMMON_SAMPLER_TYPE_PENALTIES:   return "penalties";
 746 |     0 |         case COMMON_SAMPLER_TYPE_ADAPTIVE_P:  return "adaptive_p";
 747 |     0 |         default : return "";
 748 |     0 |     }
 749 |     0 | }
 750 |       |
 751 |     0 | std::vector<common_sampler_type> common_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) {
 752 |     0 |     std::unordered_map<std::string, common_sampler_type> sampler_canonical_name_map {
 753 |     0 |         { "dry",         COMMON_SAMPLER_TYPE_DRY },
 754 |     0 |         { "top_k",       COMMON_SAMPLER_TYPE_TOP_K },
 755 |     0 |         { "top_p",       COMMON_SAMPLER_TYPE_TOP_P },
 756 |     0 |         { "top_n_sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
 757 |     0 |         { "typ_p",       COMMON_SAMPLER_TYPE_TYPICAL_P },
 758 |     0 |         { "min_p",       COMMON_SAMPLER_TYPE_MIN_P },
 759 |     0 |         { "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
 760 |     0 |         { "xtc",         COMMON_SAMPLER_TYPE_XTC },
 761 |     0 |         { "infill",      COMMON_SAMPLER_TYPE_INFILL },
 762 |     0 |         { "penalties",   COMMON_SAMPLER_TYPE_PENALTIES },
 763 |     0 |         { "adaptive_p",  COMMON_SAMPLER_TYPE_ADAPTIVE_P },
 764 |     0 |     };
 765 |       |
 766 |       |     // sampler names are written in multiple ways,
 767 |       |     // so accept both the canonical names and common input variants
 768 |     0 |     std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map {
 769 |     0 |         { "top-k",       COMMON_SAMPLER_TYPE_TOP_K },
 770 |     0 |         { "top-p",       COMMON_SAMPLER_TYPE_TOP_P },
 771 |     0 |         { "top-n-sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
 772 |     0 |         { "nucleus",     COMMON_SAMPLER_TYPE_TOP_P },
 773 |     0 |         { "typical-p",   COMMON_SAMPLER_TYPE_TYPICAL_P },
 774 |     0 |         { "typical",     COMMON_SAMPLER_TYPE_TYPICAL_P },
 775 |     0 |         { "typ-p",       COMMON_SAMPLER_TYPE_TYPICAL_P },
 776 |     0 |         { "typ",         COMMON_SAMPLER_TYPE_TYPICAL_P },
 777 |     0 |         { "min-p",       COMMON_SAMPLER_TYPE_MIN_P },
 778 |     0 |         { "temp",        COMMON_SAMPLER_TYPE_TEMPERATURE },
 779 |     0 |         { "adaptive-p",  COMMON_SAMPLER_TYPE_ADAPTIVE_P },
 780 |     0 |     };
 781 |       |
 782 |     0 |     std::vector<common_sampler_type> samplers;
 783 |     0 |     samplers.reserve(names.size());
 784 |       |
 785 |     0 |     for (const auto & name : names) {
 786 |     0 |         auto sampler = sampler_canonical_name_map.find(name);
 787 |     0 |         if (sampler != sampler_canonical_name_map.end()) {
 788 |     0 |             samplers.push_back(sampler->second);
 789 |     0 |             continue;
 790 |     0 |         }
 791 |     0 |         if (allow_alt_names) {
 792 |     0 |             sampler = sampler_alt_name_map.find(name);
 793 |     0 |             if (sampler != sampler_alt_name_map.end()) {
 794 |     0 |                 samplers.push_back(sampler->second);
 795 |     0 |                 continue;
 796 |     0 |             }
 797 |     0 |         }
 798 |     0 |         LOG_WRN("%s: unable to match sampler by name '%s'\n", __func__, name.c_str());
 799 |     0 |     }
 800 |       |
 801 |     0 |     return samplers;
 802 |     0 | }
 803 |       |
 804 |     0 | std::vector<common_sampler_type> common_sampler_types_from_chars(const std::string & chars) {
 805 |     0 |     std::unordered_map<char, common_sampler_type> sampler_name_map = {
 806 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_DRY),         COMMON_SAMPLER_TYPE_DRY },
 807 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K),       COMMON_SAMPLER_TYPE_TOP_K },
 808 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P),   COMMON_SAMPLER_TYPE_TYPICAL_P },
 809 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P),       COMMON_SAMPLER_TYPE_TOP_P },
 810 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_N_SIGMA), COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
 811 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P),       COMMON_SAMPLER_TYPE_MIN_P },
 812 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
 813 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC),         COMMON_SAMPLER_TYPE_XTC },
 814 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_INFILL),      COMMON_SAMPLER_TYPE_INFILL },
 815 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_PENALTIES),   COMMON_SAMPLER_TYPE_PENALTIES },
 816 |     0 |         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_ADAPTIVE_P),  COMMON_SAMPLER_TYPE_ADAPTIVE_P },
 817 |     0 |     };
 818 |       |
 819 |     0 |     std::vector<common_sampler_type> samplers;
 820 |     0 |     samplers.reserve(chars.size());
 821 |       |
 822 |     0 |     for (const auto & c : chars) {
 823 |     0 |         const auto sampler = sampler_name_map.find(c);
 824 |     0 |         if (sampler != sampler_name_map.end()) {
 825 |     0 |             samplers.push_back(sampler->second);
 826 |     0 |         } else {
 827 |     0 |             LOG_WRN("%s: unable to match sampler by char '%c'\n", __func__, c);
 828 |     0 |         }
 829 |     0 |     }
 830 |       |
 831 |     0 |     return samplers;
 832 |     0 | }
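
The two parsers at the end mirror each other: common_sampler_types_from_names() accepts the long names (plus aliases when allow_alt_names is set), while common_sampler_types_from_chars() accepts the single-letter codes produced by common_sampler_type_to_chr(). A small sketch with hypothetical inputs:

    // full names; with allow_alt_names, aliases such as "top-k" or "temp" also match
    const auto by_name = common_sampler_types_from_names(
        { "penalties", "top_k", "min_p", "temperature" }, /* allow_alt_names */ true);

    // single-character codes: 'e' = penalties, 'k' = top_k, 'm' = min_p, 't' = temperature
    const auto by_chr = common_sampler_types_from_chars("ekmt");

    // both calls yield the same sampler sequence
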