/src/llama.cpp/common/sampling.cpp
Line | Count | Source |
1 | | #include "sampling.h" |
2 | | |
3 | | #include "common.h" |
4 | | #include "log.h" |
5 | | |
6 | | #include <algorithm> |
7 | | #include <cmath> |
8 | | #include <cstring> |
9 | | #include <unordered_map> |
10 | | |
// the ring buffer works similarly to std::deque, but with a fixed capacity
// TODO: deduplicate with llama-impl.h
//
// invariants:
//   - `first` indexes the oldest element
//   - `pos`   indexes the slot that the next push_back() will write
//   - `sz`    is the number of stored elements (0 <= sz <= capacity)
template<typename T>
struct ring_buffer {
    // note: cap must be > 0, otherwise the modulo arithmetic below divides by zero
    ring_buffer(size_t cap) : capacity(cap), data(cap) {}

    // oldest element; throws std::runtime_error when empty
    T & front() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[first];
    }

    const T & front() const {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[first];
    }

    // newest element; throws std::runtime_error when empty
    // note: `pos` points one past the most recent write, so the newest element
    //       lives at (pos + capacity - 1) % capacity - previously this returned
    //       data[pos], i.e. the slot *after* the newest element
    T & back() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[(pos + capacity - 1) % capacity];
    }

    const T & back() const {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        return data[(pos + capacity - 1) % capacity];
    }

    // append `value`; when the buffer is full, the oldest element is overwritten
    void push_back(const T & value) {
        if (sz == capacity) {
            // advance the start when buffer is full
            first = (first + 1) % capacity;
        } else {
            sz++;
        }
        data[pos] = value;
        pos = (pos + 1) % capacity;
    }

    // remove and return the oldest element; throws std::runtime_error when empty
    T pop_front() {
        if (sz == 0) {
            throw std::runtime_error("ring buffer is empty");
        }
        T value = data[first];
        first = (first + 1) % capacity;
        sz--;
        return value;
    }

    // "reverse at": i == 0 is the newest element, i == sz - 1 the oldest
    const T & rat(size_t i) const {
        if (i >= sz) {
            throw std::runtime_error("ring buffer: index out of bounds");
        }
        return data[(first + sz - i - 1) % capacity];
    }

    // copy the contents into a vector, oldest element first
    std::vector<T> to_vector() const {
        std::vector<T> result;
        result.reserve(sz);
        for (size_t i = 0; i < sz; i++) {
            result.push_back(data[(first + i) % capacity]);
        }
        return result;
    }

    void clear() {
        // here only reset the status of the buffer
        // (stored elements are kept alive until overwritten by future pushes)
        sz = 0;
        first = 0;
        pos = 0;
    }

    bool empty() const {
        return sz == 0;
    }

    size_t size() const {
        return sz;
    }

    size_t capacity = 0;
    size_t sz = 0;
    size_t first = 0;
    size_t pos = 0;
    std::vector<T> data;
};
103 | | |
104 | | struct common_sampler { |
105 | | common_params_sampling params; |
106 | | |
107 | | struct llama_sampler * grmr; |
108 | | struct llama_sampler * chain; |
109 | | |
110 | | ring_buffer<llama_token> prev; |
111 | | |
112 | | std::vector<llama_token_data> cur; |
113 | | |
114 | | llama_token_data_array cur_p; |
115 | | |
116 | 0 | void reset() { |
117 | 0 | prev.clear(); |
118 | |
|
119 | 0 | llama_sampler_reset(grmr); |
120 | 0 | llama_sampler_reset(chain); |
121 | 0 | } |
122 | | |
123 | 0 | void set_logits(struct llama_context * ctx, int idx) { |
124 | 0 | const auto * logits = llama_get_logits_ith(ctx, idx); |
125 | |
|
126 | 0 | const llama_model * model = llama_get_model(ctx); |
127 | 0 | const llama_vocab * vocab = llama_model_get_vocab(model); |
128 | |
|
129 | 0 | const int n_vocab = llama_vocab_n_tokens(vocab); |
130 | |
|
131 | 0 | cur.resize(n_vocab); |
132 | |
|
133 | 0 | for (llama_token token_id = 0; token_id < n_vocab; token_id++) { |
134 | 0 | cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f}; |
135 | 0 | } |
136 | |
|
137 | 0 | cur_p = { cur.data(), cur.size(), -1, false }; |
138 | 0 | } |
139 | | |
140 | 0 | common_time_meas tm() { |
141 | 0 | return common_time_meas(t_total_us, params.no_perf); |
142 | 0 | } |
143 | | |
144 | | mutable int64_t t_total_us = 0; |
145 | | }; |
146 | | |
147 | 0 | std::string common_params_sampling::print() const { |
148 | 0 | char result[1024]; |
149 | |
|
150 | 0 | snprintf(result, sizeof(result), |
151 | 0 | "\trepeat_last_n = %d, repeat_penalty = %.3f, frequency_penalty = %.3f, presence_penalty = %.3f\n" |
152 | 0 | "\tdry_multiplier = %.3f, dry_base = %.3f, dry_allowed_length = %d, dry_penalty_last_n = %d\n" |
153 | 0 | "\ttop_k = %d, top_p = %.3f, min_p = %.3f, xtc_probability = %.3f, xtc_threshold = %.3f, typical_p = %.3f, top_n_sigma = %.3f, temp = %.3f\n" |
154 | 0 | "\tmirostat = %d, mirostat_lr = %.3f, mirostat_ent = %.3f", |
155 | 0 | penalty_last_n, penalty_repeat, penalty_freq, penalty_present, |
156 | 0 | dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, |
157 | 0 | top_k, top_p, min_p, xtc_probability, xtc_threshold, typ_p, top_n_sigma, temp, |
158 | 0 | mirostat, mirostat_eta, mirostat_tau); |
159 | |
|
160 | 0 | return std::string(result); |
161 | 0 | } |
162 | | |
163 | 0 | struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_params_sampling & params) { |
164 | 0 | const llama_vocab * vocab = llama_model_get_vocab(model); |
165 | |
|
166 | 0 | llama_sampler_chain_params lparams = llama_sampler_chain_default_params(); |
167 | |
|
168 | 0 | lparams.no_perf = params.no_perf; |
169 | |
|
170 | 0 | struct llama_sampler * grmr; |
171 | 0 | if (params.grammar.compare(0, 11, "%llguidance") == 0) { |
172 | | #ifdef LLAMA_USE_LLGUIDANCE |
173 | | grmr = llama_sampler_init_llg(vocab, "lark", params.grammar.c_str()); |
174 | | #else |
175 | 0 | GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled"); |
176 | 0 | #endif // LLAMA_USE_LLGUIDANCE |
177 | 0 | } else { |
178 | 0 | std::vector<std::string> trigger_patterns; |
179 | 0 | std::vector<std::string> patterns_anywhere; |
180 | 0 | std::vector<llama_token> trigger_tokens; |
181 | 0 | for (const auto & trigger : params.grammar_triggers) { |
182 | 0 | switch (trigger.type) { |
183 | 0 | case COMMON_GRAMMAR_TRIGGER_TYPE_WORD: |
184 | 0 | { |
185 | 0 | const auto & word = trigger.value; |
186 | 0 | patterns_anywhere.push_back(regex_escape(word)); |
187 | 0 | break; |
188 | 0 | } |
189 | 0 | case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN: |
190 | 0 | { |
191 | 0 | patterns_anywhere.push_back(trigger.value); |
192 | 0 | break; |
193 | 0 | } |
194 | 0 | case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL: |
195 | 0 | { |
196 | 0 | trigger_patterns.push_back(trigger.value); |
197 | 0 | break; |
198 | 0 | } |
199 | 0 | case COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN: |
200 | 0 | { |
201 | 0 | const auto token = trigger.token; |
202 | 0 | trigger_tokens.push_back(token); |
203 | 0 | break; |
204 | 0 | } |
205 | 0 | default: |
206 | 0 | GGML_ASSERT(false && "unknown trigger type"); |
207 | 0 | } |
208 | 0 | } |
209 | | |
210 | 0 | if (!patterns_anywhere.empty()) { |
211 | 0 | trigger_patterns.push_back("^[\\s\\S]*?(" + string_join(patterns_anywhere, "|") + ")[\\s\\S]*"); |
212 | 0 | } |
213 | |
|
214 | 0 | std::vector<const char *> trigger_patterns_c; |
215 | 0 | trigger_patterns_c.reserve(trigger_patterns.size()); |
216 | 0 | for (const auto & regex : trigger_patterns) { |
217 | 0 | trigger_patterns_c.push_back(regex.c_str()); |
218 | 0 | } |
219 | |
|
220 | 0 | grmr = params.grammar_lazy |
221 | 0 | ? llama_sampler_init_grammar_lazy_patterns(vocab, params.grammar.c_str(), "root", |
222 | 0 | trigger_patterns_c.data(), trigger_patterns_c.size(), |
223 | 0 | trigger_tokens.data(), trigger_tokens.size()) |
224 | 0 | : llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root"); |
225 | 0 | if (!grmr) { |
226 | 0 | return nullptr; |
227 | 0 | } |
228 | 0 | } |
229 | | |
230 | 0 | auto * result = new common_sampler { |
231 | 0 | /* .params = */ params, |
232 | 0 | /* .grmr = */ grmr, |
233 | 0 | /* .chain = */ llama_sampler_chain_init(lparams), |
234 | 0 | /* .prev = */ ring_buffer<llama_token>(std::max(32, params.n_prev)), |
235 | 0 | /* .cur = */ {}, |
236 | 0 | /* .cur_p = */ {}, |
237 | 0 | }; |
238 | |
|
239 | 0 | llama_sampler_chain_add(result->chain, |
240 | 0 | llama_sampler_init_logit_bias( |
241 | 0 | llama_vocab_n_tokens(vocab), |
242 | 0 | params.logit_bias.size(), |
243 | 0 | params.logit_bias.data())); |
244 | |
|
245 | 0 | if (params.mirostat == 0) { |
246 | 0 | for (const auto & cnstr : params.samplers) { |
247 | 0 | switch (cnstr) { |
248 | 0 | case COMMON_SAMPLER_TYPE_DRY: |
249 | 0 | { |
250 | 0 | std::vector<const char *> c_breakers; |
251 | 0 | c_breakers.reserve(params.dry_sequence_breakers.size()); |
252 | 0 | for (const auto & str : params.dry_sequence_breakers) { |
253 | 0 | c_breakers.push_back(str.c_str()); |
254 | 0 | } |
255 | |
|
256 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size())); |
257 | 0 | } |
258 | 0 | break; |
259 | 0 | case COMMON_SAMPLER_TYPE_TOP_K: |
260 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k)); |
261 | 0 | break; |
262 | 0 | case COMMON_SAMPLER_TYPE_TOP_P: |
263 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep)); |
264 | 0 | break; |
265 | 0 | case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: |
266 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma)); |
267 | 0 | break; |
268 | 0 | case COMMON_SAMPLER_TYPE_MIN_P: |
269 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep)); |
270 | 0 | break; |
271 | 0 | case COMMON_SAMPLER_TYPE_XTC: |
272 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed)); |
273 | 0 | break; |
274 | 0 | case COMMON_SAMPLER_TYPE_TYPICAL_P: |
275 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep)); |
276 | 0 | break; |
277 | 0 | case COMMON_SAMPLER_TYPE_TEMPERATURE: |
278 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent)); |
279 | 0 | break; |
280 | 0 | case COMMON_SAMPLER_TYPE_INFILL: |
281 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_infill (vocab)); |
282 | 0 | break; |
283 | 0 | case COMMON_SAMPLER_TYPE_PENALTIES: |
284 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_penalties (params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present)); |
285 | 0 | break; |
286 | 0 | default: |
287 | 0 | GGML_ASSERT(false && "unknown sampler type"); |
288 | 0 | } |
289 | 0 | } |
290 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed)); |
291 | 0 | } else if (params.mirostat == 1) { |
292 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); |
293 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), params.seed, params.mirostat_tau, params.mirostat_eta, 100)); |
294 | 0 | } else if (params.mirostat == 2) { |
295 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); |
296 | 0 | llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta)); |
297 | 0 | } else { |
298 | 0 | GGML_ASSERT(false && "unknown mirostat version"); |
299 | 0 | } |
300 | | |
301 | 0 | return result; |
302 | 0 | } |
303 | | |
304 | 0 | void common_sampler_free(struct common_sampler * gsmpl) { |
305 | 0 | if (gsmpl) { |
306 | 0 | llama_sampler_free(gsmpl->grmr); |
307 | |
|
308 | 0 | llama_sampler_free(gsmpl->chain); |
309 | |
|
310 | 0 | delete gsmpl; |
311 | 0 | } |
312 | 0 | } |
313 | | |
// accept `token` into the sampling history and update the samplers' internal state;
// `accept_grammar` == false skips advancing the grammar sampler
void common_sampler_accept(struct common_sampler * gsmpl, llama_token token, bool accept_grammar) {
    const auto tm = gsmpl->tm(); // scoped timer: accumulates into gsmpl->t_total_us on destruction

    if (accept_grammar) {
        llama_sampler_accept(gsmpl->grmr, token);
    }

    llama_sampler_accept(gsmpl->chain, token);

    // record the token so common_sampler_last/prev_str can report it
    gsmpl->prev.push_back(token);
}
325 | | |
// clear the sampling history and reset both the grammar and chain samplers to their initial state
void common_sampler_reset(struct common_sampler * gsmpl) {
    gsmpl->reset();
}
329 | | |
// duplicate a sampler: llama samplers are cloned, history and candidate buffers are copied
// NOTE(review): the copied cur_p.data still points into the *source* sampler's `cur` buffer;
// this looks safe only because set_logits() reinitializes cur_p before use - confirm callers
struct common_sampler * common_sampler_clone(common_sampler * gsmpl) {
    return new common_sampler {
        /* .params = */ gsmpl->params,
        /* .grmr   = */ llama_sampler_clone(gsmpl->grmr),
        /* .chain  = */ llama_sampler_clone(gsmpl->chain),
        /* .prev   = */ gsmpl->prev,
        /* .cur    = */ gsmpl->cur,
        /* .cur_p  = */ gsmpl->cur_p,
    };
}
340 | | |
341 | 0 | void common_perf_print(const struct llama_context * ctx, const struct common_sampler * gsmpl) { |
342 | | // TODO: measure grammar performance |
343 | |
|
344 | 0 | const double t_sampling_ms = gsmpl ? 1e-3*gsmpl->t_total_us : 0; |
345 | |
|
346 | 0 | llama_perf_sampler_data data_smpl; |
347 | 0 | llama_perf_context_data data_ctx; |
348 | |
|
349 | 0 | memset(&data_smpl, 0, sizeof(data_smpl)); |
350 | 0 | memset(&data_ctx, 0, sizeof(data_ctx)); |
351 | |
|
352 | 0 | if (gsmpl) { |
353 | 0 | auto & data = data_smpl; |
354 | |
|
355 | 0 | data = llama_perf_sampler(gsmpl->chain); |
356 | | |
357 | | // note: the sampling time includes the samplers time + extra time spent in common/sampling |
358 | 0 | LOG_INF("%s: sampling time = %10.2f ms\n", __func__, t_sampling_ms); |
359 | 0 | LOG_INF("%s: samplers time = %10.2f ms / %5d tokens\n", __func__, data.t_sample_ms, data.n_sample); |
360 | 0 | } |
361 | |
|
362 | 0 | if (ctx) { |
363 | 0 | auto & data = data_ctx; |
364 | |
|
365 | 0 | data = llama_perf_context(ctx); |
366 | |
|
367 | 0 | const double t_end_ms = 1e-3 * ggml_time_us(); |
368 | |
|
369 | 0 | const double t_total_ms = t_end_ms - data.t_start_ms; |
370 | 0 | const double t_unacc_ms = t_total_ms - (t_sampling_ms + data.t_p_eval_ms + data.t_eval_ms); |
371 | 0 | const double t_unacc_pc = 100.0 * t_unacc_ms / t_total_ms; |
372 | |
|
373 | 0 | LOG_INF("%s: load time = %10.2f ms\n", __func__, data.t_load_ms); |
374 | 0 | LOG_INF("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", |
375 | 0 | __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval); |
376 | 0 | LOG_INF("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", |
377 | 0 | __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval); |
378 | 0 | LOG_INF("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval)); |
379 | 0 | LOG_INF("%s: unaccounted time = %10.2f ms / %5.1f %% (total - sampling - prompt eval - eval) / (total)\n", __func__, t_unacc_ms, t_unacc_pc); |
380 | 0 | LOG_INF("%s: graphs reused = %10d\n", __func__, data.n_reused); |
381 | |
|
382 | 0 | llama_memory_breakdown_print(ctx); |
383 | 0 | } |
384 | 0 | } |
385 | | |
// sample one token from the logits at output `idx`, enforcing the grammar in two passes:
//   1) sample from the chain alone, then validate the result against the grammar
//   2) if the token is rejected, re-sample with the grammar applied before the chain
// grammar_first == true applies the grammar up-front instead (no validation pass needed)
llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first) {
    llama_synchronize(ctx);

    // start measuring sampling time after the llama_context synchronization in order to not measure any ongoing async operations
    const auto tm = gsmpl->tm();

    gsmpl->set_logits(ctx, idx);

    auto & grmr  = gsmpl->grmr;
    auto & chain = gsmpl->chain;
    auto & cur_p = gsmpl->cur_p; // initialized by set_logits

    if (grammar_first) {
        llama_sampler_apply(grmr, &cur_p);
    }

    llama_sampler_apply(chain, &cur_p);

    GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration");

    const llama_token id = cur_p.data[cur_p.selected].id;

    if (grammar_first) {
        // the candidates were already grammar-constrained - no validation needed
        return id;
    }

    // check if it the sampled token fits the grammar
    {
        llama_token_data single_token_data = { id, 1.0f, 0.0f };
        llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false };

        llama_sampler_apply(grmr, &single_token_data_array);

        // the grammar sampler rejects a token by forcing its logit to -INFINITY
        const bool is_valid = single_token_data_array.data[0].logit != -INFINITY;
        if (is_valid) {
            return id;
        }
    }

    // resampling:
    // if the token is not valid, sample again, but first apply the grammar sampler and then the sampling chain
    gsmpl->set_logits(ctx, idx);

    llama_sampler_apply(grmr,  &cur_p);
    llama_sampler_apply(chain, &cur_p);

    GGML_ASSERT(cur_p.selected != -1 && "no selected token during re-sampling - check your sampling configuration");

    return cur_p.data[cur_p.selected].id;
}
436 | | |
// speculative-decoding helper: sample at each index in `idxs`, accepting the corresponding
// `draft` token as long as it matches the sampled one; stops after the first mismatch
// requires idxs.size() == draft.size() + 1 (the extra index samples the first non-draft token)
// returns the accepted tokens (at least one, at most idxs.size())
std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const llama_tokens & draft, bool grammar_first) {
    GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1");

    std::vector<llama_token> result;
    result.reserve(idxs.size());

    size_t i = 0;
    for (; i < draft.size(); i++) {
        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);

        common_sampler_accept(gsmpl, id, true);

        result.push_back(id);

        // the sampled token disagrees with the draft - the remaining draft is invalid
        if (draft[i] != id) {
            break;
        }
    }

    if (i == draft.size()) {
        // every draft token was accepted - sample one extra token at the final index
        const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first);

        common_sampler_accept(gsmpl, id, true);

        result.push_back(id);
    }

    return result;
}
466 | | |
467 | 0 | std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first) { |
468 | 0 | std::vector<int> idxs(draft.size() + 1); |
469 | 0 | for (size_t i = 0; i < idxs.size(); ++i) { |
470 | 0 | idxs[i] = i; |
471 | 0 | } |
472 | |
|
473 | 0 | return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft, grammar_first); |
474 | 0 | } |
475 | | |
// seed currently used by the chain's RNG-based sampler(s)
uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) {
    return llama_sampler_get_seed(gsmpl->chain);
}
479 | | |
480 | | // helpers |
481 | | |
482 | 0 | llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl, bool do_sort) { |
483 | 0 | const auto tm = gsmpl->tm(); |
484 | |
|
485 | 0 | auto * res = &gsmpl->cur_p; |
486 | |
|
487 | 0 | if (do_sort && !res->sorted) { |
488 | | // remember the selected token before sorting |
489 | 0 | const llama_token id = res->data[res->selected].id; |
490 | |
|
491 | 0 | std::sort(res->data, res->data + res->size, [](const llama_token_data & a, const llama_token_data & b) { |
492 | 0 | return a.p > b.p; |
493 | 0 | }); |
494 | | |
495 | | // restore the selected token after sorting |
496 | 0 | for (size_t i = 0; i < res->size; ++i) { |
497 | 0 | if (res->data[i].id == id) { |
498 | 0 | res->selected = i; |
499 | 0 | break; |
500 | 0 | } |
501 | 0 | } |
502 | |
|
503 | 0 | res->sorted = true; |
504 | 0 | } |
505 | |
|
506 | 0 | return res; |
507 | 0 | } |
508 | | |
// most recently accepted token; throws (via ring_buffer::rat) if nothing was accepted yet
llama_token common_sampler_last(const struct common_sampler * gsmpl) {
    return gsmpl->prev.rat(0);
}
512 | | |
513 | 0 | std::string common_sampler_print(const struct common_sampler * gsmpl) { |
514 | 0 | std::string result = "logits "; |
515 | |
|
516 | 0 | for (int i = 0; i < llama_sampler_chain_n(gsmpl->chain); i++) { |
517 | 0 | const auto * smpl = llama_sampler_chain_get(gsmpl->chain, i); |
518 | 0 | result += std::string("-> ") + llama_sampler_name(smpl) + " "; |
519 | 0 | } |
520 | |
|
521 | 0 | return result; |
522 | 0 | } |
523 | | |
// detokenize the last `n` accepted tokens into a string, oldest token first;
// `n` is clamped to the history size
std::string common_sampler_prev_str(common_sampler * gsmpl, llama_context * ctx_main, int n) {
    n = std::min(n, (int) gsmpl->prev.size());

    if (n <= 0) {
        return "";
    }

    std::string result;
    result.reserve(8*n); // 8 is the average length of a token [citation needed], TODO: compute this from the vocab

    // rat(i) counts back from the newest token, so iterate i = n-1 .. 0 to emit oldest-first
    for (int i = n - 1; i >= 0; i--) {
        const llama_token id = gsmpl->prev.rat(i);

        GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - should not happen");

        result += common_token_to_piece(ctx_main, id);
    }

    return result;
}
544 | | |
545 | 0 | char common_sampler_type_to_chr(enum common_sampler_type cnstr) { |
546 | 0 | switch (cnstr) { |
547 | 0 | case COMMON_SAMPLER_TYPE_DRY: return 'd'; |
548 | 0 | case COMMON_SAMPLER_TYPE_TOP_K: return 'k'; |
549 | 0 | case COMMON_SAMPLER_TYPE_TYPICAL_P: return 'y'; |
550 | 0 | case COMMON_SAMPLER_TYPE_TOP_P: return 'p'; |
551 | 0 | case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return 's'; |
552 | 0 | case COMMON_SAMPLER_TYPE_MIN_P: return 'm'; |
553 | 0 | case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't'; |
554 | 0 | case COMMON_SAMPLER_TYPE_XTC: return 'x'; |
555 | 0 | case COMMON_SAMPLER_TYPE_INFILL: return 'i'; |
556 | 0 | case COMMON_SAMPLER_TYPE_PENALTIES: return 'e'; |
557 | 0 | default : return '?'; |
558 | 0 | } |
559 | 0 | } |
560 | | |
561 | 0 | std::string common_sampler_type_to_str(enum common_sampler_type cnstr) { |
562 | 0 | switch (cnstr) { |
563 | 0 | case COMMON_SAMPLER_TYPE_DRY: return "dry"; |
564 | 0 | case COMMON_SAMPLER_TYPE_TOP_K: return "top_k"; |
565 | 0 | case COMMON_SAMPLER_TYPE_TYPICAL_P: return "typ_p"; |
566 | 0 | case COMMON_SAMPLER_TYPE_TOP_P: return "top_p"; |
567 | 0 | case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return "top_n_sigma"; |
568 | 0 | case COMMON_SAMPLER_TYPE_MIN_P: return "min_p"; |
569 | 0 | case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature"; |
570 | 0 | case COMMON_SAMPLER_TYPE_XTC: return "xtc"; |
571 | 0 | case COMMON_SAMPLER_TYPE_INFILL: return "infill"; |
572 | 0 | case COMMON_SAMPLER_TYPE_PENALTIES: return "penalties"; |
573 | 0 | default : return ""; |
574 | 0 | } |
575 | 0 | } |
576 | | |
577 | 0 | std::vector<common_sampler_type> common_sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names) { |
578 | 0 | std::unordered_map<std::string, common_sampler_type> sampler_canonical_name_map { |
579 | 0 | { "dry", COMMON_SAMPLER_TYPE_DRY }, |
580 | 0 | { "top_k", COMMON_SAMPLER_TYPE_TOP_K }, |
581 | 0 | { "top_p", COMMON_SAMPLER_TYPE_TOP_P }, |
582 | 0 | { "top_n_sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA }, |
583 | 0 | { "typ_p", COMMON_SAMPLER_TYPE_TYPICAL_P }, |
584 | 0 | { "min_p", COMMON_SAMPLER_TYPE_MIN_P }, |
585 | 0 | { "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE }, |
586 | 0 | { "xtc", COMMON_SAMPLER_TYPE_XTC }, |
587 | 0 | { "infill", COMMON_SAMPLER_TYPE_INFILL }, |
588 | 0 | { "penalties", COMMON_SAMPLER_TYPE_PENALTIES }, |
589 | 0 | }; |
590 | | |
591 | | // since samplers names are written multiple ways |
592 | | // make it ready for both system names and input names |
593 | 0 | std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map { |
594 | 0 | { "top-k", COMMON_SAMPLER_TYPE_TOP_K }, |
595 | 0 | { "top-p", COMMON_SAMPLER_TYPE_TOP_P }, |
596 | 0 | { "top-n-sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA }, |
597 | 0 | { "nucleus", COMMON_SAMPLER_TYPE_TOP_P }, |
598 | 0 | { "typical-p", COMMON_SAMPLER_TYPE_TYPICAL_P }, |
599 | 0 | { "typical", COMMON_SAMPLER_TYPE_TYPICAL_P }, |
600 | 0 | { "typ-p", COMMON_SAMPLER_TYPE_TYPICAL_P }, |
601 | 0 | { "typ", COMMON_SAMPLER_TYPE_TYPICAL_P }, |
602 | 0 | { "min-p", COMMON_SAMPLER_TYPE_MIN_P }, |
603 | 0 | { "temp", COMMON_SAMPLER_TYPE_TEMPERATURE }, |
604 | 0 | }; |
605 | |
|
606 | 0 | std::vector<common_sampler_type> samplers; |
607 | 0 | samplers.reserve(names.size()); |
608 | |
|
609 | 0 | for (const auto & name : names) { |
610 | 0 | auto sampler = sampler_canonical_name_map.find(name); |
611 | 0 | if (sampler != sampler_canonical_name_map.end()) { |
612 | 0 | samplers.push_back(sampler->second); |
613 | 0 | continue; |
614 | 0 | } |
615 | 0 | if (allow_alt_names) { |
616 | 0 | sampler = sampler_alt_name_map.find(name); |
617 | 0 | if (sampler != sampler_alt_name_map.end()) { |
618 | 0 | samplers.push_back(sampler->second); |
619 | 0 | continue; |
620 | 0 | } |
621 | 0 | } |
622 | 0 | LOG_WRN("%s: unable to match sampler by name '%s'\n", __func__, name.c_str()); |
623 | 0 | } |
624 | |
|
625 | 0 | return samplers; |
626 | 0 | } |
627 | | |
628 | 0 | std::vector<common_sampler_type> common_sampler_types_from_chars(const std::string & chars) { |
629 | 0 | std::unordered_map<char, common_sampler_type> sampler_name_map = { |
630 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_DRY), COMMON_SAMPLER_TYPE_DRY }, |
631 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K), COMMON_SAMPLER_TYPE_TOP_K }, |
632 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P), COMMON_SAMPLER_TYPE_TYPICAL_P }, |
633 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P), COMMON_SAMPLER_TYPE_TOP_P }, |
634 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_N_SIGMA), COMMON_SAMPLER_TYPE_TOP_N_SIGMA }, |
635 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P), COMMON_SAMPLER_TYPE_MIN_P }, |
636 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE }, |
637 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC), COMMON_SAMPLER_TYPE_XTC }, |
638 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_INFILL), COMMON_SAMPLER_TYPE_INFILL }, |
639 | 0 | { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_PENALTIES), COMMON_SAMPLER_TYPE_PENALTIES }, |
640 | 0 | }; |
641 | |
|
642 | 0 | std::vector<common_sampler_type> samplers; |
643 | 0 | samplers.reserve(chars.size()); |
644 | |
|
645 | 0 | for (const auto & c : chars) { |
646 | 0 | const auto sampler = sampler_name_map.find(c); |
647 | 0 | if (sampler != sampler_name_map.end()) { |
648 | 0 | samplers.push_back(sampler->second); |
649 | 0 | } else { |
650 | 0 | LOG_WRN("%s: unable to match sampler by char '%c'\n", __func__, c); |
651 | 0 | } |
652 | 0 | } |
653 | |
|
654 | 0 | return samplers; |
655 | 0 | } |