Coverage Report

Created: 2026-01-18 06:10

/src/llama.cpp/common/common.h
Line
Count
Source
1
// Various helper functions and utilities
2
3
#pragma once
4
5
#include "ggml-opt.h"
6
#include "llama-cpp.h"
7
8
#include <set>
9
#include <sstream>
10
#include <string>
11
#include <string_view>
12
#include <vector>
13
#include <map>
14
15
#if defined(_WIN32) && !defined(_WIN32_WINNT)
16
#define _WIN32_WINNT 0x0A00
17
#endif
18
19
#ifdef _WIN32
20
#define DIRECTORY_SEPARATOR '\\'
21
#else
22
0
#define DIRECTORY_SEPARATOR '/'
23
#endif // _WIN32
24
25
#define die(msg)          do { fputs("error: " msg "\n", stderr);                exit(1); } while (0)
26
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)
27
28
#define print_build_info() do {                                                                     \
29
    fprintf(stderr, "%s: build = %d (%s)\n",      __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);      \
30
    fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET);    \
31
} while(0)
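A brief usage sketch of the error and build-info macros above; the file name is hypothetical and only illustrates the printf-style formatting:

    // hypothetical: abort with a formatted error if a required file is missing
    const char * path = "params.json";   // hypothetical file name
    FILE * f = fopen(path, "rb");        // <cstdio>
    if (f == NULL) {
        die_fmt("failed to open %s", path);
    }
    print_build_info();                  // prints build number, commit, compiler, target to stderr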
32
33
struct common_time_meas {
34
    common_time_meas(int64_t & t_acc, bool disable = false);
35
    ~common_time_meas();
36
37
    const int64_t t_start_us;
38
39
    int64_t & t_acc;
40
};
41
42
struct common_adapter_lora_info {
43
    std::string path;
44
    float scale;
45
46
    std::string task_name;
47
    std::string prompt_prefix;
48
49
    struct llama_adapter_lora * ptr;
50
};
51
52
using llama_tokens = std::vector<llama_token>;
53
54
// build info
55
extern int LLAMA_BUILD_NUMBER;
56
extern const char * LLAMA_COMMIT;
57
extern const char * LLAMA_COMPILER;
58
extern const char * LLAMA_BUILD_TARGET;
59
60
struct common_control_vector_load_info;
61
62
//
63
// CPU utils
64
//
65
66
struct cpu_params {
67
    int      n_threads                   = -1;
68
    bool     cpumask[GGML_MAX_N_THREADS] = {false}; // CPU affinity mask.
69
    bool     mask_valid                  = false;   // Default: any CPU
70
    enum ggml_sched_priority  priority   = GGML_SCHED_PRIO_NORMAL;  // Scheduling prio : (0 - normal, 1 - medium, 2 - high, 3 - realtime)
71
    bool     strict_cpu                  = false;   // Use strict CPU placement
72
    uint32_t poll                        = 50;      // Polling (busywait) level (0 - no polling, 100 - mostly polling)
73
};
74
75
int32_t cpu_get_num_physical_cores();
76
int32_t cpu_get_num_math();
77
78
//
79
// Common params
80
//
81
82
enum llama_example {
83
    LLAMA_EXAMPLE_BATCHED,
84
    LLAMA_EXAMPLE_DEBUG,
85
    LLAMA_EXAMPLE_COMMON,
86
    LLAMA_EXAMPLE_SPECULATIVE,
87
    LLAMA_EXAMPLE_COMPLETION,
88
    LLAMA_EXAMPLE_CLI,
89
    LLAMA_EXAMPLE_EMBEDDING,
90
    LLAMA_EXAMPLE_PERPLEXITY,
91
    LLAMA_EXAMPLE_RETRIEVAL,
92
    LLAMA_EXAMPLE_PASSKEY,
93
    LLAMA_EXAMPLE_IMATRIX,
94
    LLAMA_EXAMPLE_BENCH,
95
    LLAMA_EXAMPLE_SERVER,
96
    LLAMA_EXAMPLE_CVECTOR_GENERATOR,
97
    LLAMA_EXAMPLE_EXPORT_LORA,
98
    LLAMA_EXAMPLE_MTMD,
99
    LLAMA_EXAMPLE_LOOKUP,
100
    LLAMA_EXAMPLE_PARALLEL,
101
    LLAMA_EXAMPLE_TTS,
102
    LLAMA_EXAMPLE_DIFFUSION,
103
    LLAMA_EXAMPLE_FINETUNE,
104
    LLAMA_EXAMPLE_FIT_PARAMS,
105
106
    LLAMA_EXAMPLE_COUNT,
107
};
108
109
enum common_sampler_type {
110
    COMMON_SAMPLER_TYPE_NONE        = 0,
111
    COMMON_SAMPLER_TYPE_DRY         = 1,
112
    COMMON_SAMPLER_TYPE_TOP_K       = 2,
113
    COMMON_SAMPLER_TYPE_TOP_P       = 3,
114
    COMMON_SAMPLER_TYPE_MIN_P       = 4,
115
  //COMMON_SAMPLER_TYPE_TFS_Z       = 5,
116
    COMMON_SAMPLER_TYPE_TYPICAL_P   = 6,
117
    COMMON_SAMPLER_TYPE_TEMPERATURE = 7,
118
    COMMON_SAMPLER_TYPE_XTC         = 8,
119
    COMMON_SAMPLER_TYPE_INFILL      = 9,
120
    COMMON_SAMPLER_TYPE_PENALTIES   = 10,
121
    COMMON_SAMPLER_TYPE_TOP_N_SIGMA = 11,
122
    COMMON_SAMPLER_TYPE_ADAPTIVE_P  = 12,
123
};
124
125
// dimensionality reduction methods, used by cvector-generator
126
enum dimre_method {
127
    DIMRE_METHOD_PCA,
128
    DIMRE_METHOD_MEAN,
129
};
130
131
enum common_conversation_mode {
132
    COMMON_CONVERSATION_MODE_DISABLED = 0,
133
    COMMON_CONVERSATION_MODE_ENABLED  = 1,
134
    COMMON_CONVERSATION_MODE_AUTO     = 2,
135
};
136
137
enum common_grammar_trigger_type {
138
    COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN,
139
    COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
140
    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
141
    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
142
};
143
144
struct common_grammar_trigger {
145
    common_grammar_trigger_type type;
146
    std::string value;
147
    llama_token token = LLAMA_TOKEN_NULL;
148
};
149
150
enum common_params_sampling_config : uint64_t {
151
    COMMON_PARAMS_SAMPLING_CONFIG_SAMPLERS        = 1 << 0,
152
    COMMON_PARAMS_SAMPLING_CONFIG_TOP_K           = 1 << 1,
153
    COMMON_PARAMS_SAMPLING_CONFIG_TOP_P           = 1 << 2,
154
    COMMON_PARAMS_SAMPLING_CONFIG_MIN_P           = 1 << 3,
155
    COMMON_PARAMS_SAMPLING_CONFIG_XTC_PROBABILITY = 1 << 4,
156
    COMMON_PARAMS_SAMPLING_CONFIG_XTC_THRESHOLD   = 1 << 5,
157
    COMMON_PARAMS_SAMPLING_CONFIG_TEMP            = 1 << 6,
158
    COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_LAST_N  = 1 << 7,
159
    COMMON_PARAMS_SAMPLING_CONFIG_PENALTY_REPEAT  = 1 << 8,
160
    COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT        = 1 << 9,
161
    COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_TAU    = 1 << 10,
162
    COMMON_PARAMS_SAMPLING_CONFIG_MIROSTAT_ETA    = 1 << 11,
163
};
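These flags feed the user_sampling_config bitfield declared in common_params_sampling below; a minimal sketch of setting and querying such a bitfield (the surrounding argument-parsing code is assumed, not shown in this header):

    common_params_sampling sp;
    // hypothetical: record that the user explicitly set top_k and temp on the command line
    sp.user_sampling_config |= COMMON_PARAMS_SAMPLING_CONFIG_TOP_K | COMMON_PARAMS_SAMPLING_CONFIG_TEMP;
    // later: check whether top_p was left at its default value
    const bool top_p_is_default = (sp.user_sampling_config & COMMON_PARAMS_SAMPLING_CONFIG_TOP_P) == 0;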
164
165
166
// sampling parameters
167
struct common_params_sampling {
168
    uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampler
169
170
    int32_t n_prev             = 64;     // number of previous tokens to remember
171
    int32_t n_probs            = 0;      // if greater than 0, output the probabilities of top n_probs tokens.
172
    int32_t min_keep           = 0;      // 0 = disabled, otherwise samplers should return at least min_keep tokens
173
    int32_t top_k              = 40;     // <= 0 to use vocab size
174
    float   top_p              = 0.95f;  // 1.0 = disabled
175
    float   min_p              = 0.05f;  // 0.0 = disabled
176
    float   xtc_probability    = 0.00f;  // 0.0 = disabled
177
    float   xtc_threshold      = 0.10f;  // > 0.5 disables XTC
178
    float   typ_p              = 1.00f;  // typical_p, 1.0 = disabled
179
    float   temp               = 0.80f;  // <= 0.0 to sample greedily, 0.0 to not output probabilities
180
    float   dynatemp_range     = 0.00f;  // 0.0 = disabled
181
    float   dynatemp_exponent  = 1.00f;  // controls how entropy maps to temperature in dynamic temperature sampler
182
    int32_t penalty_last_n     = 64;     // last n tokens to penalize (0 = disable penalty, -1 = context size)
183
    float   penalty_repeat     = 1.00f;  // 1.0 = disabled
184
    float   penalty_freq       = 0.00f;  // 0.0 = disabled
185
    float   penalty_present    = 0.00f;  // 0.0 = disabled
186
    float   dry_multiplier     = 0.0f;   // 0.0 = disabled;      DRY repetition penalty for tokens extending repetition:
187
    float   dry_base           = 1.75f;  // 0.0 = disabled;      multiplier * base ^ (length of sequence before token - allowed length)
188
    int32_t dry_allowed_length = 2;      // tokens extending repetitions beyond this receive penalty
189
    int32_t dry_penalty_last_n = -1;     // how many tokens to scan for repetitions (0 = disable penalty, -1 = context size)
190
    float   adaptive_target    = -1.0f;  // select tokens near this probability (valid range 0.0 to 1.0; negative = disabled)
191
    float   adaptive_decay     = 0.90f;  // EMA decay for adaptation; history ≈ 1/(1-decay) tokens (0.0 - 0.99)
192
    int32_t mirostat           = 0;      // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
193
    float   top_n_sigma        = -1.00f; // -1.0 = disabled
194
    float   mirostat_tau       = 5.00f;  // target entropy
195
    float   mirostat_eta       = 0.10f;  // learning rate
196
    bool    ignore_eos         = false;
197
    bool    no_perf            = false;  // disable performance metrics
198
    bool    timing_per_token   = false;
199
200
    uint64_t user_sampling_config = 0; // bitfield to track user-specified samplers
201
202
    std::vector<std::string> dry_sequence_breakers = {"\n", ":", "\"", "*"};     // default sequence breakers for DRY
203
204
    std::vector<enum common_sampler_type> samplers = {
205
        COMMON_SAMPLER_TYPE_PENALTIES,
206
        COMMON_SAMPLER_TYPE_DRY,
207
        COMMON_SAMPLER_TYPE_TOP_N_SIGMA,
208
        COMMON_SAMPLER_TYPE_TOP_K,
209
        COMMON_SAMPLER_TYPE_TYPICAL_P,
210
        COMMON_SAMPLER_TYPE_TOP_P,
211
        COMMON_SAMPLER_TYPE_MIN_P,
212
        COMMON_SAMPLER_TYPE_XTC,
213
        COMMON_SAMPLER_TYPE_TEMPERATURE,
214
    };
215
216
    std::string                         grammar; // optional BNF-like grammar to constrain sampling
217
    bool                                grammar_lazy = false;
218
    std::vector<common_grammar_trigger> grammar_triggers; // optional triggers (for lazy grammars)
219
    std::set<llama_token>               preserved_tokens;
220
221
    std::vector<llama_logit_bias> logit_bias;     // logit biases to apply
222
    std::vector<llama_logit_bias> logit_bias_eog; // pre-calculated logit biases for EOG tokens
223
224
    bool backend_sampling = false;
225
226
0
    bool has_logit_bias() const {
227
0
        return !logit_bias.empty();
228
0
    }
229
230
    // print the parameters into a string
231
    std::string print() const;
232
};
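As a worked instance of the adaptive_decay comment above: with the default decay of 0.90, the effective history is approximately 1 / (1 - 0.90) = 10 tokens; a decay of 0.99 would stretch that to roughly 100 tokens.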
233
234
struct common_params_model {
235
    std::string path        = ""; // model local path                                       // NOLINT
236
    std::string url         = ""; // model url to download                                  // NOLINT
237
    std::string hf_repo     = ""; // HF repo                                                // NOLINT
238
    std::string hf_file     = ""; // HF file                                                // NOLINT
239
    std::string docker_repo = ""; // Docker repo                                            // NOLINT
240
    std::string name        = ""; // in format <user>/<model>[:<tag>] (tag is optional)     // NOLINT
241
};
242
243
struct common_params_speculative {
244
    std::vector<ggml_backend_dev_t> devices; // devices to use for offloading
245
246
    int32_t n_ctx        =     0; // draft context size
247
    int32_t n_max        =    16; // maximum number of tokens to draft during speculative decoding
248
    int32_t n_min        =     0; // minimum number of draft tokens to use for speculative decoding
249
    int32_t n_gpu_layers =    -1; // number of layers to store in VRAM for the draft model (-1 - use default)
250
    float   p_split      =  0.1f; // speculative decoding split probability
251
    float   p_min        = 0.75f; // minimum speculative decoding probability (greedy)
252
    std::vector<std::pair<std::string, std::string>> replacements; // main to speculative model replacements
253
    std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;
254
255
    ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
256
    ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V
257
258
    struct cpu_params cpuparams;
259
    struct cpu_params cpuparams_batch;
260
261
    struct common_params_model model;
262
};
263
264
struct common_params_vocoder {
265
    struct common_params_model model;
266
267
    std::string speaker_file = ""; // speaker file path                                      // NOLINT
268
269
    bool use_guide_tokens = false; // enable guide tokens to improve TTS accuracy            // NOLINT
270
};
271
272
struct common_params_diffusion {
273
    int32_t steps         = 128;
274
    bool    visual_mode   = false;
275
276
    float   eps           = 0;        // epsilon for timesteps
277
    int32_t block_length  = 0;        // block length for generation
278
279
    int32_t algorithm     = 4;        // default algorithm: low-confidence
280
    float   alg_temp      = 0.0f;     // algorithm temperature
281
282
    float   cfg_scale     = 0;        // classifier-free guidance scale
283
    bool    add_gumbel_noise = false; // add gumbel noise to the logits if temp > 0.0
284
};
285
286
// reasoning API response format (not to be confused with the chat template's reasoning format)
287
enum common_reasoning_format {
288
    COMMON_REASONING_FORMAT_NONE,
289
    COMMON_REASONING_FORMAT_AUTO,            // Same as deepseek, using `message.reasoning_content`
290
    COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY, // Extract thinking tag contents and return as `message.reasoning_content`, or leave inline in <think> tags in stream mode
291
    COMMON_REASONING_FORMAT_DEEPSEEK,        // Extract thinking tag contents and return as `message.reasoning_content`, including in streaming deltas.
292
    // do not extend this enum unless you absolutely have to
293
    // in most cases, use COMMON_REASONING_FORMAT_AUTO
294
    // see: https://github.com/ggml-org/llama.cpp/pull/15408
295
};
296
297
298
struct lr_opt {
299
    float    lr0          = 1e-5; // learning rate at first epoch
300
    float    lr_min       = -1;
301
    float    decay_epochs = -1;   // if >0, the learning rate starts at lr0 and decays to lr_min after this many epochs
302
    float    scale_epoch  = 0;
303
    float    wd           = 0;
304
    unsigned epochs       = 2;
305
306
    unsigned epoch; // set by optimizer outer (epochs) loop
307
    // learning rate decay - constant LR per epoch only for now
308
    float get_lr(float e) const;
309
0
    float get_lr() const { return get_lr(epoch); }
310
    // must call after arg parse, before get_lr
311
    void init();
312
};
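A hedged sketch of how lr_opt is meant to be driven, based only on the comments above (the training step itself is omitted):

    lr_opt lr;
    lr.lr0    = 1e-4f;          // hypothetical override of the initial learning rate
    lr.epochs = 4;
    lr.init();                  // must be called after argument parsing, before get_lr()
    for (unsigned e = 0; e < lr.epochs; ++e) {
        lr.epoch = e;           // set by the optimizer's outer (epochs) loop
        const float cur_lr = lr.get_lr(); // constant per epoch for now
        // ... run one training epoch with cur_lr ...
    }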
313
314
struct ggml_opt_optimizer_params common_opt_lr_pars(void * userdata);
315
316
struct common_params {
317
    int32_t n_predict             =    -1; // max. number of new tokens to predict, -1 == no limit
318
    int32_t n_ctx                 =     0; // context size, 0 == context the model was trained with
319
    int32_t n_batch               =  2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
320
    int32_t n_ubatch              =   512; // physical batch size for prompt processing (must be >=32 to use BLAS)
321
    int32_t n_keep                =     0; // number of tokens to keep from initial prompt
322
    int32_t n_chunks              =    -1; // max number of chunks to process (-1 = unlimited)
323
    int32_t n_parallel            =     1; // number of parallel sequences to decode
324
    int32_t n_sequences           =     1; // number of sequences to decode
325
    int32_t grp_attn_n            =     1; // group-attention factor
326
    int32_t grp_attn_w            =   512; // group-attention width
327
    int32_t n_print               =    -1; // print token count every n tokens (-1 = disabled)
328
    float   rope_freq_base        =  0.0f; // RoPE base frequency
329
    float   rope_freq_scale       =  0.0f; // RoPE frequency scaling factor
330
    float   yarn_ext_factor       = -1.0f; // YaRN extrapolation mix factor
331
    float   yarn_attn_factor      = -1.0f; // YaRN magnitude scaling factor
332
    float   yarn_beta_fast        = -1.0f; // YaRN low correction dim
333
    float   yarn_beta_slow        = -1.0f; // YaRN high correction dim
334
    int32_t yarn_orig_ctx         =     0; // YaRN original context length
335
336
    // offload params
337
    std::vector<ggml_backend_dev_t> devices; // devices to use for offloading
338
339
    int32_t n_gpu_layers       = -1;   // number of layers to store in VRAM, -1 is auto, <= -2 is all
340
    int32_t main_gpu           = 0;    // the GPU that is used for scratch and small tensors
341
    float   tensor_split[128]  = {0};  // how split tensors should be distributed across GPUs
342
    bool    fit_params         = true; // whether to fit unset model/context parameters to free device memory
343
    int32_t fit_params_min_ctx = 4096; // minimum context size to set when trying to reduce memory use
344
345
    // margin per device in bytes for fitting parameters to free memory:
346
    std::vector<size_t> fit_params_target = std::vector<size_t>(llama_max_devices(), 1024 * 1024*1024);
347
348
    enum llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs
349
350
    struct cpu_params cpuparams;
351
    struct cpu_params cpuparams_batch;
352
353
    ggml_backend_sched_eval_callback cb_eval = nullptr;
354
    void * cb_eval_user_data                 = nullptr;
355
356
    ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED;
357
358
    enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
359
    enum llama_pooling_type      pooling_type      = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings
360
    enum llama_attention_type    attention_type    = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings
361
    enum llama_flash_attn_type   flash_attn_type   = LLAMA_FLASH_ATTN_TYPE_AUTO; // whether to use Flash Attention
362
363
    struct common_params_sampling    sampling;
364
    struct common_params_speculative speculative;
365
    struct common_params_vocoder     vocoder;
366
    struct common_params_diffusion   diffusion;
367
368
    struct common_params_model model;
369
370
    std::string model_alias          = ""; // model alias                                                   // NOLINT
371
    std::string hf_token             = ""; // HF token                                                      // NOLINT
372
    std::string prompt               = "";                                                                  // NOLINT
373
    std::string system_prompt        = "";                                                                  // NOLINT
374
    std::string prompt_file          = ""; // store the external prompt file name                           // NOLINT
375
    std::string path_prompt_cache    = ""; // path to file for saving/loading prompt eval state             // NOLINT
376
    std::string input_prefix         = ""; // string to prefix user inputs with                             // NOLINT
377
    std::string input_suffix         = ""; // string to suffix user inputs with                             // NOLINT
378
    std::string lookup_cache_static  = ""; // path of static ngram cache file for lookup decoding           // NOLINT
379
    std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding          // NOLINT
380
    std::string logits_file          = ""; // file for saving *all* logits                                  // NOLINT
381
382
    // llama-debug specific options
383
    std::string logits_output_dir = "data"; // directory for saving logits output files                     // NOLINT
384
    bool        save_logits       = false;  // whether to save logits to files                              // NOLINT
385
    std::vector<std::string> tensor_filter; // filter tensor names for debug output (regex)                 // NOLINT
386
387
    std::vector<std::string> in_files;   // all input files
388
    std::vector<std::string> antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts)
389
    std::vector<llama_model_kv_override> kv_overrides;
390
    std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;
391
392
    bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_adapter_lora_apply)
393
    std::vector<common_adapter_lora_info> lora_adapters; // lora adapter path with user defined scale
394
395
    std::vector<common_control_vector_load_info> control_vectors; // control vector with user defined scale
396
397
    int32_t verbosity                  = 3;  // LOG_LEVEL_INFO
398
    int32_t control_vector_layer_start = -1; // layer range for control vector
399
    int32_t control_vector_layer_end   = -1; // layer range for control vector
400
    bool    offline                    = false;
401
402
    int32_t ppl_stride      = 0;     // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
403
    int32_t ppl_output_type = 0;     // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
404
                                     //                                       (which is more convenient to use for plotting)
405
                                     //
406
    bool   hellaswag        = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
407
    size_t hellaswag_tasks  = 400;   // number of tasks to use when computing the HellaSwag score
408
409
    bool   winogrande       = false; // compute Winogrande score over random tasks from datafile supplied in prompt
410
    size_t winogrande_tasks = 0;     // number of tasks to use when computing the Winogrande score. If 0, all tasks will be computed
411
412
    bool   multiple_choice  = false;  // compute TruthfulQA score over random tasks from datafile supplied in prompt
413
    size_t multiple_choice_tasks = 0; // number of tasks to use when computing the TruthfulQA score. If 0, all tasks will be computed
414
415
    bool   kl_divergence    = false; // compute KL divergence
416
417
    bool usage             = false; // print usage
418
    bool completion        = false; // print source-able completion script
419
    bool use_color         = false; // use color to distinguish generations and inputs
420
    bool special           = false; // enable special token output
421
    bool interactive       = false; // interactive mode
422
    bool interactive_first = false; // wait for user input immediately
423
    bool prompt_cache_all  = false; // save user input and generations to prompt cache
424
    bool prompt_cache_ro   = false; // open the prompt cache read-only and do not update it
425
426
    bool escape            = true;  // escape "\n", "\r", "\t", "\'", "\"", and "\\"
427
    bool multiline_input   = false; // reverse the usage of `\`
428
    bool simple_io         = false; // improves compatibility with subprocesses and limited consoles
429
    bool cont_batching     = true;  // insert new sequences for decoding on-the-fly
430
    bool no_perf           = false; // disable performance metrics
431
    bool show_timings      = true;  // show timing information on CLI
432
    bool ctx_shift         = false; // context shift on infinite text generation
433
    bool swa_full          = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
434
    bool kv_unified        = false; // enable unified KV cache
435
436
    bool input_prefix_bos  = false; // prefix BOS to user inputs, preceding input_prefix
437
    bool use_mmap          = true;  // enable mmap to use filesystem cache
438
    bool use_direct_io     = true;  // read from disk without buffering for faster model loading
439
    bool use_mlock         = false; // use mlock to keep model in memory
440
    bool verbose_prompt    = false; // print prompt tokens before generation
441
    bool display_prompt    = true;  // print prompt before generation
442
    bool no_kv_offload     = false; // disable KV offloading
443
    bool warmup            = true;  // warmup run
444
    bool check_tensors     = false; // validate tensor data
445
    bool no_op_offload     = false; // globally disable offload host tensor operations to device
446
    bool no_extra_bufts    = false; // disable extra buffer types (used for weight repacking)
447
    bool no_host           = false; // bypass host buffer allowing extra buffers to be used
448
449
    bool single_turn       = false; // single turn chat conversation
450
451
    ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
452
    ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V
453
454
    common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;
455
456
    // multimodal models (see tools/mtmd)
457
    struct common_params_model mmproj;
458
    bool mmproj_use_gpu = true;     // use GPU for multimodal model
459
    bool no_mmproj = false;         // explicitly disable multimodal model
460
    std::vector<std::string> image; // path to image file(s)
461
    int image_min_tokens = -1;
462
    int image_max_tokens = -1;
463
464
    // finetune
465
    struct lr_opt lr;
466
    enum ggml_opt_optimizer_type optimizer = GGML_OPT_OPTIMIZER_TYPE_ADAMW;
467
    float val_split = 0.05f; // fraction of the data used for the validation set
468
469
    // embedding
470
    bool embedding         = false; // get only sentence embedding
471
    int32_t embd_normalize = 2;     // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
472
    std::string embd_out   = "";    // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix
473
    std::string embd_sep   = "\n";  // separator of embeddings
474
    std::string cls_sep    = "\t";  // separator of classification sequences
475
476
    // server params
477
    int32_t port              = 8080;         // server listens on this network port
478
    int32_t timeout_read      = 600;          // http read timeout in seconds
479
    int32_t timeout_write     = timeout_read; // http write timeout in seconds
480
    int32_t n_threads_http    = -1;           // number of threads to process HTTP requests (TODO: support threadpool)
481
    int32_t n_cache_reuse     = 0;            // min chunk size to reuse from the cache via KV shifting
482
    bool    cache_prompt      = true;         // whether to enable prompt caching
483
    int32_t n_ctx_checkpoints = 8;            // max number of context checkpoints per slot
484
    int32_t cache_ram_mib     = 8192;         // -1 = no limit, 0 = disabled, 1 = 1 MiB, etc.
485
486
    std::string hostname      = "127.0.0.1";
487
    std::string public_path   = "";                                                                         // NOLINT
488
    std::string api_prefix    = "";                                                                         // NOLINT
489
    std::string chat_template = "";                                                                         // NOLINT
490
    bool use_jinja = true;                                                                                  // NOLINT
491
    bool enable_chat_template = true;
492
    common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
493
    int reasoning_budget = -1;
494
    bool prefill_assistant = true; // if true, any trailing assistant message will be prefilled into the response
495
    int sleep_idle_seconds = -1;   // if >0, server will sleep after this many seconds of idle time
496
497
    std::vector<std::string> api_keys;
498
499
    std::string ssl_file_key  = "";                                                                         // NOLINT
500
    std::string ssl_file_cert = "";                                                                         // NOLINT
501
502
    std::map<std::string, std::string> default_template_kwargs;
503
504
    // webui configs
505
    bool webui = true;
506
    std::string webui_config_json;
507
508
    // "advanced" endpoints are disabled by default for better security
509
    bool endpoint_slots   = true;
510
    bool endpoint_props   = false; // only control POST requests, not GET
511
    bool endpoint_metrics = false;
512
513
    // router server configs
514
    std::string models_dir    = ""; // directory containing models for the router server
515
    std::string models_preset = ""; // directory containing model presets for the router server
516
    int models_max = 4;             // maximum number of models to load simultaneously
517
    bool models_autoload = true;    // automatically load models when requested via the router server
518
519
    bool log_json = false;
520
521
    std::string slot_save_path;
522
    std::string media_path; // path to directory for loading media files
523
524
    float slot_prompt_similarity = 0.1f;
525
526
    // batched-bench params
527
    bool is_pp_shared   = false;
528
    bool is_tg_separate = false;
529
530
    std::vector<int32_t> n_pp;
531
    std::vector<int32_t> n_tg;
532
    std::vector<int32_t> n_pl;
533
534
    // retrieval params
535
    std::vector<std::string> context_files; // context files to embed
536
537
    int32_t chunk_size = 64; // chunk size for context embedding
538
539
    std::string chunk_separator = "\n"; // chunk separator for context embedding
540
541
    // passkey params
542
    int32_t n_junk = 250; // number of times to repeat the junk text
543
    int32_t i_pos  = -1;  // position of the passkey in the junk text
544
545
    // imatrix params
546
    int32_t n_out_freq  = 10; // output the imatrix every n_out_freq iterations
547
    int32_t n_save_freq =  0; // save the imatrix every n_save_freq iterations
548
    int32_t i_chunk     =  0; // start processing from this chunk
549
    int8_t  imat_dat    =  0; // whether the legacy imatrix.dat format should be output (gguf <= 0 < dat)
550
551
    bool process_output  = false; // collect data for the output tensor
552
    bool compute_ppl     = true;  // whether to compute perplexity
553
    bool show_statistics = false; // show imatrix statistics per tensor
554
    bool parse_special   = false; // whether to parse special tokens during imatrix tokenization
555
556
    // cvector-generator params
557
    int n_pca_batch = 100;
558
    int n_pca_iterations = 1000;
559
    dimre_method cvector_dimre_method = DIMRE_METHOD_PCA;
560
    std::string cvector_positive_file = "tools/cvector-generator/positive.txt";
561
    std::string cvector_negative_file = "tools/cvector-generator/negative.txt";
562
563
    bool spm_infill = false; // suffix/prefix/middle pattern for infill
564
565
    // batched-bench params
566
    bool batched_bench_output_jsonl = false;
567
568
    // common params
569
    std::string out_file; // output filename for all example programs
570
    // optional callback for model loading progress and cancellation:
571
    // called with a progress value between 0.0 and 1.0.
572
    // return false from callback to abort model loading or true to continue
573
    llama_progress_callback load_progress_callback = NULL;
574
    void *                  load_progress_callback_user_data = NULL;
575
576
0
    bool has_speculative() const {
577
0
        return !speculative.model.path.empty() || !speculative.model.hf_repo.empty();
578
0
    }
579
};
580
581
// call once at the start of a program if it uses libcommon
582
// initializes the logging system and prints info about the build
583
void common_init();
584
585
std::string common_params_get_system_info(const common_params & params);
586
587
bool parse_cpu_range(const std::string & range, bool(&boolmask)[GGML_MAX_N_THREADS]);
588
bool parse_cpu_mask(const std::string & mask, bool(&boolmask)[GGML_MAX_N_THREADS]);
589
void postprocess_cpu_params(cpu_params & cpuparams, const cpu_params * role_model = nullptr);
590
bool set_process_priority(enum ggml_sched_priority prio);
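A minimal sketch combining cpu_params with the mask helpers above; the range string and the idea of enabling strict placement are assumptions, not documented behavior:

    cpu_params cp;
    if (parse_cpu_range("0-3", cp.cpumask)) {  // hypothetical: pin to CPUs 0..3
        cp.mask_valid = true;
        cp.strict_cpu = true;
    }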
591
592
//
593
// String utils
594
//
595
596
#ifdef __GNUC__
597
#    if defined(__MINGW32__) && !defined(__clang__)
598
#        define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
599
#    else
600
#        define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
601
#    endif
602
#else
603
#    define LLAMA_COMMON_ATTRIBUTE_FORMAT(...)
604
#endif
605
606
LLAMA_COMMON_ATTRIBUTE_FORMAT(1, 2)
607
std::string string_format(const char * fmt, ...);
608
609
std::string string_strip(const std::string & str);
610
std::string string_get_sortable_timestamp();
611
612
std::string string_join(const std::vector<std::string> & values, const std::string & separator);
613
std::vector<std::string> string_split(const std::string & str, const std::string & delimiter);
614
std::string string_repeat(const std::string & str, size_t n);
615
616
void string_replace_all(std::string & s, const std::string & search, const std::string & replace);
617
618
std::string regex_escape(const std::string & s);
619
620
template<class T>
621
static std::vector<T> string_split(const std::string & str, char delim) {
622
    static_assert(!std::is_same<T, std::string>::value, "Please use the specialized version for std::string");
623
    std::vector<T> values;
624
    std::istringstream str_stream(str);
625
    std::string token;
626
    while (std::getline(str_stream, token, delim)) {
627
        T value;
628
        std::istringstream token_stream(token);
629
        token_stream >> value;
630
        values.push_back(value);
631
    }
632
    return values;
633
}
634
635
template<>
636
std::vector<std::string> string_split<std::string>(const std::string & input, char separator)
637
0
{
638
0
    std::vector<std::string> parts;
639
0
    size_t begin_pos = 0;
640
0
    size_t separator_pos = input.find(separator);
641
0
    while (separator_pos != std::string::npos) {
642
0
        std::string part = input.substr(begin_pos, separator_pos - begin_pos);
643
0
        parts.emplace_back(part);
644
0
        begin_pos = separator_pos + 1;
645
0
        separator_pos = input.find(separator, begin_pos);
646
0
    }
647
0
    parts.emplace_back(input.substr(begin_pos, separator_pos - begin_pos));
648
0
    return parts;
649
0
}
Unexecuted instantiation: fuzz_inference.cpp:std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > string_split<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, char)
Unexecuted instantiation: common.cpp:std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > string_split<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, char)
Unexecuted instantiation: log.cpp:std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > string_split<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, char)
Unexecuted instantiation: sampling.cpp:std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > string_split<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, char)
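A usage sketch of the two string_split overloads above (the input strings are made up):

    std::vector<int>         xs    = string_split<int>("1,2,3", ',');          // {1, 2, 3}
    std::vector<std::string> parts = string_split<std::string>("a,,b", ',');   // {"a", "", "b"}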
650
651
static bool string_starts_with(const std::string & str,
652
0
                               const std::string & prefix) {  // While we wait for C++20's std::string::starts_with...
653
0
    return str.rfind(prefix, 0) == 0;
654
0
}
Unexecuted instantiation: fuzz_inference.cpp:string_starts_with(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&)
Unexecuted instantiation: common.cpp:string_starts_with(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&)
Unexecuted instantiation: log.cpp:string_starts_with(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&)
Unexecuted instantiation: sampling.cpp:string_starts_with(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&)
655
656
// While we wait for C++20's std::string::ends_with...
657
bool string_ends_with(const std::string_view & str, const std::string_view & suffix);
658
bool string_remove_suffix(std::string & str, const std::string_view & suffix);
659
size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop);
660
661
bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
662
void string_process_escapes(std::string & input);
663
664
std::string string_from(bool value);
665
std::string string_from(const std::vector<int> & values);
666
std::string string_from(const struct llama_context * ctx, const std::vector<llama_token> & tokens);
667
std::string string_from(const struct llama_context * ctx, const struct llama_batch & batch);
668
669
//
670
// Filesystem utils
671
//
672
673
bool fs_validate_filename(const std::string & filename, bool allow_subdirs = false);
674
bool fs_create_directory_with_parents(const std::string & path);
675
bool fs_is_directory(const std::string & path);
676
677
std::string fs_get_cache_directory();
678
std::string fs_get_cache_file(const std::string & filename);
679
680
struct common_file_info {
681
    std::string path;
682
    std::string name;
683
    size_t      size = 0; // in bytes
684
    bool        is_dir = false;
685
};
686
std::vector<common_file_info> fs_list(const std::string & path, bool include_directories);
687
688
//
689
// TTY utils
690
//
691
692
// Auto-detect if colors can be enabled based on terminal and environment
693
bool tty_can_use_colors();
694
695
//
696
// Model utils
697
//
698
699
struct common_sampler;
700
701
// note: defines the model, context, samplers, etc. lifetimes
702
struct common_init_result {
703
    common_init_result(common_params & params);
704
    ~common_init_result();
705
706
    llama_model * model();
707
    llama_context * context();
708
709
    common_sampler * sampler(llama_seq_id seq_id);
710
    void reset_samplers();
711
712
    std::vector<llama_adapter_lora_ptr> & lora();
713
714
    void free_context();
715
716
private:
717
    struct impl;
718
    std::unique_ptr<impl> pimpl;
719
};
720
721
using common_init_result_ptr = std::unique_ptr<common_init_result>;
722
723
common_init_result_ptr common_init_from_params(common_params & params);
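A hedged initialization sketch built from the declarations above; the model path is hypothetical and error handling is reduced to a null check:

    common_params params;
    params.model.path = "/models/example-7b.gguf";    // hypothetical local path
    common_init();                                    // logging + build info, once per program
    common_init_result_ptr init = common_init_from_params(params);
    if (!init || init->model() == nullptr || init->context() == nullptr) {
        die("failed to load the model");
    }
    llama_model   * model = init->model();
    llama_context * lctx  = init->context();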
724
725
struct llama_model_params     common_model_params_to_llama  (      common_params & params);
726
struct llama_context_params   common_context_params_to_llama(const common_params & params);
727
struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params);
728
729
// clear LoRA adapters from context, then apply new list of adapters
730
void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);
731
732
std::string                   get_model_endpoint();
733
734
//
735
// Batch utils
736
//
737
738
void common_batch_clear(struct llama_batch & batch);
739
740
void common_batch_add(
741
                 struct llama_batch & batch,
742
                        llama_token   id,
743
                          llama_pos   pos,
744
    const std::vector<llama_seq_id> & seq_ids,
745
                               bool   logits);
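A small sketch of the batch helpers above, feeding a prompt on sequence 0 and requesting logits only for the final token (prompt_tokens and the context are assumed to exist):

    llama_batch batch = llama_batch_init(512, 0, 1);  // capacity 512 tokens, 1 sequence
    common_batch_clear(batch);
    for (size_t i = 0; i < prompt_tokens.size(); ++i) {
        const bool want_logits = (i == prompt_tokens.size() - 1);
        common_batch_add(batch, prompt_tokens[i], (llama_pos) i, { 0 }, want_logits);
    }
    // llama_decode(lctx, batch); ... then llama_batch_free(batch);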
746
747
//
748
// Token utils
749
//
750
751
// longest common prefix
752
size_t common_lcp(const llama_tokens & a, const llama_tokens & b);
753
754
// longest common subsequence
755
size_t common_lcs(const llama_tokens & a, const llama_tokens & b);
756
757
//
758
// Vocab utils
759
//
760
761
// tokenizes a string into a vector of tokens
762
// should work similar to Python's `tokenizer.encode`
763
std::vector<llama_token> common_tokenize(
764
  const struct llama_context * ctx,
765
           const std::string & text,
766
                        bool   add_special,
767
                        bool   parse_special = false);
768
769
std::vector<llama_token> common_tokenize(
770
    const struct llama_vocab * vocab,
771
           const std::string & text,
772
                        bool   add_special,
773
                        bool   parse_special = false);
774
775
// tokenizes a token into a piece, optionally renders special/control tokens
776
// should work similar to Python's `tokenizer.id_to_piece`
777
std::string common_token_to_piece(
778
        const struct llama_context * ctx,
779
                       llama_token   token,
780
                       bool          special = true);
781
782
std::string common_token_to_piece(
783
          const struct llama_vocab * vocab,
784
                       llama_token   token,
785
                       bool          special = true);
786
787
// detokenizes a vector of tokens into a string
788
// should work similar to Python's `tokenizer.decode`
789
// optionally renders special/control tokens
790
std::string common_detokenize(
791
            const struct llama_context * ctx,
792
        const std::vector<llama_token> & tokens,
793
                                  bool   special = true);
794
795
std::string common_detokenize(
796
              const struct llama_vocab * vocab,
797
        const std::vector<llama_token> & tokens,
798
                                  bool   special = true);
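A round-trip sketch for the vocab utilities above (the context pointer is assumed to be valid):

    std::vector<llama_token> toks = common_tokenize(ctx, "Hello world", /*add_special=*/true);
    std::string piece = common_token_to_piece(ctx, toks[0]);             // single token as text
    std::string text  = common_detokenize(ctx, toks, /*special=*/false); // drop special tokens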
799
800
//
801
// Embedding utils
802
//
803
804
// TODO: replace embd_norm with an enum
805
void common_embd_normalize(const float * inp, float * out, int n, int embd_norm);
806
807
float common_embd_similarity_cos(const float * embd1, const float * embd2, int n);
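A hedged sketch of the embedding helpers above; n_embd and the two raw embeddings are assumed, and embd_norm = 2 selects the euclidean case per the comment on common_embd_normalize:

    std::vector<float> a_norm(n_embd), b_norm(n_embd);
    common_embd_normalize(a_raw.data(), a_norm.data(), n_embd, /*embd_norm=*/2);
    common_embd_normalize(b_raw.data(), b_norm.data(), n_embd, /*embd_norm=*/2);
    const float sim = common_embd_similarity_cos(a_norm.data(), b_norm.data(), n_embd); // in [-1, 1]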
808
809
//
810
// Control vector utils
811
//
812
813
struct common_control_vector_data {
814
    int n_embd;
815
816
    // stores data for layers [1, n_layer] where n_layer = data.size() / n_embd
817
    std::vector<float> data;
818
};
819
820
struct common_control_vector_load_info {
821
    float strength;
822
823
    std::string fname;
824
};
825
826
// Load control vectors, scale each by strength, and add them together.
827
// On error, returns {-1, empty}
828
common_control_vector_data common_control_vector_load(const std::vector<common_control_vector_load_info> & load_infos);
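A usage sketch for the loader above; the file names and strengths are made up, and the error convention ({-1, empty}) is taken from the comment:

    std::vector<common_control_vector_load_info> infos = {
        {  0.8f, "happy.gguf"  },   // hypothetical control vector files
        { -0.4f, "formal.gguf" },
    };
    common_control_vector_data cv = common_control_vector_load(infos);
    if (cv.n_embd == -1) {
        die("failed to load control vectors");
    }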
829
830
//
831
// Split utils
832
//
833
834
namespace {
835
836
const char * const LLM_KV_SPLIT_NO            = "split.no";
837
const char * const LLM_KV_SPLIT_COUNT         = "split.count";
838
const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
839
840
}
841
842
//
843
// MoE utils
844
//
845
846
const char * const LLM_FFN_EXPS_REGEX = "\\.ffn_(up|down|gate)_(ch|)exps";
847
848
0
static std::string llm_ffn_exps_block_regex(int idx) {
849
0
    return string_format("blk\\.%d%s", idx, LLM_FFN_EXPS_REGEX);
850
0
}
Unexecuted instantiation: fuzz_inference.cpp:llm_ffn_exps_block_regex(int)
Unexecuted instantiation: common.cpp:llm_ffn_exps_block_regex(int)
Unexecuted instantiation: log.cpp:llm_ffn_exps_block_regex(int)
Unexecuted instantiation: sampling.cpp:llm_ffn_exps_block_regex(int)
851
852
0
static llama_model_tensor_buft_override llm_ffn_exps_cpu_override() {
853
0
    return { LLM_FFN_EXPS_REGEX, ggml_backend_cpu_buffer_type() };
854
0
}
Unexecuted instantiation: fuzz_inference.cpp:llm_ffn_exps_cpu_override()
Unexecuted instantiation: common.cpp:llm_ffn_exps_cpu_override()
Unexecuted instantiation: log.cpp:llm_ffn_exps_cpu_override()
Unexecuted instantiation: sampling.cpp:llm_ffn_exps_cpu_override()
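A sketch of how these MoE helpers could be used to keep expert tensors on the CPU; attaching the override to common_params::tensor_buft_overrides is an assumption about typical usage, not something this header mandates:

    common_params params;
    // route all MoE expert tensors to the CPU buffer type
    params.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
    // or build a per-block pattern, e.g. "blk\.12\.ffn_(up|down|gate)_(ch|)exps"
    std::string blk12 = llm_ffn_exps_block_regex(12);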
855
856
//
857
// training utils
858
//
859
860
ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride);
861
862
// "adamw" or "sgd" (case insensitive)
863
enum ggml_opt_optimizer_type common_opt_get_optimizer(const char *);