Coverage Report

Created: 2026-03-21 06:50

/src/llama.cpp/src/llama-hparams.h
#pragma once

#include "llama.h"

#include <array>
#include <cassert>
#include <type_traits> // for the std::is_trivially_copyable check at the end of this file

// bump if necessary
#define LLAMA_MAX_LAYERS  512
#define LLAMA_MAX_EXPERTS 512 // Qwen3 Next

enum llama_expert_gating_func_type {
    LLAMA_EXPERT_GATING_FUNC_TYPE_NONE           = 0,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX        = 1,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID        = 2,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT = 3, // applied to the router weights instead of the logits
};
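
// For intuition, the gating functions applied to the per-expert router
// logits e_i (illustrative sketch, assuming the standard definitions):
//
//   SOFTMAX: w_i = exp(e_i) / sum_j exp(e_j)   -- weights sum to 1 across experts
//   SIGMOID: w_i = 1 / (1 + exp(-e_i))         -- each expert gated independently
//
// SOFTMAX_WEIGHT applies the softmax to the selected router weights
// instead of the raw logits, per the comment above.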

enum llama_swa_type {
    LLAMA_SWA_TYPE_NONE      = 0,
    LLAMA_SWA_TYPE_STANDARD  = 1,
    LLAMA_SWA_TYPE_CHUNKED   = 2,
    LLAMA_SWA_TYPE_SYMMETRIC = 3,
};

struct llama_hparams_posnet {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams_convnext {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams {
    bool vocab_only;
    bool no_alloc;
    bool rope_finetuned;
    bool use_par_res;
    bool swin_norm;

    uint32_t n_ctx_train; // context size the model was trained on
    uint32_t n_embd;
    uint32_t n_layer;
    int32_t n_layer_kv_from_start = -1; // if non-negative, only the first n_layer_kv_from_start layers have a KV cache
    uint32_t n_expert = 0;
    uint32_t n_expert_used = 0;
    uint32_t n_rel_attn_bkts = 0;

    // different head sizes for full-attention and SWA layers
    uint32_t n_embd_head_k_full; // dimension of keys (d_k); d_q is assumed to be the same, but there are n_head q heads and only n_head_kv k-v heads
    uint32_t n_embd_head_v_full; // dimension of values (d_v), aka n_embd_head
    uint32_t n_embd_head_k_swa;
    uint32_t n_embd_head_v_swa;

    // different RoPE dimensions for full-attention and SWA layers
    uint32_t n_rot_full;
    uint32_t n_rot_swa;

    // note: deepseek2 with MLA converts the attention into MQA with larger heads, then decompresses back to MHA
    uint32_t n_embd_head_k_mla_impl = 0;
    uint32_t n_embd_head_v_mla_impl = 0;

    // for WavTokenizer
    struct llama_hparams_posnet   posnet;
    struct llama_hparams_convnext convnext;

    uint32_t n_shortconv_l_cache  = 0;

    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;

    uint32_t n_layer_dense_lead = 0;
    uint32_t n_lora_q           = 0;
    uint32_t n_lora_kv          = 0;
    uint32_t n_ff_exp           = 0;
    uint32_t n_ff_shexp         = 0;
    uint32_t n_ff_chexp         = 0;
    uint32_t n_expert_shared    = 0;
    uint32_t n_norm_groups      = 0;
    uint32_t n_expert_groups    = 0;
    uint32_t n_group_used       = 0;
    uint32_t n_group_experts    = 0;

    float    expert_group_scale   = 0.05f;
    float    expert_weights_scale = 0.0f;
    bool     expert_weights_norm  = false;
    uint32_t expert_gating_func   = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
    uint32_t moe_every_n_layers   = 0;
    uint32_t moe_latent_size      = 0;
    uint32_t nextn_predict_layers = 0;

    float f_norm_eps;
    float f_norm_rms_eps;
    float f_norm_group_eps;

    float f_attn_logit_softcapping   = 50.0f;
    float f_router_logit_softcapping = 30.0f;
    float f_final_logit_softcapping  = 30.0f;
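
    // These caps are typically consumed as a smooth clamp (stated here as an
    // assumption about how the values are used; the Gemma-2-style softcap):
    //
    //   softcap(x) = cap * tanh(x / cap)
    //
    // e.g. with cap = 50: softcap(10)  ~= 9.87  (nearly identity),
    //                     softcap(200) ~= 49.97 (pinned just below the cap)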

    // for RWKV
    uint32_t rescale_every_n_layers = 0;
    uint32_t time_mix_extra_dim     = 0;
    uint32_t time_decay_extra_dim   = 0;
    uint32_t wkv_head_size          = 0;
    uint32_t token_shift_count      = 2;
    uint32_t n_lora_decay           = 0;
    uint32_t n_lora_iclr            = 0;
    uint32_t n_lora_value_res_mix   = 0;
    uint32_t n_lora_gate            = 0;

    float    rope_attn_factor = 1.0f;
    float    rope_freq_base_train;
    float    rope_freq_base_train_swa  = 10000.0f;
    float    rope_freq_scale_train;
    float    rope_freq_scale_train_swa = 1.0f;

    uint32_t n_ctx_orig_yarn;
    float    rope_yarn_log_mul = 0.0f;

    float    yarn_ext_factor  = -1.0f;
    float    yarn_attn_factor =  1.0f;
    float    yarn_beta_fast   = 32.0f;
    float    yarn_beta_slow   =  1.0f;

    std::array<int, 4> rope_sections;

    // Sliding Window Attention (SWA)
    llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
    // the size of the sliding window (0 = no SWA)
    uint32_t n_swa = 0;
    // if swa_layers[il] == 1, then layer il is SWA
    // if swa_layers[il] == 0, then layer il is dense (i.e. non-SWA)
    // by default, all layers are dense
    // note: using uint32_t for compatibility reasons
    std::array<uint32_t, LLAMA_MAX_LAYERS> swa_layers;

    // for State Space Models
    uint32_t ssm_d_conv  = 0;
    uint32_t ssm_d_inner = 0;
    uint32_t ssm_d_state = 0;
    uint32_t ssm_dt_rank = 0;
    uint32_t ssm_n_group = 0;

    // for Kimi Linear KDA
    uint32_t n_embd_head_kda = 0;

    // for hybrid state space models
    std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;

    bool ssm_dt_b_c_rms = false;

    float f_clamp_kqv      = 0.0f;
    float f_max_alibi_bias = 0.0f;
    float f_logit_scale    = 0.0f;

    // additional scale factors (Granite/Granite MoE)
    float f_residual_scale  = 0.0f;
    float f_embedding_scale = 0.0f;
    float f_attention_scale = 0.0f;

    // grok-2
    float    f_attn_out_scale = 0.0f;
    uint32_t attn_temp_length = 0;

    bool causal_attn   = true;
    bool use_alibi     = false;
    bool attn_soft_cap = false;
    bool use_kq_norm   = false;

    // for classifiers
    uint32_t n_cls_out = 1;

    // output embedding dimension (0 = use n_embd)
    uint32_t n_embd_out_impl = 0;

    // llama4 smallthinker
    uint32_t n_moe_layer_step        = 0;
    uint32_t n_no_rope_layer_step    = 4;
    uint32_t n_attn_temp_floor_scale = 0;
    float    f_attn_temp_scale       = 0.0f;
    float    f_attn_temp_offset      = 0.0f; // offset for the position index

    // gemma3n altup
    uint32_t n_altup      = 4; // altup_num_inputs
    uint32_t i_altup_act  = 0; // altup_active_idx
    uint32_t laurel_rank  = 64;
    uint32_t n_embd_altup = 256;

    // needed for sentence-transformers dense layers
    uint32_t dense_2_feat_in  = 0; // in_features of the 2_Dense
    uint32_t dense_2_feat_out = 0; // out_features of the 2_Dense
    uint32_t dense_3_feat_in  = 0; // in_features of the 3_Dense
    uint32_t dense_3_feat_out = 0; // out_features of the 3_Dense

    // xIELU
    std::array<float, LLAMA_MAX_LAYERS> xielu_alpha_n;
    std::array<float, LLAMA_MAX_LAYERS> xielu_alpha_p;
    std::array<float, LLAMA_MAX_LAYERS> xielu_beta;
    std::array<float, LLAMA_MAX_LAYERS> xielu_eps;

    // DSA (DeepSeek sparse attention)
    uint32_t indexer_n_head    = 0;
    uint32_t indexer_head_size = 0;
    uint32_t indexer_top_k     = 0;

    // qwen3vl deepstack
    uint32_t n_deepstack_layers = 0;

    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
    // ref: https://github.com/ggml-org/llama.cpp/pull/8141
    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
    uint32_t    dec_n_layer        = 0;

    enum llama_pooling_type      pooling_type            = LLAMA_POOLING_TYPE_NONE;
    enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;

    // Step35: optional per-layer clamps for (Swi)GLU
    std::array<float, LLAMA_MAX_LAYERS> swiglu_clamp_exp;   // clamping for the expert FFN
    std::array<float, LLAMA_MAX_LAYERS> swiglu_clamp_shexp; // clamping for the shared expert FFN

    // n_pattern means that every nth layer is dense (i.e. non-SWA)
    // dense_first means whether the pattern starts with a dense layer
    // note that if n_pattern == 0, all layers are SWA
    //           if n_pattern == 1, all layers are dense
    // example 1: n_pattern = 3, dense_first = false
    //   il == 0: swa
    //   il == 1: swa
    //   il == 2: dense
    //   il == 3: swa
    //   il == 4: swa
    //   il == 5: dense
    //   il == 6: swa
    //   etc ...
    // example 2: n_pattern = 2, dense_first = true
    //   il == 0: dense
    //   il == 1: swa
    //   il == 2: dense
    //   il == 3: swa
    //   etc ...
    void set_swa_pattern(uint32_t n_pattern, bool dense_first = false);
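
    // Illustrative sketch of the pattern described above (an assumption
    // derived from the comment, not necessarily the exact code in
    // llama-hparams.cpp):
    //
    //     for (uint32_t il = 0; il < n_layer; ++il) {
    //         if (n_pattern == 0) {
    //             swa_layers[il] = 1;                                   // all layers SWA
    //         } else if (dense_first) {
    //             swa_layers[il] = (il % n_pattern == 0) ? 0 : 1;       // dense at il = 0, n, 2n, ...
    //         } else {
    //             swa_layers[il] = ((il + 1) % n_pattern == 0) ? 0 : 1; // dense at il = n-1, 2n-1, ...
    //         }
    //     }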

    // return true if one of the layers is SWA
    bool is_swa_any() const;

    uint32_t n_head(uint32_t il = 0) const;

    uint32_t n_head_kv(uint32_t il = 0) const;

    uint32_t n_ff(uint32_t il = 0) const;

    uint32_t n_gqa(uint32_t il = 0) const;

    uint32_t n_rot(uint32_t il = 0) const;

    // dimension of main + auxiliary input embeddings
    uint32_t n_embd_inp() const;

    // dimension of output embeddings
    uint32_t n_embd_out() const;

    // dimension of key/value embeddings for each head (per layer)
    uint32_t n_embd_head_k(uint32_t il = 0) const;
    uint32_t n_embd_head_v(uint32_t il = 0) const;

    // dimension of key embeddings across all k-v heads
    uint32_t n_embd_k_gqa(uint32_t il = 0) const;

    // dimension of value embeddings across all k-v heads
    uint32_t n_embd_v_gqa(uint32_t il = 0) const;
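
    // Worked example of the dimension arithmetic (assumed base-case
    // relations; the real accessors also handle special cases such as MLA):
    //
    //   n_gqa(il)        = n_head(il) / n_head_kv(il)        // query heads per k-v head
    //   n_embd_k_gqa(il) = n_embd_head_k(il) * n_head_kv(il)
    //   n_embd_v_gqa(il) = n_embd_head_v(il) * n_head_kv(il)
    //
    // e.g. with n_head = 32, n_head_kv = 8, n_embd_head_k = 128:
    //   n_gqa = 4, n_embd_k_gqa = 1024 (K-cache row size per token for this layer)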

    // true if any layer has a different n_embd_k_gqa/n_embd_v_gqa
    bool is_n_embd_k_gqa_variable() const;
    bool is_n_embd_v_gqa_variable() const;

    // return the maximum n_embd_k_gqa/n_embd_v_gqa across all layers
    uint32_t n_embd_k_gqa_max() const;
    uint32_t n_embd_v_gqa_max() const;

    // dimension of the rolling state embeddings
    // corresponds to Mamba's conv_states size or RWKV's token_shift states size
    uint32_t n_embd_r() const;

    // dimension of the recurrent state embeddings
    uint32_t n_embd_s() const;
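
    // Rough intuition for these sizes (an assumption based on the comments
    // above, not the authoritative definitions, which live in llama-hparams.cpp):
    //
    //   RWKV:        n_embd_r() ~ token_shift_count * n_embd      // token_shift state
    //   Mamba-style: n_embd_r() ~ (ssm_d_conv - 1) * ssm_d_inner  // rolling conv window
    //   Mamba-style: n_embd_s() ~ ssm_d_state * ssm_d_inner       // recurrent SSM state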

    // whether or not the given layer is recurrent (for hybrid models)
    bool is_recurrent(uint32_t il) const;

    uint32_t n_pos_per_embd() const;

    bool is_swa(uint32_t il) const;

    // note: currently supported only when either all or none of the layers are MLA
    bool is_mla() const;

    uint32_t n_embd_head_k_mla() const;
    uint32_t n_embd_head_v_mla() const;

    bool has_kv(uint32_t il) const;

    // number of layers for which has_kv() returns true
    uint32_t n_layer_kv() const;

    // note that this function uses different SWA parameters from those in the hparams
    // note: inlined on purpose for performance reasons
    // TODO: think of a better place for this function
    // TODO: pack the SWA params in a struct?
    // (every line of this function has execution count 0 in this report, i.e. it is uncovered)
    static bool is_masked_swa(uint32_t n_swa, llama_swa_type swa_type, llama_pos p0, llama_pos p1) {
        assert(p0 >= 0 && p1 >= 0);

        switch (swa_type) {
            case LLAMA_SWA_TYPE_NONE:
                {
                    // no sliding window - nothing is ever masked here
                } break;
            case LLAMA_SWA_TYPE_STANDARD:
                {
                    // mask keys that trail the query by n_swa positions or more
                    if (p1 - p0 >= (int32_t) n_swa) {
                        return true;
                    }
                } break;
            case LLAMA_SWA_TYPE_CHUNKED:
                {
                    // mask keys that fall in an earlier chunk of size n_swa than the query
                    const llama_pos pos_chunk_start = (p1 / n_swa) * n_swa;

                    if (p0 < pos_chunk_start) {
                        return true;
                    }
                } break;
            case LLAMA_SWA_TYPE_SYMMETRIC:
                {
                    const int32_t half_n_swa = (int32_t) n_swa / 2;
                    const int32_t pos_diff = p1 - p0;

                    // mask if outside the symmetric window
                    if (pos_diff < -half_n_swa || pos_diff > half_n_swa) {
                        return true;
                    }
                } break;
        }

        return false;
    }

    bool use_mrope() const;
};

static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
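
To make the SWA masking semantics concrete, here is a small hypothetical driver for is_masked_swa. The values are made up for illustration, and the include name assumes the header path shown at the top of this report.

#include "llama-hparams.h"

#include <cstdio>

int main() {
    const uint32_t n_swa = 4;

    // STANDARD: mask once the key trails the query by n_swa or more positions
    printf("%d\n", llama_hparams::is_masked_swa(n_swa, LLAMA_SWA_TYPE_STANDARD, 1, 5));  // 1 (5 - 1 >= 4)

    // CHUNKED: the chunk of p1 = 5 starts at 4, and p0 = 3 lies before it
    printf("%d\n", llama_hparams::is_masked_swa(n_swa, LLAMA_SWA_TYPE_CHUNKED, 3, 5));   // 1 (3 < 4)

    // SYMMETRIC: |p1 - p0| = 3 exceeds n_swa / 2 = 2
    printf("%d\n", llama_hparams::is_masked_swa(n_swa, LLAMA_SWA_TYPE_SYMMETRIC, 0, 3)); // 1 (3 > 2)

    // NONE: this helper never masks
    printf("%d\n", llama_hparams::is_masked_swa(n_swa, LLAMA_SWA_TYPE_NONE, 0, 100));    // 0
}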