/src/llama.cpp/src/llama-hparams.h
#pragma once

#include "llama.h"

#include <array>
#include <cassert>

// bump if necessary
#define LLAMA_MAX_LAYERS  512
#define LLAMA_MAX_EXPERTS 512 // Qwen3 Next
enum llama_expert_gating_func_type {
    LLAMA_EXPERT_GATING_FUNC_TYPE_NONE           = 0,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX        = 1,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID        = 2,
    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT = 3, // applied to the router weights instead of the logits
};
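
// rough sketch (an assumption for illustration, not taken from the implementation) of how
// these gating types are commonly applied by a top-k MoE router:
//   SOFTMAX        : probs = softmax(logits) over all experts, then select the top-k experts
//   SIGMOID        : probs = sigmoid(logits) per expert, then select the top-k experts
//   SOFTMAX_WEIGHT : select the top-k experts first, then apply softmax to the selected
//                    router weights (per the comment above) rather than to the logits
//   NONE           : no gating function specified; the model code is expected to fall back
//                    to an architecture-specific default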

enum llama_swa_type {
    LLAMA_SWA_TYPE_NONE      = 0,
    LLAMA_SWA_TYPE_STANDARD  = 1,
    LLAMA_SWA_TYPE_CHUNKED   = 2,
    LLAMA_SWA_TYPE_SYMMETRIC = 3,
};

struct llama_hparams_posnet {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams_convnext {
    uint32_t n_embd;
    uint32_t n_layer;
};

struct llama_hparams {
    bool vocab_only;
    bool no_alloc;
    bool rope_finetuned;
    bool use_par_res;
    bool swin_norm;

    uint32_t n_ctx_train; // context size the model was trained on
    uint32_t n_embd;
    uint32_t n_embd_features = 0;
    uint32_t n_layer;
    int32_t n_layer_kv_from_start = -1; // if non-negative, the first n_layer_kv_from_start layers have KV cache
    uint32_t n_rot;
    uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
    uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
    uint32_t n_expert = 0;
    uint32_t n_expert_used = 0;
    uint32_t n_rel_attn_bkts = 0;

    // note: deepseek2 using MLA converts into MQA with larger heads, then decompresses to MHA
    uint32_t n_embd_head_k_mla = 0;
    uint32_t n_embd_head_v_mla = 0;

    // for WavTokenizer
    struct llama_hparams_posnet posnet;
    struct llama_hparams_convnext convnext;

    uint32_t n_shortconv_l_cache = 0;

    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
    std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;

    uint32_t n_layer_dense_lead = 0;
    uint32_t n_lora_q = 0;
    uint32_t n_lora_kv = 0;
    uint32_t n_ff_exp = 0;
    uint32_t n_ff_shexp = 0;
    uint32_t n_ff_chexp = 0;
    uint32_t n_expert_shared = 0;
    uint32_t n_norm_groups = 0;
    uint32_t n_expert_groups = 0;
    uint32_t n_group_used = 0;
    uint32_t n_group_experts = 0;

    float expert_group_scale = 0.05f;
    float expert_weights_scale = 0.0f;
    bool expert_weights_norm = false;
    uint32_t expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
    uint32_t moe_every_n_layers = 0;
    uint32_t nextn_predict_layers = 0;

    float f_norm_eps;
    float f_norm_rms_eps;
    float f_norm_group_eps;

    float f_attn_logit_softcapping = 50.0f;
    float f_router_logit_softcapping = 30.0f;
    float f_final_logit_softcapping = 30.0f;

    // for RWKV
    uint32_t rescale_every_n_layers = 0;
    uint32_t time_mix_extra_dim = 0;
    uint32_t time_decay_extra_dim = 0;
    uint32_t wkv_head_size = 0;
    uint32_t token_shift_count = 2;
    uint32_t n_lora_decay = 0;
    uint32_t n_lora_iclr = 0;
    uint32_t n_lora_value_res_mix = 0;
    uint32_t n_lora_gate = 0;

    float rope_attn_factor = 1.0f;
    float rope_freq_base_train;
    float rope_freq_base_train_swa = 10000.0f;
    float rope_freq_scale_train;
    float rope_freq_scale_train_swa = 1.0f;

    uint32_t n_ctx_orig_yarn;
    float rope_yarn_log_mul = 0.0f;

    float yarn_ext_factor = -1.0f;
    float yarn_attn_factor = 1.0f;
    float yarn_beta_fast = 32.0f;
    float yarn_beta_slow = 1.0f;

    std::array<int, 4> rope_sections;

    // Sliding Window Attention (SWA)
    llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
    // the size of the sliding window (0 - no SWA)
    uint32_t n_swa = 0;
    // if swa_layers[il] == 1, then layer il is SWA
    // if swa_layers[il] == 0, then layer il is dense (i.e. non-SWA)
    // by default, all layers are dense
    // note: using uint32_t type for compatibility reasons
    std::array<uint32_t, LLAMA_MAX_LAYERS> swa_layers;

    // for State Space Models
    uint32_t ssm_d_conv = 0;
    uint32_t ssm_d_inner = 0;
    uint32_t ssm_d_state = 0;
    uint32_t ssm_dt_rank = 0;
    uint32_t ssm_n_group = 0;

    // for hybrid state space models
    std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;

    bool ssm_dt_b_c_rms = false;

    float f_clamp_kqv = 0.0f;
    float f_max_alibi_bias = 0.0f;
    float f_logit_scale = 0.0f;

    // Additional scale factors (Granite/Granite MoE)
    float f_residual_scale = 0.0f;
    float f_embedding_scale = 0.0f;
    float f_attention_scale = 0.0f;

    // grok-2
    float f_attn_out_scale = 0.0f;
    uint32_t attn_temp_length = 0;

    bool causal_attn = true;
    bool use_alibi = false;
    bool attn_soft_cap = false;
    bool use_kq_norm = false;

    // for Classifiers
    uint32_t n_cls_out = 1;

    // output embedding dimension (0 = use n_embd)
    uint32_t n_embd_out = 0;

    // llama4 smallthinker
    uint32_t n_moe_layer_step = 0;
    uint32_t n_no_rope_layer_step = 4;
    uint32_t n_attn_temp_floor_scale = 0;
    float f_attn_temp_scale = 0.0f;
    float f_attn_temp_offset = 0.0f; // offset position index

    // gemma3n altup
    uint32_t n_altup = 4;     // altup_num_inputs
    uint32_t i_altup_act = 0; // altup_active_idx
    uint32_t laurel_rank = 64;
    uint32_t n_embd_altup = 256;

    // needed for sentence-transformers dense layers
    uint32_t dense_2_feat_in = 0;  // in_features of the 2_Dense
    uint32_t dense_2_feat_out = 0; // out_features of the 2_Dense
    uint32_t dense_3_feat_in = 0;  // in_features of the 3_Dense
    uint32_t dense_3_feat_out = 0; // out_features of the 3_Dense

    // xIELU
    std::array<float, LLAMA_MAX_LAYERS> xielu_alpha_n;
    std::array<float, LLAMA_MAX_LAYERS> xielu_alpha_p;
    std::array<float, LLAMA_MAX_LAYERS> xielu_beta;
    std::array<float, LLAMA_MAX_LAYERS> xielu_eps;

    // qwen3vl deepstack
    uint32_t n_deepstack_layers = 0;

    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
    uint32_t dec_n_layer = 0;

    enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
    enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;

    // n_pattern means that every n_pattern-th layer is dense (i.e. non-SWA)
    // dense_first indicates whether the pattern starts with a dense layer
    // note that if n_pattern == 0, all layers are SWA
    //           if n_pattern == 1, all layers are dense
    // example 1: n_pattern = 3, dense_first = false
    //   il == 0: swa
    //   il == 1: swa
    //   il == 2: dense
    //   il == 3: swa
    //   il == 4: swa
    //   il == 5: dense
    //   il == 6: swa
    //   etc ...
    // example 2: n_pattern = 2, dense_first = true
    //   il == 0: dense
    //   il == 1: swa
    //   il == 2: dense
    //   il == 3: swa
    //   etc ...
    void set_swa_pattern(uint32_t n_pattern, bool dense_first = false);
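
    // a possible realization of the pattern described above - an illustrative sketch only,
    // not necessarily how set_swa_pattern() is actually implemented:
    //
    //   // n_pattern == 0 -> every layer is SWA, n_pattern == 1 -> every layer is dense
    //   for (uint32_t il = 0; il < n_layer; ++il) {
    //       const bool dense = (n_pattern == 0) ? false :
    //                          dense_first ? (il % n_pattern == 0)
    //                                      : (il % n_pattern == n_pattern - 1);
    //       swa_layers[il] = dense ? 0 : 1;
    //   }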

    // return true if one of the layers is SWA
    bool is_swa_any() const;

    uint32_t n_head(uint32_t il = 0) const;

    uint32_t n_head_kv(uint32_t il = 0) const;

    uint32_t n_ff(uint32_t il = 0) const;

    uint32_t n_gqa(uint32_t il = 0) const;

    // dimension of main + auxiliary input embeddings
    uint32_t n_embd_inp() const;

    // dimension of output embeddings
    uint32_t get_n_embd_out() const;
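
    // note (assumption, not verified here): per the n_embd_out comment above, this is
    // presumably equivalent to: n_embd_out > 0 ? n_embd_out : n_embd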

    // dimension of key embeddings across all k-v heads
    uint32_t n_embd_k_gqa(uint32_t il = 0) const;

    // dimension of value embeddings across all k-v heads
    uint32_t n_embd_v_gqa(uint32_t il = 0) const;

    // true if any layer has a different n_embd_k_gqa/n_embd_v_gqa
    bool is_n_embd_k_gqa_variable() const;
    bool is_n_embd_v_gqa_variable() const;

    // return the maximum n_embd_k_gqa/n_embd_v_gqa across all layers
    uint32_t n_embd_k_gqa_max() const;
    uint32_t n_embd_v_gqa_max() const;
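
    // illustrative relations between these helpers, assuming the usual GQA definitions
    // (an assumption - special cases such as MLA may deviate):
    //   n_gqa(il)        == n_head(il) / n_head_kv(il)    // query heads per k-v head
    //   n_embd_k_gqa(il) == n_embd_head_k * n_head_kv(il) // key dims across all k-v heads
    //   n_embd_v_gqa(il) == n_embd_head_v * n_head_kv(il) // value dims across all k-v heads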

    // dimension of the rolling state embeddings
    // corresponds to Mamba's conv_states size or RWKV's token_shift states size
    uint32_t n_embd_r() const;

    // dimension of the recurrent state embeddings
    uint32_t n_embd_s() const;

    // whether or not the given layer is recurrent (for hybrid models)
    bool is_recurrent(uint32_t il) const;

    uint32_t n_pos_per_embd() const;

    bool is_swa(uint32_t il) const;

    bool has_kv(uint32_t il) const;

    // number of layers for which has_kv() returns true
    uint32_t n_layer_kv() const;
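
    // example of the assumed interaction with n_layer_kv_from_start (see the member above):
    //   n_layer_kv_from_start == 2  -> has_kv(0) and has_kv(1) are true, later layers have no KV cache,
    //                                  and n_layer_kv() returns 2
    //   n_layer_kv_from_start == -1 -> every layer is expected to have KV cache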

    // note: this function may be called with SWA parameters (n_swa, swa_type) different from those stored in the hparams
    // note: inlined on purpose for performance reasons
    // TODO: think of a better place for this function
    // TODO: pack the SWA params in a struct?
    static bool is_masked_swa(uint32_t n_swa, llama_swa_type swa_type, llama_pos p0, llama_pos p1) {
        assert(p0 >= 0 && p1 >= 0);

        switch (swa_type) {
            case LLAMA_SWA_TYPE_NONE:
                {
                } break;
            case LLAMA_SWA_TYPE_STANDARD:
                {
                    if (p1 - p0 >= (int32_t) n_swa) {
                        return true;
                    }
                } break;
            case LLAMA_SWA_TYPE_CHUNKED:
                {
                    const llama_pos pos_chunk_start = (p1 / n_swa) * n_swa;

                    if (p0 < pos_chunk_start) {
                        return true;
                    }
                } break;
            case LLAMA_SWA_TYPE_SYMMETRIC:
                {
                    const int32_t half_n_swa = (int32_t) n_swa / 2;
                    const int32_t pos_diff = p1 - p0;

                    // Mask if outside the symmetric window
                    if (pos_diff < -half_n_swa || pos_diff > half_n_swa) {
                        return true;
                    }
                } break;
        }

        return false;
    }
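
    // worked examples for the masking rules above (derived from the code, with n_swa = 4):
    //   STANDARD : is_masked_swa(4, LLAMA_SWA_TYPE_STANDARD,   0, 4) -> true  (4 - 0 >= 4)
    //              is_masked_swa(4, LLAMA_SWA_TYPE_STANDARD,   1, 4) -> false (4 - 1 <  4)
    //   CHUNKED  : is_masked_swa(4, LLAMA_SWA_TYPE_CHUNKED,    3, 5) -> true  (the chunk of p1 starts at 4, and p0 = 3 < 4)
    //              is_masked_swa(4, LLAMA_SWA_TYPE_CHUNKED,    4, 5) -> false
    //   SYMMETRIC: is_masked_swa(4, LLAMA_SWA_TYPE_SYMMETRIC, 10, 7) -> true  (p1 - p0 = -3 is outside [-2, 2])
    //              is_masked_swa(4, LLAMA_SWA_TYPE_SYMMETRIC,  9, 7) -> false (p1 - p0 = -2 is inside [-2, 2])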

    bool use_mrope() const;
};

static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");