/src/llama.cpp/src/llama-hparams.cpp
Line | Count | Source |
1 | | #include "llama-hparams.h" |
2 | | |
3 | | #include "ggml.h" |
4 | | |
5 | | #include <algorithm> |
6 | | #include <cassert> |
7 | | |
8 | 0 | void llama_hparams::set_swa_pattern(uint32_t n_pattern, bool dense_first) { |
9 | 0 | if (dense_first) { |
10 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
11 | 0 | swa_layers[il] = n_pattern == 0 || (il % n_pattern != 0); |
12 | 0 | } |
13 | 0 | } else { |
14 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
15 | 0 | swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1)); |
16 | 0 | } |
17 | 0 | } |
18 | 0 | } |
19 | | |
20 | 0 | bool llama_hparams::is_swa_any() const { |
21 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
22 | 0 | if (swa_layers[il]) { |
23 | 0 | return true; |
24 | 0 | } |
25 | 0 | } |
26 | | |
27 | 0 | return false; |
28 | 0 | } |
29 | | |
30 | 0 | uint32_t llama_hparams::n_head(uint32_t il) const { |
31 | 0 | if (il < n_layer) { |
32 | 0 | return n_head_arr[il]; |
33 | 0 | } |
34 | | |
35 | 0 | GGML_ABORT("fatal error"); |
36 | 0 | } |
37 | | |
38 | 0 | uint32_t llama_hparams::n_head_kv(uint32_t il) const { |
39 | 0 | if (il < n_layer) { |
40 | 0 | return n_head_kv_arr[il]; |
41 | 0 | } |
42 | | |
43 | 0 | GGML_ABORT("fatal error"); |
44 | 0 | } |
45 | | |
46 | 0 | uint32_t llama_hparams::n_ff(uint32_t il) const { |
47 | 0 | if (il < n_layer) { |
48 | 0 | return n_ff_arr[il]; |
49 | 0 | } |
50 | | |
51 | 0 | GGML_ABORT("fatal error"); |
52 | 0 | } |
53 | | |
54 | 0 | uint32_t llama_hparams::n_gqa(uint32_t il) const { |
55 | 0 | const uint32_t n_head = this->n_head(il); |
56 | 0 | const uint32_t n_head_kv = this->n_head_kv(il); |
57 | |
58 | 0 | if (n_head_kv == 0) { |
59 | 0 | return 0; |
60 | 0 | } |
61 | | |
62 | 0 | return n_head/n_head_kv; |
63 | 0 | } |
64 | | |
65 | 0 | uint32_t llama_hparams::n_embd_inp() const { |
66 | 0 | uint32_t n_embd_inp = n_embd; |
67 | |
68 | 0 | if (n_deepstack_layers > 0) { |
69 | 0 | n_embd_inp += n_embd * n_deepstack_layers; |
70 | 0 | } |
71 | |
72 | 0 | return n_embd_inp; |
73 | 0 | } |
74 | | |
75 | 0 | uint32_t llama_hparams::get_n_embd_out() const { |
76 | 0 | return n_embd_out > 0 ? n_embd_out : n_embd; |
77 | 0 | } |
78 | | |
79 | 0 | uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const { |
80 | 0 | const uint32_t n_head_kv = this->n_head_kv(il); |
81 | |
82 | 0 | return n_embd_head_k * n_head_kv; |
83 | 0 | } |
84 | | |
85 | 0 | uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const { |
86 | 0 | const uint32_t n_head_kv = this->n_head_kv(il); |
87 | |
88 | 0 | return n_embd_head_v * n_head_kv; |
89 | 0 | } |
90 | | |
91 | 0 | bool llama_hparams::is_n_embd_k_gqa_variable() const { |
92 | 0 | const uint32_t val = n_embd_k_gqa(); |
93 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
94 | 0 | if (val != n_embd_k_gqa(il)) { |
95 | 0 | return true; |
96 | 0 | } |
97 | 0 | } |
98 | | |
99 | 0 | return false; |
100 | 0 | } |
101 | | |
102 | 0 | bool llama_hparams::is_n_embd_v_gqa_variable() const { |
103 | 0 | const uint32_t val = n_embd_v_gqa(); |
104 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
105 | 0 | if (val != n_embd_v_gqa(il)) { |
106 | 0 | return true; |
107 | 0 | } |
108 | 0 | } |
109 | | |
110 | 0 | return false; |
111 | 0 | } |
112 | | |
113 | 0 | uint32_t llama_hparams::n_embd_k_gqa_max() const { |
114 | 0 | uint32_t val = n_embd_k_gqa(); |
115 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
116 | 0 | val = std::max(val, n_embd_k_gqa(il)); |
117 | 0 | } |
118 | |
119 | 0 | return val; |
120 | 0 | } |
121 | | |
122 | 0 | uint32_t llama_hparams::n_embd_v_gqa_max() const { |
123 | 0 | uint32_t val = n_embd_v_gqa(); |
124 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
125 | 0 | val = std::max(val, n_embd_v_gqa(il)); |
126 | 0 | } |
127 | |
128 | 0 | return val; |
129 | 0 | } |
130 | | |
131 | 0 | uint32_t llama_hparams::n_embd_r() const { |
132 | 0 | if (wkv_head_size != 0) { |
133 | | // for RWKV models |
134 | 0 | return token_shift_count * n_embd; |
135 | 0 | } |
136 | | |
137 | 0 | if (n_shortconv_l_cache != 0) { |
138 | | // for LFM2 models |
139 | 0 | return n_embd * (n_shortconv_l_cache - 1); |
140 | 0 | } |
141 | | |
142 | | // TODO: maybe support other convolution strides than 1 |
143 | | // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed |
144 | | // Corresponds to Mamba's conv_states size |
145 | 0 | return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * (ssm_d_inner + 2*ssm_n_group*ssm_d_state); |
146 | 0 | } |
147 | | |
148 | 0 | uint32_t llama_hparams::n_embd_s() const { |
149 | 0 | if (wkv_head_size != 0) { |
150 | | // corresponds to RWKV's wkv_states size |
151 | 0 | return n_embd * wkv_head_size; |
152 | 0 | } |
153 | | |
154 | | // corresponds to Mamba's ssm_states size |
155 | 0 | return ssm_d_state * ssm_d_inner; |
156 | 0 | } |
157 | | |
158 | 0 | bool llama_hparams::is_recurrent(uint32_t il) const { |
159 | 0 | if (il < n_layer) { |
160 | 0 | return recurrent_layer_arr[il]; |
161 | 0 | } |
162 | | |
163 | 0 | GGML_ABORT("%s: il (%u) out of bounds (n_layer: %u)\n", __func__, il, n_layer); |
164 | 0 | } |
165 | | |
166 | 0 | uint32_t llama_hparams::n_pos_per_embd() const { |
167 | 0 | return rope_type == LLAMA_ROPE_TYPE_MROPE || rope_type == LLAMA_ROPE_TYPE_IMROPE ? 4 : 1; |
168 | 0 | } |
169 | | |
170 | 0 | bool llama_hparams::is_swa(uint32_t il) const { |
171 | 0 | if (il < n_layer) { |
172 | 0 | return swa_layers[il]; |
173 | 0 | } |
174 | | |
175 | 0 | GGML_ABORT("fatal error"); |
176 | 0 | } |
177 | | |
178 | 0 | bool llama_hparams::has_kv(uint32_t il) const { |
179 | 0 | if (n_layer_kv_from_start >= 0) { |
180 | 0 | if (il < (uint32_t) n_layer_kv_from_start) { |
181 | 0 | return true; |
182 | 0 | } |
183 | | |
184 | 0 | return false; |
185 | 0 | } |
186 | | |
187 | | // by default, all layers have kv |
188 | 0 | return true; |
189 | 0 | } |
190 | | |
191 | 0 | uint32_t llama_hparams::n_layer_kv() const { |
192 | 0 | uint32_t res = 0; |
193 | |
194 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
195 | 0 | if (has_kv(il)) { |
196 | 0 | res++; |
197 | 0 | } |
198 | 0 | } |
199 | |
200 | 0 | return res; |
201 | 0 | } |
202 | | |
203 | 0 | bool llama_hparams::is_masked_swa(uint32_t n_swa, llama_swa_type swa_type, llama_pos p0, llama_pos p1) { |
204 | 0 | assert(p0 >= 0 && p1 >= 0); |
205 | |
206 | 0 | switch (swa_type) { |
207 | 0 | case LLAMA_SWA_TYPE_NONE: |
208 | 0 | { |
209 | 0 | } break; |
210 | 0 | case LLAMA_SWA_TYPE_STANDARD: |
211 | 0 | { |
212 | 0 | if (p1 - p0 >= (int32_t) n_swa) { |
213 | 0 | return true; |
214 | 0 | } |
215 | 0 | } break; |
216 | 0 | case LLAMA_SWA_TYPE_CHUNKED: |
217 | 0 | { |
218 | 0 | const llama_pos pos_chunk_start = (p1 / n_swa) * n_swa; |
219 | |
220 | 0 | if (p0 < pos_chunk_start) { |
221 | 0 | return true; |
222 | 0 | } |
223 | 0 | } break; |
224 | 0 | case LLAMA_SWA_TYPE_SYMMETRIC: |
225 | 0 | { |
226 | 0 | const int32_t half_n_swa = (int32_t) n_swa / 2; |
227 | 0 | const int32_t pos_diff = p1 - p0; |
228 | | |
229 | | // Mask if outside the symmetric window |
230 | 0 | if (pos_diff < -half_n_swa || pos_diff > half_n_swa) { |
231 | 0 | return true; |
232 | 0 | } |
233 | 0 | } break; |
234 | 0 | } |
235 | | |
236 | 0 | return false; |
237 | 0 | } |
238 | | |
239 | 0 | bool llama_hparams::use_mrope() const { |
240 | 0 | return rope_sections[0] > 0 && rope_sections[1] > 0; |
241 | 0 | } |
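
A minimal illustrative sketch of how the per-layer accessors listed above might be combined by a caller, for example to estimate the KV-cache footprint per cell. This is not code from the repository; the helper name, the F16 (2 bytes per element) assumption, and the populated `hp` instance are assumptions made for the example.

    // Sketch only: sum the per-layer K/V widths exposed by llama_hparams.
    // Assumes F16 storage (2 bytes per element) and a populated `hp`.
    #include <cstdint>

    #include "llama-hparams.h"

    static uint64_t kv_bytes_per_cell(const llama_hparams & hp) {
        uint64_t bytes = 0;
        for (uint32_t il = 0; il < hp.n_layer; ++il) {
            if (!hp.has_kv(il)) {
                continue; // layers without a KV cache contribute nothing
            }
            // one K row and one V row per cache cell for this layer
            bytes += 2ull * (hp.n_embd_k_gqa(il) + hp.n_embd_v_gqa(il));
        }
        return bytes;
    }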