/src/llama.cpp/src/models/deepseek2.cpp
#include "models.h"

llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_graph_params & params) :
        llm_graph_context(params) {
    // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
    bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);

    const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);

    // note: these are the actual head sizes you get when treating as MHA or after "decompression" using wv_b for MLA
    const int64_t n_embd_head_k = is_mla ? hparams.n_embd_head_k_mla : hparams.n_embd_head_k;
    const int64_t n_embd_head_v = is_mla ? hparams.n_embd_head_v_mla : hparams.n_embd_head_v;

    const int64_t n_embd_head_qk_rope = hparams.n_rot;
    const int64_t n_embd_head_qk_nope = n_embd_head_k - n_embd_head_qk_rope;

    const uint32_t kv_lora_rank = hparams.n_lora_kv;
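    // e.g. for DeepSeek-V2 (non-lite) these are typically n_embd_head_k = 192
    // (128 nope + 64 rope), n_embd_head_v = 128, kv_lora_rank = 512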

    // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
    // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
    // And also: https://github.com/ggml-org/llama.cpp/pull/17945 [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]

    // first cancel the adjustment from llama_hparams::yarn_attn_factor_adjust to get the original attn_factor
    GGML_ASSERT(ext_factor >= 0.0f);
    const float attn_factor_org = attn_factor * (1.0f + 0.1f * logf(1.0f / freq_scale));

    // use the original attn_factor to pre-scale the kq_scale
    const float mscale = attn_factor_org * (1.0f + 0.1f * hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
    const float kq_scale = 1.0f * mscale * mscale / sqrtf(float(n_embd_head_k));
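    // net effect: the reference implementation scales both q and k by mscale before the dot
    // product, and softmax((mscale*q).(mscale*k) / sqrt(n_embd_head_k)) is equivalent to
    // softmax(q.k * mscale^2 / sqrt(n_embd_head_k)), which is why mscale appears squared here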

    ggml_tensor * cur;
    ggml_tensor * inpL;

    // {n_embd, n_tokens}
    inpL = build_inp_embd(model.tok_embd);

    // (optional) temperature tuning - used by mistral-large
    ggml_tensor * inp_attn_scale = nullptr;
    if (hparams.f_attn_temp_scale != 0.0f) {
        inp_attn_scale = build_inp_attn_scale();
    }

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn = build_attn_inp_kv();

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self_attention
        {
            ggml_tensor * q = NULL;
            if (!is_lite) {
                q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
                cb(q, "q", il);

                q = build_norm(q, model.layers[il].attn_q_a_norm, nullptr, LLM_NORM_RMS, il);
                cb(q, "q", il);

                q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
                cb(q, "q", il);
            } else {
                q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(q, "q", il);
            }
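            // each row of q packs [n_embd_head_qk_nope | n_embd_head_qk_rope] per head, so the
            // two halves below are extracted as strided views into the same buffer (no copies)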
            // split into {n_embd_head_qk_nope, n_head, n_tokens}
            ggml_tensor * q_nope =
                ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, ggml_row_size(q->type, n_embd_head_k),
                             ggml_row_size(q->type, n_embd_head_k) * n_head, 0);
            cb(q_nope, "q_nope", il);

            // and {n_embd_head_qk_rope, n_head, n_tokens}
            ggml_tensor * q_pe = ggml_view_3d(
                ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, ggml_row_size(q->type, n_embd_head_k),
                ggml_row_size(q->type, n_embd_head_k) * n_head, ggml_row_size(q->type, n_embd_head_qk_nope));
            cb(q_pe, "q_pe", il);

            ggml_tensor * kv_cmpr_pe = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
            cb(kv_cmpr_pe, "kv_cmpr_pe", il);

            // split into {kv_lora_rank, n_tokens}
            ggml_tensor * kv_cmpr =
                ggml_view_2d(ctx0, kv_cmpr_pe, kv_lora_rank, n_tokens,
                             ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope), 0);
            cb(kv_cmpr, "kv_cmpr", il);

            // and {n_embd_head_qk_rope, 1, n_tokens}
            ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_cmpr_pe, n_embd_head_qk_rope, 1, n_tokens,
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank));
            cb(k_pe, "k_pe", il);

            q_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);
            cb(q_pe, "q_pe", il);

            k_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);
            cb(k_pe, "k_pe", il);

            kv_cmpr = build_norm(kv_cmpr, model.layers[il].attn_kv_a_norm, nullptr, LLM_NORM_RMS, il);
            cb(kv_cmpr, "kv_cmpr", il);

            if (is_mla) {
                // {n_embd_head_qk_nope, n_tokens, n_head}
                q_nope = ggml_permute(ctx0, q_nope, 0, 2, 1, 3);
                cb(q_nope, "q_nope_perm", il);

                // {n_embd_head_qk_nope, kv_lora_rank, n_head} x {n_embd_head_qk_nope, n_tokens, n_head}
                ggml_tensor * q_nope_absorbed = ggml_mul_mat(ctx0, model.layers[il].wk_b, q_nope);
                cb(q_nope_absorbed, "q_nope_absorbed", il);
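                // this is the MLA "absorption" trick: multiplying q_nope by wk_b up front means
                // q_nope_absorbed . kv_cmpr reproduces q_nope . k_nope without ever decompressing
                // the keys, so attention can run directly in the kv_lora_rank latent space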

                // {kv_lora_rank, n_head, n_tokens}
                q_nope_absorbed = ggml_permute(ctx0, q_nope_absorbed, 0, 2, 1, 3);
                cb(q_nope_absorbed, "q_nope_absorbed_perm", il);

                // {n_embd_head_qk_rope + kv_lora_rank, n_head, n_tokens}
                // note: rope must go first for in-place context shifting in build_rope_shift()
                ggml_tensor * Qcur = ggml_concat(ctx0, q_pe, q_nope_absorbed, 0);
                cb(Qcur, "Qcur", il);

                kv_cmpr = ggml_reshape_3d(ctx0, kv_cmpr, kv_lora_rank, 1, n_tokens);
                cb(kv_cmpr, "kv_cmpr_reshape", il);

                // {n_embd_head_qk_rope + kv_lora_rank, 1, n_tokens}
                ggml_tensor * Kcur = ggml_concat(ctx0, k_pe, kv_cmpr, 0);
                cb(Kcur, "Kcur", il);

                // {kv_lora_rank, 1, n_tokens}
                ggml_tensor * Vcur = kv_cmpr;
                cb(Vcur, "Vcur", il);
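                // note: K and V alias the compressed latent, so the KV cache stores only about
                // kv_lora_rank + n_embd_head_qk_rope values per token; wv_b (passed to
                // build_attn below) decompresses the per-head values after attention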

                if (inp_attn_scale) {
                    // apply llama 4 temperature scaling
                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
                    cb(Qcur, "Qcur_attn_temp_scaled", il);
                }

                // note: MLA with the absorption optimization converts into MQA (ie: GQA with 1 group)
                cur = build_attn(inp_attn,
                        model.layers[il].wo, NULL,
                        Qcur, Kcur, Vcur, nullptr, nullptr, model.layers[il].wv_b, kq_scale, il);
            } else {
                ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_cmpr);
                cb(kv, "kv", il);
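                // without absorption the latent is decompressed up front: wkv_b expands kv_cmpr
                // into per-head k_nope and v, and attention proceeds as regular MHA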

                // split into {n_embd_head_qk_nope, n_head, n_tokens}
                ggml_tensor * k_nope =
                    ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
                                 ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                                 ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head, 0);
                cb(k_nope, "k_nope_view", il);

                // and {n_embd_head_v, n_head, n_tokens}
                ggml_tensor * Vcur = ggml_view_3d(ctx0, kv, n_embd_head_v, n_head, n_tokens,
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head,
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope));
                cb(Vcur, "Vcur_view", il);

                Vcur = ggml_cont(ctx0, Vcur);
                cb(Vcur, "Vcur_cont", il);

                // note: rope must go first for in-place context shifting in build_rope_shift()
                ggml_tensor * Qcur = ggml_concat(ctx0, q_pe, q_nope, 0);
                cb(Qcur, "Qcur", il);

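                // k_pe has a single head shared by all query heads, so broadcast it
                // (ggml_repeat) across n_head before concatenating with k_nope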
                ggml_tensor * Kcur = ggml_concat(ctx0, ggml_repeat(ctx0, k_pe, q_pe), k_nope, 0);
                cb(Kcur, "Kcur", il);

                if (inp_attn_scale) {
                    // apply llama 4 temperature scaling
                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
                    cb(Qcur, "Qcur_attn_temp_scaled", il);
                }

                // note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups)
                cur = build_attn(inp_attn,
                        model.layers[il].wo, NULL,
                        Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            }
        }
        if (il == n_layer - 1 && inp_out_ids) {
            cur = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        if ((uint32_t) il < hparams.n_layer_dense_lead) {
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            ggml_tensor * moe_out = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    model.layers[il].ffn_exp_probs_b,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, hparams.expert_weights_norm,
                    true, hparams.expert_weights_scale,
                    (llama_expert_gating_func_type) hparams.expert_gating_func,
                    il);
            cb(moe_out, "ffn_moe_out", il);
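            // note: the gating function and optional per-expert bias (ffn_exp_probs_b) come from
            // the model hparams; DeepSeek-V3-style checkpoints typically use sigmoid gating with
            // a bias, while DeepSeek-V2 uses plain softmax gating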

            // FFN shared expert
            {
                ggml_tensor * ffn_shexp =
                    build_ffn(cur,
                        model.layers[il].ffn_up_shexp, NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }
        cur = ggml_add(ctx0, cur, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }
    cur = inpL;

    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}