/src/llama.cpp/src/models/lfm2.cpp
Line | Count | Source |
1 | | #include "models.h" |
2 | | |
3 | | #include "../llama-memory-hybrid.h" |
4 | | |
5 | | |
6 | | llm_build_lfm2::llm_build_lfm2(const llama_model & model, const llm_graph_params & params) : |
7 | 0 | llm_graph_context(params), |
8 | 0 | model(model) { |
9 | 0 | ggml_tensor * cur = build_inp_embd(model.tok_embd); |
10 | 0 | cb(cur, "model.embed_tokens", -1); |
11 | |
12 | 0 | ggml_tensor * inp_pos = build_inp_pos(); |
13 | 0 | auto * inp_hybrid = build_inp_mem_hybrid(); |
14 | 0 | ggml_tensor * inp_out_ids = build_inp_out_ids(); |
15 | |
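| | // per layer: token mixing (short convolution on recurrent layers, self-attention otherwise),
| | // then a feed-forward block (dense for the first n_layer_dense_lead layers, MoE after)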
16 | 0 | for (int il = 0; il < n_layer; ++il) { |
17 | 0 | const bool is_moe_layer = il >= static_cast<int>(hparams.n_layer_dense_lead); |
18 | |
19 | 0 | auto * prev_cur = cur; |
20 | 0 | cur = build_norm(cur, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); |
21 | 0 | cb(cur, "model.layers.{}.operator_norm", il); |
22 | |
23 | 0 | cur = hparams.is_recurrent(il) ? build_shortconv_block(cur, inp_hybrid->get_recr(), il) : |
24 | 0 | build_attn_block(cur, inp_pos, inp_hybrid->get_attn(), il); |
25 | |
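| | // on the final layer, keep only the rows whose outputs were requested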
26 | 0 | if (il == n_layer - 1 && inp_out_ids) { |
27 | 0 | cur = ggml_get_rows(ctx0, cur, inp_out_ids); |
28 | 0 | prev_cur = ggml_get_rows(ctx0, prev_cur, inp_out_ids); |
29 | 0 | } |
30 | |
31 | 0 | cur = ggml_add(ctx0, prev_cur, cur); |
32 | |
33 | 0 | auto * ffn_norm_out = build_norm(cur, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il); |
34 | 0 | cb(ffn_norm_out, "model.layers.{}.ffn_norm", il); |
35 | |
36 | 0 | ggml_tensor * ffn_out = |
37 | 0 | is_moe_layer ? build_moe_feed_forward(ffn_norm_out, il) : build_dense_feed_forward(ffn_norm_out, il); |
38 | 0 | cb(ffn_out, "model.layers.{}.ffn_out", il);
39 | |
40 | 0 | cur = ggml_add(ctx0, cur, ffn_out); |
41 | 0 | } |
42 | |
43 | 0 | cur = build_norm(cur, model.tok_norm, NULL, LLM_NORM_RMS, -1); |
44 | 0 | cb(cur, "model.embedding_norm", -1); |
45 | 0 | res->t_embd = cur; |
46 | |
47 | 0 | cur = build_lora_mm(model.output, cur); |
48 | 0 | cb(cur, "lm_head", -1); |
49 | |
50 | 0 | res->t_logits = cur; |
51 | |
52 | 0 | ggml_build_forward_expand(gf, cur); |
53 | 0 | } |
54 | | |
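| | // expert (MoE) feed-forward: routes each token to n_expert_used of n_expert experts with SiLU gating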
55 | 0 | ggml_tensor * llm_build_lfm2::build_moe_feed_forward(ggml_tensor * cur, int il) const { |
56 | 0 | return build_moe_ffn(cur, |
57 | 0 | model.layers[il].ffn_gate_inp, model.layers[il].ffn_up_exps, |
58 | 0 | model.layers[il].ffn_gate_exps, model.layers[il].ffn_down_exps, |
59 | 0 | model.layers[il].ffn_exp_probs_b, n_expert, n_expert_used, LLM_FFN_SILU, true, false, 0.0, |
60 | 0 | static_cast<llama_expert_gating_func_type>(hparams.expert_gating_func), il); |
61 | 0 | } |
62 | | |
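| | // dense feed-forward: SiLU-gated with parallel gate/up projections; no bias tensors are expected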
63 | 0 | ggml_tensor * llm_build_lfm2::build_dense_feed_forward(ggml_tensor * cur, int il) const { |
64 | 0 | GGML_ASSERT(!model.layers[il].ffn_up_b); |
65 | 0 | GGML_ASSERT(!model.layers[il].ffn_gate_b); |
66 | 0 | GGML_ASSERT(!model.layers[il].ffn_down_b); |
67 | 0 | return build_ffn(cur, |
68 | 0 | model.layers[il].ffn_up, NULL, NULL, |
69 | 0 | model.layers[il].ffn_gate, NULL, NULL, |
70 | 0 | model.layers[il].ffn_down, NULL, NULL, |
71 | 0 | NULL, LLM_FFN_SILU, LLM_FFN_PAR, il); |
72 | 0 | } |
73 | | |
74 | | ggml_tensor * llm_build_lfm2::build_attn_block(ggml_tensor * cur, |
75 | | ggml_tensor * inp_pos, |
76 | | llm_graph_input_attn_kv * inp_attn, |
77 | 0 | int il) const { |
78 | 0 | GGML_ASSERT(hparams.n_embd_v_gqa(il) == hparams.n_embd_k_gqa(il)); |
79 | 0 | const auto n_embd_head = hparams.n_embd_head_v; |
80 | 0 | const auto n_head_kv = hparams.n_head_kv(il); |
81 | |
82 | 0 | auto * q = build_lora_mm(model.layers[il].wq, cur); |
83 | 0 | cb(q, "model.layers.{}.self_attn.q_proj", il); |
84 | 0 | auto * k = build_lora_mm(model.layers[il].wk, cur); |
85 | 0 | cb(k, "model.layers.{}.self_attn.k_proj", il); |
86 | 0 | auto * v = build_lora_mm(model.layers[il].wv, cur); |
87 | 0 | cb(v, "model.layers.{}.self_attn.v_proj", il); |
88 | |
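| | // split into heads: {n_embd_head, n_head(_kv), n_tokens}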
89 | 0 | q = ggml_reshape_3d(ctx0, q, n_embd_head, n_head, n_tokens); |
90 | 0 | k = ggml_reshape_3d(ctx0, k, n_embd_head, n_head_kv, n_tokens); |
91 | 0 | v = ggml_reshape_3d(ctx0, v, n_embd_head, n_head_kv, n_tokens); |
92 | | |
93 | | // qk norm |
94 | 0 | q = build_norm(q, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); |
95 | 0 | cb(q, "model.layers.{}.self_attn.q_layernorm", il); |
96 | 0 | k = build_norm(k, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); |
97 | 0 | cb(k, "model.layers.{}.self_attn.k_layernorm", il); |
98 | | |
99 | | // RoPE |
100 | 0 | q = ggml_rope_ext(ctx0, q, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, |
101 | 0 | attn_factor, beta_fast, beta_slow); |
102 | 0 | k = ggml_rope_ext(ctx0, k, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, |
103 | 0 | attn_factor, beta_fast, beta_slow); |
104 | |
105 | 0 | cur = build_attn(inp_attn, |
106 | 0 | model.layers[il].wo, NULL, |
107 | 0 | q, k, v, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il); |
108 | |
109 | 0 | cb(cur, "model.layers.{}.self_attn.out_proj", il); |
110 | |
111 | 0 | return cur; |
112 | 0 | } |
113 | | |
114 | 0 | ggml_tensor * llm_build_lfm2::build_shortconv_block(ggml_tensor * cur, llm_graph_input_rs * inp_recr, int il) { |
115 | 0 | const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx)->get_recr(); |
116 | 0 | const uint32_t kv_head = mctx_cur->get_head(); |
117 | 0 | const int64_t n_seq_tokens = ubatch.n_seq_tokens; |
118 | 0 | const int64_t n_seqs = ubatch.n_seqs; |
119 | 0 | GGML_ASSERT(n_seqs != 0); |
120 | 0 | GGML_ASSERT(ubatch.equal_seqs()); |
121 | 0 | GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); |
122 | |
123 | 0 | GGML_ASSERT(hparams.n_shortconv_l_cache > 1); |
124 | 0 | const uint32_t d_conv = hparams.n_shortconv_l_cache - 1; |
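| | // the rolling conv state holds the previous d_conv columns per channel; the
| | // current tokens are appended to it below before the short convolution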
125 | | |
126 | | // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} |
127 | 0 | cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs); |
128 | |
129 | 0 | auto * bcx = build_lora_mm(model.layers[il].shortconv.in_proj, cur); |
130 | 0 | cb(bcx, "model.layers.{}.conv.in_proj", il); |
131 | |
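| | // in_proj output packs three equal chunks along dim 0: b (input gate), c (output gate), x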
132 | 0 | constexpr auto n_chunks = 3; |
133 | 0 | GGML_ASSERT(bcx->ne[0] % n_chunks == 0); |
134 | 0 | const auto chunk_size = bcx->ne[0] / n_chunks; |
135 | 0 | auto * b = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2], |
136 | 0 | 0 * chunk_size * ggml_element_size(bcx)); |
137 | 0 | auto * c = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2], |
138 | 0 | 1 * chunk_size * ggml_element_size(bcx)); |
139 | 0 | auto * x = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2], |
140 | 0 | 2 * chunk_size * ggml_element_size(bcx)); |
141 | |
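| | // gate x with b elementwise, then transpose so the time dimension comes first for the conv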
142 | 0 | auto * bx = ggml_transpose(ctx0, ggml_mul(ctx0, b, x)); |
143 | | |
144 | | // read conv state |
145 | 0 | auto * conv_state = mctx_cur->get_r_l(il); |
146 | 0 | auto * conv_rs = build_rs(inp_recr, conv_state, hparams.n_embd_r(), n_seqs); |
147 | 0 | auto * conv = ggml_reshape_3d(ctx0, conv_rs, d_conv, hparams.n_embd, n_seqs); |
148 | |
149 | 0 | bx = ggml_concat(ctx0, conv, bx, 0); |
150 | 0 | GGML_ASSERT(bx->ne[0] > conv->ne[0]); |
151 | | |
152 | | // the last d_conv columns are the new conv state
153 | 0 | auto * new_conv = ggml_view_3d(ctx0, bx, conv->ne[0], bx->ne[1], bx->ne[2], bx->nb[1], bx->nb[2], |
154 | 0 | (bx->ne[0] - conv->ne[0]) * ggml_element_size(bx)); |
155 | 0 | GGML_ASSERT(ggml_are_same_shape(conv, new_conv)); |
156 | | |
157 | | // write the new conv state
158 | 0 | ggml_build_forward_expand(gf, ggml_cpy(ctx0, new_conv, |
159 | 0 | ggml_view_1d(ctx0, conv_state, ggml_nelements(new_conv), |
160 | 0 | kv_head * d_conv * n_embd * ggml_element_size(new_conv)))); |
161 | |
162 | 0 | auto * conv_kernel = model.layers[il].shortconv.conv; |
163 | 0 | auto * conv_out = ggml_ssm_conv(ctx0, bx, conv_kernel); |
164 | 0 | cb(conv_out, "model.layers.{}.conv.conv", il); |
165 | |
166 | 0 | auto * y = ggml_mul(ctx0, c, conv_out); |
167 | 0 | y = build_lora_mm(model.layers[il].shortconv.out_proj, y); |
168 | 0 | cb(y, "model.layers.{}.conv.out_proj", il); |
169 | | // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens} |
170 | 0 | y = ggml_reshape_2d(ctx0, y, y->ne[0], n_seq_tokens * n_seqs); |
171 | |
172 | 0 | return y; |
173 | 0 | } |