/src/llama.cpp/src/models/mistral3.cpp
#include "models.h"

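// Graph builder for Mistral 3 style models: a pre-norm transformer stack
// (RMSNorm -> self-attention -> residual -> RMSNorm -> FFN -> residual),
// with an optional MoE feed-forward branch and optional attention temperature scaling.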
llm_build_mistral3::llm_build_mistral3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    // (optional) temperature tuning
    ggml_tensor * inp_attn_scale = nullptr;
    if (hparams.f_attn_temp_scale != 0.0f) {
        inp_attn_scale = build_inp_attn_scale();
    }

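    // attention input backed by the KV cache (provides the KV slots and attention mask)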
    auto * inp_attn = build_attn_inp_kv();

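    // attention scale: 1/sqrt(head_dim) unless the model provides an explicit override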
    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // rope freq factors for llama3; may return nullptr for llama2 and other models
            ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }
            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }
            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }
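
            // split the flat projections into per-head views: [n_embd_head, n_head(_kv), n_tokens];
            // using fewer KV heads than query heads implements grouped-query attention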
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

            Qcur = ggml_rope_ext(
                    ctx0, Qcur, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            Kcur = ggml_rope_ext(
                    ctx0, Kcur, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            if (inp_attn_scale) {
                // apply llama 4 temperature scaling
                Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
                cb(Qcur, "Qcur_attn_temp_scaled", il);
            }

            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
        }
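
        // on the last layer, keep only the rows for which outputs were requested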
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // feed-forward network (non-MoE)
        if (model.layers[il].ffn_gate_inp == nullptr) {
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);

            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);

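            // route each token to n_expert_used of n_expert experts, gated with softmax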
            cur = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    nullptr,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, true,
                    false, 0.0,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                    il);
            cb(cur, "ffn_moe_out", il);
        }

        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "ffn_out", il);

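        // apply the per-layer control vector, if one is loaded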
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

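    // add the result tensor and all of its dependencies to the compute graph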
    ggml_build_forward_expand(gf, cur);
}