/src/llama.cpp/src/models/exaone-moe.cpp
#include "models.h"

llm_build_exaone_moe::llm_build_exaone_moe(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_k;

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_v);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();
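
    // KV-cache input for interleaved sliding-window attention (iSWA):
    // SWA layers attend within a local window, the remaining layers attend globally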
    auto * inp_attn_iswa = build_attn_inp_kv_iswa();
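
    // indices of the tokens whose outputs (logits/embeddings) are actually needed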
    ggml_tensor * inp_out_ids = build_inp_out_ids();
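
    // the trailing hparams.nextn_predict_layers layers are NextN/MTP prediction
    // layers and are not part of the main autoregressive graph, so skip them here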
    const int n_transformer_layers = n_layer - hparams.nextn_predict_layers;
    for (int il = 0; il < n_transformer_layers; ++il) {
        ggml_tensor * inpSA = inpL;

        // RoPE is used only for the SWA (local) layers
        const bool is_local_layer = hparams.is_swa(il);

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
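            // optional per-layer RoPE frequency factors (e.g. for long-context scaling)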
            ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
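
            // split into heads: (n_embd_head, n_head[_kv], n_tokens)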
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
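
            // QK-norm: per-head RMS normalization of Q and K before RoPE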
            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);
            cb(Kcur, "Kcur_normed", il);
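
            // rotary position embedding is applied only on the local (sliding-window) layers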
            if (is_local_layer) {
                Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base,
                                     freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);

                Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base,
                                     freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
            }
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);
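
            // attention over the iSWA KV cache, with output projection wo and 1/sqrt(d_head) scaling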
            cur = build_attn(inp_attn_iswa,
                    model.layers[il].wo, NULL,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
            cb(cur, "attn_out", il);
        }
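
        // on the last retained layer, keep only the token rows selected for output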
        if (il == n_transformer_layers - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
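
        // residual connection around attention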
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // norm
        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        // feed-forward network
        if (model.layers[il].ffn_gate_inp == nullptr) {
            // dense branch
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL, NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            ggml_tensor * moe_out = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    model.layers[il].ffn_exp_probs_b,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, hparams.expert_weights_norm,
                    true, hparams.expert_weights_scale,
                    (llama_expert_gating_func_type) hparams.expert_gating_func,
                    il);
            cb(moe_out, "ffn_moe_out", il);

            // FFN shared expert
            {
                ggml_tensor * ffn_shexp =
                        build_ffn(cur,
                            model.layers[il].ffn_up_shexp,   NULL, NULL,
                            model.layers[il].ffn_gate_shexp, NULL, NULL,
                            model.layers[il].ffn_down_shexp, NULL, NULL,
                            NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);
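
                // combine the routed-expert output with the always-on shared expert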
                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }
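
        // residual connection around the FFN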
        cur = ggml_add(ctx0, cur, ffn_inp);
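
        // apply the control vector for this layer, if one is loaded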
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    // final norm
    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;
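
    // expand the graph from the logits tensor so all of its dependencies are built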
    ggml_build_forward_expand(gf, cur);
}