Coverage Report

Created: 2026-03-21 06:50

/src/llama.cpp/src/models/exaone-moe.cpp
Every executable line in this file has an execution count of 0; the EXAONE-MoE graph builder was not exercised during this run. The source is reproduced below with the per-line numbers and zero counts stripped:

#include "models.h"

llm_build_exaone_moe::llm_build_exaone_moe(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_k();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_v());
    GGML_ASSERT(n_embd_head == n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn_iswa = build_attn_inp_kv_iswa();

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    const int n_transformer_layers = n_layer - hparams.nextn_predict_layers;
    for (int il = 0; il < n_transformer_layers; ++il) {
        ggml_tensor * inpSA = inpL;

        // use RoPE for SWA layers
        const bool is_local_layer = hparams.is_swa(il);

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);

            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);
            cb(Kcur, "Kcur_normed", il);

            if (is_local_layer) {
                Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base,
                                     freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);

                Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base,
                                     freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
            }
            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            cur = build_attn(inp_attn_iswa,
                model.layers[il].wo, NULL,
                Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
            cb(cur, "attn_out", il);
        }
        if (il == n_transformer_layers - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // norm
        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        // feed-forward network
        if (model.layers[il].ffn_gate_inp == nullptr) {
            // dense branch
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL, NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            ggml_tensor * moe_out = build_moe_ffn(cur,
                model.layers[il].ffn_gate_inp,
                model.layers[il].ffn_up_exps,
                model.layers[il].ffn_gate_exps,
                model.layers[il].ffn_down_exps,
                model.layers[il].ffn_exp_probs_b,
                n_expert, n_expert_used,
                LLM_FFN_SILU, hparams.expert_weights_norm,
                hparams.expert_weights_scale,
                (llama_expert_gating_func_type) hparams.expert_gating_func,
                il);
            cb(moe_out, "ffn_moe_out", il);

            // FFN shared expert
            {
                ggml_tensor * ffn_shexp =
                    build_ffn(cur,
                        model.layers[il].ffn_up_shexp, NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }

        cur = ggml_add(ctx0, cur, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }
    cur = inpL;

    // final norm
    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}
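
For readers tracing the uncovered paths, every build_norm(..., LLM_NORM_RMS, ...) call above performs RMS normalization. The following is a minimal self-contained C++ sketch of that operation; it is illustrative only, not the ggml kernel, and the eps default is an assumed placeholder:

#include <cmath>
#include <vector>

// RMSNorm: y[i] = w[i] * x[i] / sqrt(mean(x^2) + eps)
std::vector<float> toy_rms_norm(const std::vector<float> & x,
                                const std::vector<float> & w, float eps = 1e-6f) {
    float ms = 0.0f;
    for (float v : x) ms += v * v;            // sum of squares
    ms /= (float) x.size();                   // mean square
    const float inv = 1.0f / std::sqrt(ms + eps);
    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); ++i) y[i] = w[i] * x[i] * inv;
    return y;
}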
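The RoPE calls are gated on hparams.is_swa(il), so only the sliding-window ("local") layers rotate Q and K. A toy sketch of the basic rotary embedding on one head vector follows; ggml_rope_ext additionally applies frequency scaling, extension factors, and the per-layer rope_factors, and its dimension pairing depends on rope_type, so treat this as the textbook formulation rather than the library's exact layout:

#include <cmath>
#include <vector>

// Rotate consecutive dimension pairs of one head vector by
// position-dependent angles: theta_i = pos * base^(-2i/d).
void toy_rope(std::vector<float> & head, int pos, float freq_base = 10000.0f) {
    const int d = (int) head.size();          // assumed even
    for (int i = 0; i < d; i += 2) {
        float theta = pos * std::pow(freq_base, -(float) i / d);
        float c = std::cos(theta), s = std::sin(theta);
        float x0 = head[i], x1 = head[i + 1];
        head[i]     = x0 * c - x1 * s;
        head[i + 1] = x0 * s + x1 * c;
    }
}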
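In the MoE branch, build_moe_ffn routes each token to n_expert_used of n_expert SiLU-gated experts, and the result is summed with a shared-expert FFN that runs on every token (the moe_out + ffn_shexp add above). The sketch below shows that routing pattern on scalars under simplifying assumptions; ToyExpert, the router weights, and the softmax-plus-renormalization choice are invented for the example and do not mirror the ggml tensor code:

#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>

// Toy SiLU-gated FFN: down(silu(gate(x)) * up(x)), collapsed to scalars.
struct ToyExpert {
    float w_up, w_gate, w_down;
    float forward(float x) const {
        float g    = w_gate * x;
        float silu = g / (1.0f + std::exp(-g));   // SiLU(g) = g * sigmoid(g)
        return w_down * (silu * (w_up * x));
    }
};

// Route one scalar "token" to the top-k experts by softmax router score,
// then add a shared expert that always runs.
float toy_moe_forward(float x, const std::vector<ToyExpert> & experts,
                      const std::vector<float> & router_w,
                      const ToyExpert & shared, int k) {
    const int n = (int) experts.size();
    std::vector<float> logits(n), probs(n);
    for (int i = 0; i < n; ++i) logits[i] = router_w[i] * x;

    // softmax over the router logits
    float mx = *std::max_element(logits.begin(), logits.end());
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) { probs[i] = std::exp(logits[i] - mx); sum += probs[i]; }
    for (int i = 0; i < n; ++i) probs[i] /= sum;

    // pick the k highest-probability experts
    std::vector<int> idx(n);
    std::iota(idx.begin(), idx.end(), 0);
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                      [&](int a, int b) { return probs[a] > probs[b]; });

    // weighted sum of the selected experts, with the weights renormalized
    // over the selected set (what hparams.expert_weights_norm would request)
    float wsum = 0.0f, out = 0.0f;
    for (int j = 0; j < k; ++j) wsum += probs[idx[j]];
    for (int j = 0; j < k; ++j) out += (probs[idx[j]] / wsum) * experts[idx[j]].forward(x);

    return out + shared.forward(x);   // shared expert sees every token
}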