Coverage Report

Created: 2026-03-07 06:35

/src/llama.cpp/src/models/step35-iswa.cpp
All executable lines below have an execution count of 0; the file is entirely uncovered.

#include "models.h"

llm_build_step35_iswa::llm_build_step35_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);
    ggml_tensor * inp_pos     = build_inp_pos();
    auto        * inp_attn    = build_attn_inp_kv_iswa();
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        const uint32_t n_head_l    = hparams.n_head(il);
        const uint32_t n_head_kv_l = hparams.n_head_kv(il);

        const float freq_base_l  = model.get_rope_freq_base(cparams, il);
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);

        cur = inpL;

        // dump pre-attn RMSNorm input to pinpoint layer boundary issues
        cb(cur, "attn_norm_in", il);

        // self-attention
        {
            cur = build_norm(cur, model.layers[il].attn_norm, nullptr, LLM_NORM_RMS, il);
            cb(cur, "attn_norm", il);
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head_l,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv_l, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head_v, n_head_kv_l, n_tokens);

            // Q/K per-head RMSNorm (Step35 q_norm / k_norm)
            if (model.layers[il].attn_q_norm) {
                Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, nullptr, LLM_NORM_RMS, il);
                cb(Qcur, "Qcur_normed", il);
            }
            if (model.layers[il].attn_k_norm) {
                Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, nullptr, LLM_NORM_RMS, il);
                cb(Kcur, "Kcur_normed", il);
            }

            // RoPE (partial rotary factors per layer)
            const bool is_swa = hparams.is_swa(il);
            ggml_tensor * rope_factors = is_swa ? nullptr : model.get_rope_factors(cparams, il);
            const int64_t n_rot_l = is_swa ? hparams.n_rot : (hparams.n_rot / 2);
            Qcur = ggml_rope_ext(
                ctx0, Qcur, inp_pos, rope_factors,
                n_rot_l, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            Kcur = ggml_rope_ext(
                ctx0, Kcur, inp_pos, rope_factors,
                n_rot_l, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                ext_factor, attn_factor, beta_fast, beta_slow
            );
            cb(Qcur, "Qcur_pos", il);
            cb(Kcur, "Kcur_pos", il);

            const float kq_scale = 1.0f / sqrtf(float(n_embd_head_k));
            ggml_tensor * attn_out = build_attn(inp_attn,
                    nullptr, nullptr,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            cb(attn_out, "attn_out", il);

            // head-wise attention gate: sigmoid(g_proj(x)) in torch
            if (model.layers[il].wqkv_gate) {
                ggml_tensor * gate = build_lora_mm(model.layers[il].wqkv_gate, cur); // [n_head_l, n_tokens]
                cb(gate, "attn_gate", il);

                gate = ggml_sigmoid(ctx0, gate);
                cb(gate, "attn_gate_sigmoid", il);

                // reshape + broadcast to [n_embd_head_v, n_head_l, n_tokens]
                ggml_tensor * attn_3d = ggml_reshape_3d(ctx0, attn_out, n_embd_head_v, n_head_l, n_tokens);
                ggml_tensor * gate_3d = ggml_reshape_3d(ctx0, gate,     1,             n_head_l, n_tokens);
                cb(gate_3d, "attn_gate_3d", il);

                attn_3d = ggml_mul(ctx0, attn_3d, gate_3d);
                cb(attn_3d, "attn_gated_3d", il);

                attn_out = ggml_reshape_2d(ctx0, attn_3d, n_embd_head_v * n_head_l, n_tokens);
                cb(attn_out, "attn_gated", il);
            }

            // output projection
            cur = build_lora_mm(model.layers[il].wo, attn_out);
            cb(cur, "attn_proj", il);
        }

        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, nullptr, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        // feed-forward
        if (model.layers[il].ffn_gate_inp == nullptr) {
            // dense MLP
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   nullptr,
                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, nullptr,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, nullptr,
                    nullptr,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE routed experts
            const bool  norm_w  = hparams.expert_weights_norm;
            const float w_scale = hparams.expert_weights_scale;
            const bool  scale_w = w_scale != 0.0f;
            ggml_tensor * moe_out = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    model.layers[il].ffn_exp_probs_b,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU,
                    norm_w, scale_w, w_scale,
                    (llama_expert_gating_func_type) hparams.expert_gating_func,
                    il);
            cb(moe_out, "ffn_moe_out", il);

            // shared expert MLP (always added on MoE layers in Step35)
            ggml_tensor * sh_out = build_ffn(cur,
                    model.layers[il].ffn_up_shexp,   nullptr, nullptr,
                    model.layers[il].ffn_gate_shexp, nullptr, nullptr,
                    model.layers[il].ffn_down_shexp, nullptr, nullptr,
                    nullptr,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(sh_out, "ffn_shared_out", il);

            cur = ggml_add(ctx0, moe_out, sh_out);
            cb(cur, "ffn_out", il);
        }
        cur = ggml_add(ctx0, cur, ffn_inp);
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur, model.output_norm, nullptr, LLM_NORM_RMS, -1);
    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    cur = build_lora_mm(model.output, cur);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}
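
A note on the per-head q_norm / k_norm in the listing: it is an RMSNorm taken over each head's n_embd_head_k channels rather than over the full hidden state. Assuming the usual learned scale and epsilon of LLM_NORM_RMS, the formula is:

    \mathrm{RMSNorm}(x) = \frac{x}{\sqrt{\frac{1}{d}\sum_{i=1}^{d} x_i^2 + \varepsilon}} \odot \gamma, \qquad d = \mathit{n\_embd\_head\_k}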
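
The two ggml_rope_ext calls rotate only the first n_rot_l channels of each head: the full hparams.n_rot on sliding-window layers, and n_rot / 2 together with per-layer rope factors on full-attention layers; the remaining channels pass through unrotated, which is the "partial rotary" the comment refers to. Assuming the standard rotary formulation (the exact channel pairing depends on rope_type), each rotated pair at position p uses:

    \begin{pmatrix} x'_{2i} \\ x'_{2i+1} \end{pmatrix}
    = \begin{pmatrix} \cos(p\,\theta_i) & -\sin(p\,\theta_i) \\ \sin(p\,\theta_i) & \cos(p\,\theta_i) \end{pmatrix}
    \begin{pmatrix} x_{2i} \\ x_{2i+1} \end{pmatrix},
    \qquad \theta_i = \mathit{freq\_base\_l}^{-2i/\mathit{n\_rot\_l}}

with positions additionally scaled by freq_scale_l.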
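
With kq_scale = 1 / sqrt(n_embd_head_k), build_attn computes standard scaled dot-product attention against the iSWA KV cache; schematically, with M the causal or sliding-window mask supplied by inp_attn:

    \mathrm{Attn}(Q, K, V) = \mathrm{softmax}\!\left(\frac{Q K^\top}{\sqrt{d_k}} + M\right) V, \qquad d_k = \mathit{n\_embd\_head\_k}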
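
The wqkv_gate branch scales each head's output by a single sigmoid scalar computed from the same normed input; that is what the reshape of the gate to [1, n_head_l, n_tokens] and the broadcast ggml_mul implement. A minimal scalar sketch of the same computation for one token follows; the function and variable names are illustrative, not llama.cpp API:

    #include <cmath>
    #include <vector>

    // Apply a per-head sigmoid gate to one token's attention output.
    // attn_out:    n_head * d_v values, laid out head-major, matching the
    //              [n_embd_head_v, n_head_l, n_tokens] view in the graph
    // gate_logits: n_head raw gate values, i.e. g_proj(x) for this token
    void gate_heads(std::vector<float> & attn_out,
                    const std::vector<float> & gate_logits,
                    int n_head, int d_v) {
        for (int h = 0; h < n_head; ++h) {
            const float g = 1.0f / (1.0f + std::exp(-gate_logits[h])); // sigmoid
            for (int i = 0; i < d_v; ++i) {
                attn_out[h * d_v + i] *= g; // broadcast the head's scalar over its d_v channels
            }
        }
    }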
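
On MoE layers the routed expert output and the always-on shared expert are summed. Assuming build_moe_ffn performs the usual top-k routing (with optional per-token weight normalization and scaling controlled by expert_weights_norm / expert_weights_scale) and that both paths are SwiGLU MLPs, the block computes approximately:

    y = \sum_{e \,\in\, \mathrm{topk}(r(x))} w_e\,\mathrm{FFN}_e(x) + \mathrm{FFN}_{\mathrm{shexp}}(x),
    \qquad \mathrm{FFN}(x) = W_{\mathrm{down}}\big(\mathrm{SiLU}(W_{\mathrm{gate}}\,x) \odot W_{\mathrm{up}}\,x\big)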