Coverage Report

Created: 2026-03-21 06:50

/src/llama.cpp/src/models/granite.cpp
Source (every instrumented line in this file reports an execution count of 0; the file is entirely uncovered):

#include "models.h"

llm_build_granite::llm_build_granite(
    const llama_model & model,
    const llm_graph_params & params)
    : llm_graph_context(params) {

    const int64_t n_embd_head = hparams.n_embd_head_v();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k());
    GGML_ASSERT(n_embd_head == n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - built only if rope enabled
    ggml_tensor * inp_pos = nullptr;
    if (hparams.rope_finetuned) {
        inp_pos = build_inp_pos();
    }
    auto * inp_attn = build_attn_inp_kv();

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        cur = build_attention_layer(
            cur, inp_pos, inp_attn,
            model, n_embd_head, il);

        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        // ffn
        cur = build_layer_ffn(cur, inpSA, model, il);

        // input for next layer
        inpL = cur;
    }
    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    // For Granite architectures - scale logits
    cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}

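The Granite-specific step in the constructor above is the final ggml_scale call, which divides the lm_head output by hparams.f_logit_scale before storing res->t_logits. A minimal standalone sketch of that arithmetic follows; f_logit_scale = 8.0f and the raw logit value are assumed purely for illustration (the real scale comes from the model's hyperparameters):

#include <cstdio>

// Mirrors the logit scaling at the end of llm_build_granite's constructor:
//     cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
// Plain floats stand in for tensors; f_logit_scale = 8.0f is an assumed value.
int main() {
    const float f_logit_scale = 8.0f;
    const float raw_logit     = 4.0f;  // hypothetical lm_head output
    const float scaled_logit  = raw_logit * (1.0f / f_logit_scale);
    std::printf("scaled logit = %.3f\n", scaled_logit);  // prints 0.500
    return 0;
}
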
ggml_tensor * llm_build_granite::build_attention_layer(
          ggml_tensor             * cur,
          ggml_tensor             * inp_pos,
          llm_graph_input_attn_kv * inp_attn,
    const llama_model             & model,
    const int64_t                 n_embd_head,
    const int                     il) {

    // compute Q and K and (optionally) RoPE them
    ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
    cb(Qcur, "Qcur", il);
    if (model.layers[il].bq) {
        Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
        cb(Qcur, "Qcur", il);
    }

    ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
    cb(Kcur, "Kcur", il);
    if (model.layers[il].bk) {
        Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
        cb(Kcur, "Kcur", il);
    }

    ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
    cb(Vcur, "Vcur", il);
    if (model.layers[il].bv) {
        Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
        cb(Vcur, "Vcur", il);
    }

    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, hparams.n_head(il),    n_tokens);
    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, hparams.n_head_kv(il), n_tokens);
    Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, hparams.n_head_kv(il), n_tokens);

    const bool use_rope = hparams.rope_finetuned;
    if (use_rope) {
        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
        Qcur = ggml_rope_ext(
                ctx0, Qcur, inp_pos, rope_factors,
                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
                );

        Kcur = ggml_rope_ext(
                ctx0, Kcur, inp_pos, rope_factors,
                n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                ext_factor, attn_factor, beta_fast, beta_slow
                );
    }

    cb(Qcur, "Qcur", il);
    cb(Kcur, "Kcur", il);
    cb(Vcur, "Vcur", il);

    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
    cur = build_attn(inp_attn,
            model.layers[il].wo, model.layers[il].bo,
            Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
    cb(cur, "attn_out", il);
    return cur;
}

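In build_attention_layer above, kq_scale falls back to the conventional 1/sqrt(n_embd_head) softmax scale when hparams.f_attention_scale is zero, and otherwise uses the model's explicit attention multiplier. A standalone sketch of that selection; n_embd_head = 128 and the explicit scale are assumed values for illustration only:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Mirrors the kq_scale ternary in build_attention_layer:
//     hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale
static float kq_scale(float f_attention_scale, int64_t n_embd_head) {
    return f_attention_scale == 0.0f ? 1.0f / std::sqrt(float(n_embd_head)) : f_attention_scale;
}

int main() {
    std::printf("%f\n", kq_scale(0.0f,      128));  // default: 1/sqrt(128) ~= 0.088388
    std::printf("%f\n", kq_scale(0.015625f, 128));  // assumed explicit scale is used as-is
    return 0;
}
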
ggml_tensor * llm_build_granite::build_layer_ffn(
          ggml_tensor       * cur,
          ggml_tensor       * inpSA,
    const llama_model       & model,
    const int                 il) {

    // For Granite architectures - scale residual
    if (hparams.f_residual_scale) {
        cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
    }
    ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
    cb(ffn_inp, "ffn_inp", il);

    // feed-forward network (non-MoE)
    if (model.layers[il].ffn_gate_inp == nullptr) {

        cur = build_norm(ffn_inp,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        cur = build_ffn(cur,
                model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
                model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
                model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, il);
        cb(cur, "ffn_out", il);

    } else {
        // MoE branch
        cur = build_norm(ffn_inp,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        ggml_tensor * moe_out = build_moe_ffn(cur,
                model.layers[il].ffn_gate_inp,
                model.layers[il].ffn_up_exps,
                model.layers[il].ffn_gate_exps,
                model.layers[il].ffn_down_exps,
                nullptr,
                n_expert, n_expert_used,
                LLM_FFN_SILU, true,
                hparams.expert_weights_scale,
                LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                il);
        cb(moe_out, "ffn_moe_out", il);

        // For Granite MoE Shared
        if (hparams.n_ff_shexp > 0) {
            ggml_tensor * ffn_shexp = build_ffn(cur,
                model.layers[il].ffn_up_shexp,   NULL, NULL,
                model.layers[il].ffn_gate_shexp, NULL, NULL,
                model.layers[il].ffn_down_shexp, NULL, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(ffn_shexp, "ffn_shexp", il);

            cur = ggml_add(ctx0, moe_out, ffn_shexp);
            cb(cur, "ffn_out", il);
        } else {
            cur = moe_out;
        }
    }

    // For Granite architectures - scale residual
    if (hparams.f_residual_scale) {
        cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
    }
    cur = ggml_add(ctx0, cur, ffn_inp);
    cb(cur, "ffn_out", il);

    cur = build_cvec(cur, il);
    cb(cur, "l_out", il);

    return cur;
}

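build_layer_ffn applies hparams.f_residual_scale to the branch output before each residual add and skips the scale when the field is zero. A standalone sketch of that residual pattern; the scalar inputs and f_residual_scale = 0.22f are assumed values for illustration only:

#include <cstdio>

// Mirrors the residual handling in build_layer_ffn:
//     if (hparams.f_residual_scale) { cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); }
//     cur = ggml_add(ctx0, cur, residual);
// Plain floats stand in for tensors.
static float add_residual(float branch_out, float residual, float f_residual_scale) {
    if (f_residual_scale != 0.0f) {
        branch_out *= f_residual_scale;  // Granite residual multiplier
    }
    return branch_out + residual;
}

int main() {
    std::printf("%f\n", add_residual(1.0f, 0.5f, 0.22f));  // scaled branch + residual = 0.72
    std::printf("%f\n", add_residual(1.0f, 0.5f, 0.0f));   // scale unset: plain residual add = 1.5
    return 0;
}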