Coverage Report

Created: 2025-12-28 06:26

/src/llama.cpp/src/llama-graph.cpp
Line | Count | Source
1
#include "llama-graph.h"
2
3
#include "llama-impl.h"
4
#include "llama-batch.h"
5
#include "llama-cparams.h"
6
7
#include "llama-kv-cache.h"
8
#include "llama-kv-cache-iswa.h"
9
#include "llama-memory-hybrid.h"
10
#include "llama-memory-recurrent.h"
11
12
#include <cassert>
13
#include <cmath>
14
#include <cstring>
15
16
0
void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) {
17
0
    if (ubatch->token) {
18
0
        const int64_t n_tokens = ubatch->n_tokens;
19
20
0
        ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens));
21
0
    }
22
23
0
    if (ubatch->embd) {
24
0
        const int64_t n_embd   = embd->ne[0];
25
0
        const int64_t n_tokens = ubatch->n_tokens;
26
27
0
        ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd));
28
0
    }
29
0
}
30
31
0
bool llm_graph_input_embd::can_reuse(const llm_graph_params & params) {
32
0
    bool res = true;
33
34
0
    res &= (!tokens && !params.ubatch.token) || (tokens && tokens->ne[0] == params.ubatch.n_tokens);
35
0
    res &= (!embd   && !params.ubatch.embd)  || (embd   &&   embd->ne[0] == params.ubatch.n_tokens);
36
37
0
    return res;
38
0
}
39
40
0
void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) {
41
0
    if (ubatch->pos && pos) {
42
0
        const int64_t n_tokens = ubatch->n_tokens;
43
44
0
        if (ubatch->token && n_pos_per_embd == 4) {
45
            // in case we're using M-RoPE with text tokens, convert the 1D positions to 4D
46
            // the first 3 dims are the same, and the 4th dim is all 0
47
0
            std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd);
48
            // copy the first dimension
49
0
            for (int i = 0; i < n_tokens; ++i) {
50
0
                pos_data[               i] = ubatch->pos[i];
51
0
                pos_data[    n_tokens + i] = ubatch->pos[i];
52
0
                pos_data[2 * n_tokens + i] = ubatch->pos[i];
53
0
                pos_data[3 * n_tokens + i] = 0; // 4th dim is 0
54
0
            }
55
0
            ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos));
56
0
        } else {
57
0
            ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_embd*ggml_element_size(pos));
58
0
        }
59
0
    }
60
0
}
61
62
0
bool llm_graph_input_pos::can_reuse(const llm_graph_params & params) {
63
0
    bool res = true;
64
65
0
    res &= pos->ne[0] == params.ubatch.n_tokens;
66
67
0
    return res;
68
0
}
69
70
0
void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) {
71
0
    if (ubatch->pos && attn_scale) {
72
0
        const int64_t n_tokens = ubatch->n_tokens;
73
74
0
        GGML_ASSERT(f_attn_temp_scale != 0.0f);
75
0
        GGML_ASSERT(n_attn_temp_floor_scale != 0);
76
77
0
        std::vector<float> attn_scale_data(n_tokens, 0.0f);
78
0
        for (int i = 0; i < n_tokens; ++i) {
79
0
            const float pos = ubatch->pos[i];
80
0
            attn_scale_data[i] = std::log(
81
0
                std::floor((pos + f_attn_temp_offset) / n_attn_temp_floor_scale) + 1.0
82
0
            ) * f_attn_temp_scale + 1.0;
83
0
        }
84
85
0
        ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*ggml_element_size(attn_scale));
86
0
    }
87
0
}
88
89
0
void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) {
90
0
    if (pos_bucket) {
91
0
        const int64_t n_tokens = ubatch->n_tokens;
92
93
0
        GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer));
94
0
        GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
95
96
0
        int32_t * data = (int32_t *) pos_bucket->data;
97
98
0
        for (int h = 0; h < 1; ++h) {
99
0
            for (int j = 0; j < n_tokens; ++j) {
100
0
                for (int i = 0; i < n_tokens; ++i) {
101
0
                    data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true);
102
0
                }
103
0
            }
104
0
        }
105
0
    }
106
0
}
107
108
0
void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) {
109
0
    if (pos_bucket) {
110
0
        mctx->set_input_pos_bucket(pos_bucket, ubatch);
111
0
    }
112
0
}
113
114
0
void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
115
0
    GGML_ASSERT(out_ids);
116
117
0
    const int64_t n_tokens = ubatch->n_tokens;
118
119
0
    GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
120
0
    int32_t * data = (int32_t *) out_ids->data;
121
122
0
    if (n_outputs == n_tokens) {
123
0
        for (int i = 0; i < n_tokens; ++i) {
124
0
            data[i] = i;
125
0
        }
126
127
0
        return;
128
0
    }
129
130
0
    GGML_ASSERT(ubatch->output);
131
132
0
    int n_outputs = 0;
133
134
0
    for (int i = 0; i < n_tokens; ++i) {
135
0
        if (ubatch->output[i]) {
136
0
            data[n_outputs++] = i;
137
0
        }
138
0
    }
139
0
}
140
141
0
bool llm_graph_input_out_ids::can_reuse(const llm_graph_params & params) {
142
0
    bool res = true;
143
144
0
    res &= n_outputs == params.n_outputs;
145
146
0
    return res;
147
0
}
148
149
0
void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
150
0
    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
151
0
        const int64_t n_tokens     = ubatch->n_tokens;
152
0
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
153
0
        const int64_t n_seqs_unq   = ubatch->n_seqs_unq;
154
155
0
        GGML_ASSERT(mean);
156
0
        GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));
157
158
0
        float * data = (float *) mean->data;
159
0
        memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean));
160
161
0
        std::vector<uint64_t> sums(n_seqs_unq, 0);
162
0
        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
163
0
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
164
0
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
165
0
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];
166
167
0
                sums[seq_idx] += ubatch->n_seq_tokens;
168
0
            }
169
0
        }
170
171
0
        std::vector<float> div(n_seqs_unq, 0.0f);
172
0
        for (int s = 0; s < n_seqs_unq; ++s) {
173
0
            const uint64_t sum = sums[s];
174
0
            if (sum > 0) {
175
0
                div[s] = 1.0f/float(sum);
176
0
            }
177
0
        }
178
179
0
        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
180
0
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
181
0
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
182
0
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];
183
184
0
                for (int j = 0; j < n_seq_tokens; ++j) {
185
0
                    data[seq_idx*n_tokens + i + j] = div[seq_idx];
186
0
                }
187
0
            }
188
0
        }
189
0
    }
190
0
}
191
192
0
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
193
0
    const int64_t n_tokens     = ubatch->n_tokens;
194
0
    const int64_t n_seqs_unq   = ubatch->n_seqs_unq;
195
196
0
    if (cparams.embeddings && (
197
0
        cparams.pooling_type == LLAMA_POOLING_TYPE_CLS  ||
198
0
        cparams.pooling_type == LLAMA_POOLING_TYPE_RANK ||
199
0
        cparams.pooling_type == LLAMA_POOLING_TYPE_LAST
200
0
    )) {
201
0
        GGML_ASSERT(cls);
202
0
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));
203
204
0
        uint32_t * data = (uint32_t *) cls->data;
205
0
        memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));
206
207
0
        std::vector<int> target_pos(n_seqs_unq, -1);
208
0
        std::vector<int> target_row(n_seqs_unq, -1);
209
210
0
        const bool last = (
211
0
             cparams.pooling_type == LLAMA_POOLING_TYPE_LAST ||
212
0
            (cparams.pooling_type == LLAMA_POOLING_TYPE_RANK && arch == LLM_ARCH_QWEN3) // qwen3 reranking & embedding models use last token
213
0
        );
214
215
0
        for (int i = 0; i < n_tokens; ++i) {
216
0
            const llama_pos pos = ubatch->pos[i];
217
218
0
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
219
0
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
220
0
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];
221
222
0
                if (
223
0
                    (target_pos[seq_idx] == -1) ||
224
0
                    ( last && pos >= target_pos[seq_idx]) ||
225
0
                    (!last && pos <  target_pos[seq_idx])
226
0
                ) {
227
0
                    target_pos[seq_idx] = pos;
228
0
                    target_row[seq_idx] = i;
229
0
                }
230
0
            }
231
0
        }
232
233
0
        for (int s = 0; s < n_seqs_unq; ++s) {
234
0
            if (target_row[s] >= 0) {
235
0
                data[s] = target_row[s];
236
0
            }
237
0
        }
238
0
    }
239
0
}
240
241
0
void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) {
242
0
    GGML_UNUSED(ubatch);
243
244
0
    const int64_t n_rs = mctx->get_n_rs();
245
246
0
    if (s_copy) {
247
0
        GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
248
0
        int32_t * data = (int32_t *) s_copy->data;
249
250
        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
251
0
        for (uint32_t i = 0; i < n_rs; ++i) {
252
0
            data[i] = mctx->s_copy(i);
253
0
        }
254
0
    }
255
0
}
256
257
0
bool llm_graph_input_rs::can_reuse(const llm_graph_params & params) {
258
0
    const auto * mctx = static_cast<const llama_memory_recurrent_context *>(params.mctx);
259
260
0
    this->mctx = mctx;
261
262
0
    bool res = true;
263
264
0
    res &= s_copy->ne[0] == mctx->get_n_rs();
265
266
0
    res &= s_copy_main->ne[0]  == params.ubatch.n_seqs;
267
0
    res &= s_copy_extra->ne[0] == mctx->get_n_rs() - params.ubatch.n_seqs;
268
269
0
    res &= head == mctx->get_head();
270
0
    res &= rs_z == mctx->get_rs_z();
271
272
0
    return res;
273
0
}
274
275
0
void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
276
0
    GGML_UNUSED(ubatch);
277
278
0
    if (cross_embd && !cross->v_embd.empty()) {
279
0
        assert(cross_embd->type == GGML_TYPE_F32);
280
281
0
        ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd));
282
0
    }
283
0
}
284
285
0
static void print_mask(const float * data, int64_t n_tokens, int64_t n_kv, int64_t n_swa, llama_swa_type swa_type) {
286
0
    LLAMA_LOG_DEBUG("%s: === Attention mask ===\n", __func__);
287
0
    const char * swa_type_str = "unknown";
288
289
0
    switch (swa_type) {
290
0
        case LLAMA_SWA_TYPE_NONE:      swa_type_str = "LLAMA_SWA_TYPE_NONE"; break;
291
0
        case LLAMA_SWA_TYPE_STANDARD:  swa_type_str = "LLAMA_SWA_TYPE_STANDARD"; break;
292
0
        case LLAMA_SWA_TYPE_CHUNKED:   swa_type_str = "LLAMA_SWA_TYPE_CHUNKED"; break;
293
0
        case LLAMA_SWA_TYPE_SYMMETRIC: swa_type_str = "LLAMA_SWA_TYPE_SYMMETRIC"; break;
294
0
    };
295
296
0
    LLAMA_LOG_DEBUG("%s: n_swa : %d, n_kv: %d, swq_type: %s\n", __func__, (int)n_swa, (int)n_kv, swa_type_str);
297
0
    LLAMA_LOG_DEBUG("%s: '0' = can attend, '∞' = masked\n", __func__);
298
0
    LLAMA_LOG_DEBUG("%s: Rows = query tokens, Columns = key/value tokens\n\n", __func__);
299
300
0
    LLAMA_LOG_DEBUG("    ");
301
0
    for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) {
302
0
        LLAMA_LOG_DEBUG("%2d", j);
303
0
    }
304
0
    LLAMA_LOG_DEBUG("\n");
305
306
0
    for (int i = 0; i < std::min((int64_t)20, n_tokens); ++i) {
307
0
        LLAMA_LOG_DEBUG(" %2d ", i);
308
0
        for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) {
309
0
            float val = data[i * n_kv + j];
310
0
            if (val == -INFINITY) {
311
0
                LLAMA_LOG_DEBUG(" ∞");
312
0
            } else {
313
0
                LLAMA_LOG_DEBUG(" 0");
314
0
            }
315
0
        }
316
0
        LLAMA_LOG_DEBUG("\n");
317
0
    }
318
0
}
319
320
0
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
321
0
    const int64_t n_kv     = ubatch->n_tokens;
322
0
    const int64_t n_tokens = ubatch->n_tokens;
323
324
0
    const auto fill_mask = [&](float * data, int n_swa, llama_swa_type swa_type) {
325
0
        for (int h = 0; h < 1; ++h) {
326
0
            for (int i1 = 0; i1 < n_tokens; ++i1) {
327
0
                const llama_seq_id s1 = ubatch->seq_id[i1][0];
328
0
                const llama_pos    p1 = ubatch->pos[i1];
329
330
0
                const uint64_t idst = h*(n_kv*n_tokens) + i1*n_kv;
331
332
0
                for (int i0 = 0; i0 < n_tokens; ++i0) {
333
0
                    const llama_seq_id s0 = ubatch->seq_id[i0][0];
334
0
                    const llama_pos p0    = ubatch->pos[i0];
335
336
                    // mask different sequences
337
0
                    if (s0 != s1) {
338
0
                        continue;
339
0
                    }
340
341
                    // mask future tokens
342
0
                    if (cparams.causal_attn && p0 > p1) {
343
0
                        continue;
344
0
                    }
345
346
                    // apply SWA if any
347
0
                    if (llama_hparams::is_masked_swa(n_swa, swa_type, p0, p1)) {
348
0
                        continue;
349
0
                    }
350
351
0
                    data[idst + i0] = hparams.use_alibi ? -std::abs(p0 - p1) : 0.0f;
352
0
                }
353
0
            }
354
0
        }
355
0
    };
356
357
0
    {
358
0
        GGML_ASSERT(self_kq_mask);
359
0
        GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask->buffer));
360
361
0
        float * data = (float *) self_kq_mask->data;
362
363
0
        std::fill(data, data + ggml_nelements(self_kq_mask), -INFINITY);
364
365
0
        fill_mask(data, 0, LLAMA_SWA_TYPE_NONE);
366
367
0
        if (debug) {
368
0
            print_mask(data, n_tokens, n_kv, 0, LLAMA_SWA_TYPE_NONE);
369
0
        }
370
0
    }
371
372
0
    if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
373
0
        GGML_ASSERT(self_kq_mask_swa);
374
0
        GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask_swa->buffer));
375
376
0
        float * data = (float *) self_kq_mask_swa->data;
377
378
0
        std::fill(data, data + ggml_nelements(self_kq_mask_swa), -INFINITY);
379
380
0
        fill_mask(data, hparams.n_swa, hparams.swa_type);
381
382
0
        if (debug) {
383
0
            print_mask(data, n_tokens, n_kv, hparams.n_swa, hparams.swa_type);
384
0
        }
385
0
    }
386
0
}
387
388
0
void llm_graph_input_attn_kv::set_input(const llama_ubatch * ubatch) {
389
0
    mctx->set_input_k_idxs(self_k_idxs, ubatch);
390
0
    mctx->set_input_v_idxs(self_v_idxs, ubatch);
391
392
0
    mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
393
0
}
394
395
0
bool llm_graph_input_attn_kv::can_reuse(const llm_graph_params & params) {
396
0
    const auto * mctx = static_cast<const llama_kv_cache_context *>(params.mctx);
397
398
0
    this->mctx = mctx;
399
400
0
    bool res = true;
401
402
0
    res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
403
  //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
404
405
0
    res &= self_kq_mask->ne[0] == mctx->get_n_kv();
406
0
    res &= self_kq_mask->ne[1] == params.ubatch.n_tokens;
407
408
0
    return res;
409
0
}
410
411
0
void llm_graph_input_attn_kv_iswa::set_input(const llama_ubatch * ubatch) {
412
0
    mctx->get_base()->set_input_k_idxs(self_k_idxs, ubatch);
413
0
    mctx->get_base()->set_input_v_idxs(self_v_idxs, ubatch);
414
415
0
    mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
416
417
0
    mctx->get_swa()->set_input_k_idxs(self_k_idxs_swa, ubatch);
418
0
    mctx->get_swa()->set_input_v_idxs(self_v_idxs_swa, ubatch);
419
420
0
    mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn);
421
0
}
422
423
0
bool llm_graph_input_attn_kv_iswa::can_reuse(const llm_graph_params & params) {
424
0
    const auto * mctx = static_cast<const llama_kv_cache_iswa_context *>(params.mctx);
425
426
0
    this->mctx = mctx;
427
428
0
    bool res = true;
429
430
0
    res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
431
  //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
432
433
0
    res &= self_k_idxs_swa->ne[0] == params.ubatch.n_tokens;
434
  //res &= self_v_idxs_swa->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
435
436
0
    res &= self_kq_mask->ne[0] == mctx->get_base()->get_n_kv();
437
0
    res &= self_kq_mask->ne[1] == params.ubatch.n_tokens;
438
439
0
    res &= self_kq_mask_swa->ne[0] == mctx->get_swa()->get_n_kv();
440
0
    res &= self_kq_mask_swa->ne[1] == params.ubatch.n_tokens;
441
442
0
    return res;
443
0
}
444
445
0
void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
446
0
    GGML_ASSERT(cross_kq_mask);
447
448
0
    const int64_t n_enc    = cross_kq_mask->ne[0];
449
0
    const int64_t n_tokens = ubatch->n_tokens;
450
451
0
    GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
452
0
    GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
453
454
0
    float * data = (float *) cross_kq_mask->data;
455
456
0
    for (int h = 0; h < 1; ++h) {
457
0
        for (int i = 0; i < n_tokens; ++i) {
458
0
            for (int j = 0; j < n_enc; ++j) {
459
0
                float f = -INFINITY;
460
461
0
                for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
462
0
                    const llama_seq_id seq_id = ubatch->seq_id[i][s];
463
464
0
                    if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) {
465
0
                        f = 0.0f;
466
0
                    }
467
0
                }
468
469
0
                data[h*(n_enc*n_tokens) + i*n_enc + j] = f;
470
0
            }
471
0
        }
472
473
0
        for (int i = n_tokens; i < n_tokens; ++i) {
474
0
            for (int j = 0; j < n_enc; ++j) {
475
0
                data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
476
0
            }
477
0
        }
478
0
    }
479
0
}
480
481
0
void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
482
0
    mctx->get_attn()->set_input_k_idxs(inp_attn->self_k_idxs, ubatch);
483
0
    mctx->get_attn()->set_input_v_idxs(inp_attn->self_v_idxs, ubatch);
484
485
0
    mctx->get_attn()->set_input_kq_mask(inp_attn->self_kq_mask, ubatch, cparams.causal_attn);
486
487
0
    const int64_t n_rs = mctx->get_recr()->get_n_rs();
488
489
0
    if (inp_rs->s_copy) {
490
0
        GGML_ASSERT(ggml_backend_buffer_is_host(inp_rs->s_copy->buffer));
491
0
        int32_t * data = (int32_t *) inp_rs->s_copy->data;
492
493
        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
494
0
        for (uint32_t i = 0; i < n_rs; ++i) {
495
0
            data[i] = mctx->get_recr()->s_copy(i);
496
0
        }
497
0
    }
498
0
}
499
500
0
bool llm_graph_input_mem_hybrid::can_reuse(const llm_graph_params & params) {
501
0
    const auto * mctx = static_cast<const llama_memory_hybrid_context *>(params.mctx);
502
503
0
    this->mctx = mctx;
504
505
0
    bool res = true;
506
507
0
    res &= inp_attn->self_k_idxs->ne[0] == params.ubatch.n_tokens;
508
  //res &= inp_attn->self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
509
510
0
    res &= inp_attn->self_kq_mask->ne[0] == mctx->get_attn()->get_n_kv();
511
0
    res &= inp_attn->self_kq_mask->ne[1] == params.ubatch.n_tokens;
512
513
0
    res &= inp_rs->s_copy->ne[0] == mctx->get_recr()->get_n_rs();
514
515
0
    res &= inp_rs->s_copy_main->ne[0]  == params.ubatch.n_seqs;
516
0
    res &= inp_rs->s_copy_extra->ne[0] == mctx->get_recr()->get_n_rs() - params.ubatch.n_seqs;
517
518
0
    res &= inp_rs->head == mctx->get_recr()->get_head();
519
0
    res &= inp_rs->rs_z == mctx->get_recr()->get_rs_z();
520
521
0
    return res;
522
0
}
523
524
//
525
// llm_graph_result
526
//
527
528
0
llm_graph_result::llm_graph_result(int64_t max_nodes) : max_nodes(max_nodes) {
529
0
    reset();
530
531
0
    const char * LLAMA_GRAPH_RESULT_DEBUG = getenv("LLAMA_GRAPH_RESULT_DEBUG");
532
0
    debug = LLAMA_GRAPH_RESULT_DEBUG ? atoi(LLAMA_GRAPH_RESULT_DEBUG) : 0;
533
0
}
534
535
0
int64_t llm_graph_result::get_max_nodes() const {
536
0
    return max_nodes;
537
0
}
538
539
0
void llm_graph_result::reset() {
540
0
    t_tokens      = nullptr;
541
0
    t_logits      = nullptr;
542
0
    t_embd        = nullptr;
543
0
    t_embd_pooled = nullptr;
544
545
0
    params = {};
546
547
0
    inputs.clear();
548
549
0
    buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false));
550
551
0
    ggml_init_params params = {
552
0
        /*.mem_size   =*/ buf_compute_meta.size(),
553
0
        /*.mem_buffer =*/ buf_compute_meta.data(),
554
0
        /*.no_alloc   =*/ true,
555
0
    };
556
557
0
    ctx_compute.reset(ggml_init(params));
558
559
0
    gf = ggml_new_graph_custom(ctx_compute.get(), max_nodes, false);
560
0
}
561
562
0
void llm_graph_result::set_inputs(const llama_ubatch * ubatch) {
563
0
    for (auto & input : inputs) {
564
0
        input->set_input(ubatch);
565
0
    }
566
0
}
567
568
0
bool llm_graph_result::can_reuse(const llm_graph_params & params) {
569
0
    if (!this->params.allow_reuse(params)) {
570
0
        if (debug > 1) {
571
0
            LLAMA_LOG_DEBUG("%s: cannot reuse graph due to incompatible graph parameters\n", __func__);
572
0
        }
573
574
0
        return false;
575
0
    }
576
577
0
    if (debug > 1) {
578
0
        LLAMA_LOG_DEBUG("%s: checking compatibility of %d inputs:\n", __func__, (int) inputs.size());
579
0
    }
580
581
0
    bool res = true;
582
583
0
    for (auto & input : inputs) {
584
0
        const bool cur = input->can_reuse(params);
585
586
0
        if (debug > 1) {
587
0
            LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", "placeholder", cur);
588
0
        }
589
590
0
        res = res && cur;
591
0
    }
592
593
0
    if (debug > 0) {
594
0
        LLAMA_LOG_DEBUG("%s: can reuse graph = %d\n", __func__, res);
595
0
    }
596
597
0
    return res;
598
0
}
599
600
0
llm_graph_input_i * llm_graph_result::add_input(llm_graph_input_ptr input) {
601
0
    inputs.emplace_back(std::move(input));
602
0
    return inputs.back().get();
603
0
}
604
605
0
void llm_graph_result::set_params(const llm_graph_params & params) {
606
0
    this->params = params;
607
0
}
608
609
//
610
// llm_graph_context
611
//
612
613
llm_graph_context::llm_graph_context(const llm_graph_params & params) :
614
0
    arch             (params.arch),
615
0
    hparams          (params.hparams),
616
0
    cparams          (params.cparams),
617
0
    ubatch           (params.ubatch),
618
0
    n_embd           (hparams.n_embd),
619
0
    n_layer          (hparams.n_layer),
620
0
    n_rot            (hparams.n_rot),
621
0
    n_ctx            (cparams.n_ctx),
622
0
    n_head           (hparams.n_head()),
623
0
    n_head_kv        (hparams.n_head_kv()),
624
0
    n_embd_head_k    (hparams.n_embd_head_k),
625
0
    n_embd_k_gqa     (hparams.n_embd_k_gqa()),
626
0
    n_embd_head_v    (hparams.n_embd_head_v),
627
0
    n_embd_v_gqa     (hparams.n_embd_v_gqa()),
628
0
    n_expert         (hparams.n_expert),
629
0
    n_expert_used    (cparams.warmup ? hparams.n_expert : hparams.n_expert_used),
630
0
    freq_base        (cparams.rope_freq_base),
631
0
    freq_scale       (cparams.rope_freq_scale),
632
0
    ext_factor       (cparams.yarn_ext_factor),
633
0
    attn_factor      (cparams.yarn_attn_factor),
634
0
    beta_fast        (cparams.yarn_beta_fast),
635
0
    beta_slow        (cparams.yarn_beta_slow),
636
0
    norm_eps         (hparams.f_norm_eps),
637
0
    norm_rms_eps     (hparams.f_norm_rms_eps),
638
0
    n_tokens         (ubatch.n_tokens),
639
0
    n_outputs        (params.n_outputs),
640
0
    n_ctx_orig       (cparams.n_ctx_orig_yarn),
641
0
    pooling_type     (cparams.pooling_type),
642
0
    rope_type        (hparams.rope_type),
643
0
    sched            (params.sched),
644
0
    backend_cpu      (params.backend_cpu),
645
0
    cvec             (params.cvec),
646
0
    loras            (params.loras),
647
0
    mctx             (params.mctx),
648
0
    cross            (params.cross),
649
0
    cb_func          (params.cb),
650
0
    res              (params.res),
651
0
    ctx0             (res->get_ctx()),
652
0
    gf               (res->get_gf()) {
653
0
        res->set_params(params);
654
0
    }
655
656
0
void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
657
0
    if (cb_func) {
658
0
        cb_func(ubatch, cur, name, il);
659
0
    }
660
0
}
661
662
ggml_tensor * llm_graph_context::build_cvec(
663
         ggml_tensor * cur,
664
0
                 int   il) const {
665
0
    return cvec->apply_to(ctx0, cur, il);
666
0
}
667
668
ggml_tensor * llm_graph_context::build_lora_mm(
669
          ggml_tensor * w,
670
0
          ggml_tensor * cur) const {
671
0
    ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
672
673
0
    for (const auto & lora : *loras) {
674
0
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
675
0
        if (lw == nullptr) {
676
0
            continue;
677
0
        }
678
679
0
        const float adapter_scale = lora.second;
680
0
        const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
681
682
0
        ggml_tensor * ab_cur = ggml_mul_mat(
683
0
                ctx0, lw->b,
684
0
                ggml_mul_mat(ctx0, lw->a, cur)
685
0
                );
686
687
0
        ab_cur = ggml_scale(ctx0, ab_cur, scale);
688
0
        res = ggml_add(ctx0, res, ab_cur);
689
0
    }
690
691
0
    return res;
692
0
}
693
694
ggml_tensor * llm_graph_context::build_lora_mm_id(
695
          ggml_tensor * w,   // ggml_tensor * as
696
          ggml_tensor * cur, // ggml_tensor * b
697
0
          ggml_tensor * ids) const {
698
0
    ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);
699
0
    for (const auto & lora : *loras) {
700
0
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
701
0
        if (lw == nullptr) {
702
0
            continue;
703
0
        }
704
705
0
        const float alpha = lora.first->alpha;
706
0
        const float rank  = (float) lw->b->ne[0];
707
0
        const float scale = alpha ? lora.second * alpha / rank : lora.second;
708
709
0
        ggml_tensor * ab_cur = ggml_mul_mat_id(
710
0
                ctx0, lw->b,
711
0
                ggml_mul_mat_id(ctx0, lw->a, cur, ids),
712
0
                ids
713
0
                );
714
715
0
        ab_cur = ggml_scale(ctx0, ab_cur, scale);
716
0
        res = ggml_add(ctx0, res, ab_cur);
717
0
    }
718
719
0
    return res;
720
0
}
721
722
ggml_tensor * llm_graph_context::build_norm(
723
         ggml_tensor * cur,
724
         ggml_tensor * mw,
725
         ggml_tensor * mb,
726
       llm_norm_type   type,
727
0
                 int   il) const {
728
0
    switch (type) {
729
0
        case LLM_NORM:       cur = ggml_norm    (ctx0, cur, hparams.f_norm_eps);     break;
730
0
        case LLM_NORM_RMS:   cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break;
731
0
        case LLM_NORM_GROUP:
732
0
            {
733
0
                cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]);
734
0
                cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps);
735
0
                cur = ggml_reshape_2d(ctx0, cur, cur->ne[0],    cur->ne[2]);
736
0
            } break;
737
0
    }
738
739
0
    if (mw || mb) {
740
0
        cb(cur, "norm", il);
741
0
    }
742
743
0
    if (mw) {
744
0
        cur = ggml_mul(ctx0, cur, mw);
745
0
        if (mb) {
746
0
            cb(cur, "norm_w", il);
747
0
        }
748
0
    }
749
750
0
    if (mb) {
751
0
        cur = ggml_add(ctx0, cur, mb);
752
0
    }
753
754
0
    return cur;
755
0
}
756
757
ggml_tensor * llm_graph_context::build_ffn(
758
         ggml_tensor * cur,
759
         ggml_tensor * up,
760
         ggml_tensor * up_b,
761
         ggml_tensor * up_s,
762
         ggml_tensor * gate,
763
         ggml_tensor * gate_b,
764
         ggml_tensor * gate_s,
765
         ggml_tensor * down,
766
         ggml_tensor * down_b,
767
         ggml_tensor * down_s,
768
         ggml_tensor * act_scales,
769
     llm_ffn_op_type   type_op,
770
   llm_ffn_gate_type   type_gate,
771
0
                 int   il) const {
772
0
    ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur;
773
0
    cb(tmp, "ffn_up", il);
774
775
0
    if (up_b) {
776
0
        tmp = ggml_add(ctx0, tmp, up_b);
777
0
        cb(tmp, "ffn_up_b", il);
778
0
    }
779
780
0
    if (up_s) {
781
0
        tmp = ggml_mul(ctx0, tmp, up_s);
782
0
        cb(tmp, "ffn_up_s", il);
783
0
    }
784
785
0
    if (gate) {
786
0
        switch (type_gate) {
787
0
            case LLM_FFN_SEQ:
788
0
                {
789
0
                    cur = build_lora_mm(gate, tmp);
790
0
                    cb(cur, "ffn_gate", il);
791
0
                } break;
792
0
            case LLM_FFN_PAR:
793
0
                {
794
0
                    cur = build_lora_mm(gate, cur);
795
0
                    cb(cur, "ffn_gate", il);
796
0
                } break;
797
0
        }
798
799
0
        if (gate_b) {
800
0
            cur = ggml_add(ctx0, cur, gate_b);
801
0
            cb(cur, "ffn_gate_b", il);
802
0
        }
803
804
0
        if (gate_s) {
805
0
            cur = ggml_mul(ctx0, cur, gate_s);
806
0
            cb(cur, "ffn_gate_s", il);
807
0
        }
808
809
0
    } else {
810
0
        cur = tmp;
811
0
    }
812
813
0
    switch (type_op) {
814
0
        case LLM_FFN_SILU:
815
0
            if (gate && type_gate == LLM_FFN_PAR) {
816
0
                cur = ggml_swiglu_split(ctx0, cur, tmp);
817
0
                cb(cur, "ffn_swiglu", il);
818
0
                type_gate = LLM_FFN_SEQ;
819
0
            } else {
820
0
                cur = ggml_silu(ctx0, cur);
821
0
                cb(cur, "ffn_silu", il);
822
0
            } break;
823
0
        case LLM_FFN_GELU:
824
0
            if (gate && type_gate == LLM_FFN_PAR) {
825
0
                cur = ggml_geglu_split(ctx0, cur, tmp);
826
0
                cb(cur, "ffn_geglu", il);
827
0
                type_gate = LLM_FFN_SEQ;
828
0
            } else {
829
0
                cur = ggml_gelu(ctx0, cur);
830
0
                cb(cur, "ffn_gelu", il);
831
0
                if (act_scales != NULL) {
832
0
                    cur = ggml_div(ctx0, cur, act_scales);
833
0
                    cb(cur, "ffn_act", il);
834
0
                }
835
0
            } break;
836
0
        case LLM_FFN_RELU:
837
0
            if (gate && type_gate == LLM_FFN_PAR) {
838
0
                cur = ggml_reglu_split(ctx0, cur, tmp);
839
0
                cb(cur, "ffn_reglu", il);
840
0
                type_gate = LLM_FFN_SEQ;
841
0
            } else {
842
0
                cur = ggml_relu(ctx0, cur);
843
0
                cb(cur, "ffn_relu", il);
844
0
            } break;
845
0
        case LLM_FFN_RELU_SQR:
846
0
            {
847
0
                cur = ggml_relu(ctx0, cur);
848
0
                cb(cur, "ffn_relu", il);
849
850
0
                cur = ggml_sqr(ctx0, cur);
851
0
                cb(cur, "ffn_sqr(relu)", il);
852
0
            } break;
853
0
        case LLM_FFN_SWIGLU:
854
0
            {
855
0
                cur = ggml_swiglu(ctx0, cur);
856
0
                cb(cur, "ffn_swiglu", il);
857
0
            } break;
858
0
        case LLM_FFN_GEGLU:
859
0
            {
860
0
                cur = ggml_geglu(ctx0, cur);
861
0
                cb(cur, "ffn_geglu", il);
862
0
            } break;
863
0
        case LLM_FFN_REGLU:
864
0
            {
865
0
                cur = ggml_reglu(ctx0, cur);
866
0
                cb(cur, "ffn_reglu", il);
867
0
            } break;
868
0
        default:
869
0
            GGML_ABORT("fatal error");
870
0
    }
871
872
0
    if (gate && type_gate == LLM_FFN_PAR) {
873
0
        cur = ggml_mul(ctx0, cur, tmp);
874
0
        cb(cur, "ffn_gate_par", il);
875
0
    }
876
877
0
    if (down) {
878
0
        cur = build_lora_mm(down, cur);
879
0
        if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
880
            // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
881
0
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
882
0
        }
883
0
    }
884
885
0
    if (down_b) {
886
0
        cb(cur, "ffn_down", il);
887
0
    }
888
889
0
    if (down_b) {
890
0
        cur = ggml_add(ctx0, cur, down_b);
891
0
    }
892
893
0
    if (down_s) {
894
0
        cur = ggml_mul(ctx0, cur, down_s);
895
0
        cb(cur, "ffn_down_s", il);
896
0
    }
897
898
0
    return cur;
899
0
}
900
901
ggml_tensor * llm_graph_context::build_moe_ffn(
902
         ggml_tensor * cur,
903
         ggml_tensor * gate_inp,
904
         ggml_tensor * up_exps,
905
         ggml_tensor * gate_exps,
906
         ggml_tensor * down_exps,
907
         ggml_tensor * exp_probs_b,
908
             int64_t   n_expert,
909
             int64_t   n_expert_used,
910
     llm_ffn_op_type   type_op,
911
                bool   norm_w,
912
                bool   scale_w,
913
               float   w_scale,
914
         llama_expert_gating_func_type gating_op,
915
                 int   il,
916
0
         ggml_tensor * probs_in) const {
917
0
    return build_moe_ffn(
918
0
        cur,
919
0
        gate_inp,  /* gate_inp_b  */ nullptr,
920
0
        up_exps,   /* up_exps_b   */ nullptr,
921
0
        gate_exps, /* gate_exps_b */ nullptr,
922
0
        down_exps, /* down_exps_b */ nullptr,
923
0
        exp_probs_b,
924
0
        n_expert,
925
0
        n_expert_used,
926
0
        type_op,
927
0
        norm_w,
928
0
        scale_w,
929
0
        w_scale,
930
0
        gating_op,
931
0
        il,
932
0
        probs_in
933
0
    );
934
0
}
935
936
ggml_tensor * llm_graph_context::build_moe_ffn(
937
         ggml_tensor * cur,
938
         ggml_tensor * gate_inp,
939
         ggml_tensor * gate_inp_b,
940
         ggml_tensor * up_exps,
941
         ggml_tensor * up_exps_b,
942
         ggml_tensor * gate_exps,
943
         ggml_tensor * gate_exps_b,
944
         ggml_tensor * down_exps,
945
         ggml_tensor * down_exps_b,
946
         ggml_tensor * exp_probs_b,
947
             int64_t   n_expert,
948
             int64_t   n_expert_used,
949
     llm_ffn_op_type   type_op,
950
                bool   norm_w,
951
                bool   scale_w,
952
               float   w_scale,
953
        llama_expert_gating_func_type gating_op,
954
                 int   il,
955
0
         ggml_tensor * probs_in) const {
956
0
    const int64_t n_embd   = cur->ne[0];
957
0
    const int64_t n_tokens = cur->ne[1];
958
0
    const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, we apply the sigmoid-ed weights before the FFN
959
960
0
    ggml_tensor * logits = nullptr;
961
962
0
    if (probs_in == nullptr) {
963
0
        logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens]
964
0
        cb(logits, "ffn_moe_logits", il);
965
0
    } else {
966
0
        logits = probs_in;
967
0
    }
968
969
0
    if (gate_inp_b) {
970
0
        logits = ggml_add(ctx0, logits, gate_inp_b);
971
0
        cb(logits, "ffn_moe_logits_biased", il);
972
0
    }
973
974
0
    ggml_tensor * probs = nullptr;
975
0
    switch (gating_op) {
976
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
977
0
            {
978
0
                probs = ggml_soft_max(ctx0, logits); // [n_expert, n_tokens]
979
0
            } break;
980
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
981
0
            {
982
0
                probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
983
0
            } break;
984
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT:
985
0
            {
986
0
                probs = logits; // [n_expert, n_tokens]
987
0
            } break;
988
0
        default:
989
0
            GGML_ABORT("fatal error");
990
0
    }
991
0
    cb(probs, "ffn_moe_probs", il);
992
993
    // add experts selection bias - introduced in DeepSeek V3
994
    // leave probs unbiased as it's later used to get expert weights
995
0
    ggml_tensor * selection_probs = probs;
996
0
    if (exp_probs_b != nullptr) {
997
0
        selection_probs = ggml_add(ctx0, probs, exp_probs_b);
998
0
        cb(selection_probs, "ffn_moe_probs_biased", il);
999
0
    }
1000
1001
    // llama4 doesn't have exp_probs_b, and sigmoid is only used after top_k
1002
    // see: https://github.com/meta-llama/llama-models/blob/699a02993512fb36936b1b0741e13c06790bcf98/models/llama4/moe.py#L183-L198
1003
0
    if (arch == LLM_ARCH_LLAMA4) {
1004
0
        selection_probs = logits;
1005
0
    }
1006
1007
0
    if (arch == LLM_ARCH_GROVEMOE) {
1008
0
        selection_probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
1009
0
        cb(selection_probs, "ffn_moe_probs_biased", il);
1010
0
    }
1011
1012
    // select top n_group_used expert groups
1013
    // https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/e815299b0bcbac849fa540c768ef21845365c9eb/modeling_deepseek.py#L440-L457
1014
0
    if (hparams.n_expert_groups > 1 && n_tokens > 0) {
1015
0
        const int64_t n_exp_per_group = n_expert / hparams.n_expert_groups;
1016
1017
        // organize experts into n_expert_groups
1018
0
        ggml_tensor * selection_groups = ggml_reshape_3d(ctx0, selection_probs, n_exp_per_group, hparams.n_expert_groups, n_tokens); // [n_exp_per_group, n_expert_groups, n_tokens]
1019
1020
0
        ggml_tensor * group_scores = ggml_argsort_top_k(ctx0, selection_groups, 2); // [2, n_expert_groups, n_tokens]
1021
0
        group_scores = ggml_get_rows(ctx0, ggml_reshape_4d(ctx0, selection_groups, 1, selection_groups->ne[0], selection_groups->ne[1], selection_groups->ne[2]), group_scores); // [1, 2, n_expert_groups, n_tokens]
1022
1023
        // get top n_group_used expert groups
1024
0
        group_scores = ggml_sum_rows(ctx0, ggml_reshape_3d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2], group_scores->ne[3])); // [1, n_expert_groups, n_tokens]
1025
0
        group_scores = ggml_reshape_2d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2]); // [n_expert_groups, n_tokens]
1026
1027
0
        ggml_tensor * expert_groups = ggml_argsort_top_k(ctx0, group_scores, hparams.n_group_used); // [n_group_used, n_tokens]
1028
0
        cb(expert_groups, "ffn_moe_group_topk", il);
1029
1030
        // mask out the other groups
1031
0
        selection_probs = ggml_get_rows(ctx0, selection_groups, expert_groups); // [n_exp_per_group, n_group_used, n_tokens]
1032
0
        selection_probs = ggml_set_rows(ctx0, ggml_fill(ctx0, selection_groups, -INFINITY), selection_probs, expert_groups); // [n_exp_per_group, n_expert_groups, n_tokens]
1033
0
        selection_probs = ggml_reshape_2d(ctx0, selection_probs, n_expert, n_tokens); // [n_expert, n_tokens]
1034
0
        cb(selection_probs, "ffn_moe_probs_masked", il);
1035
0
    }
1036
1037
    // select experts
1038
0
    ggml_tensor * selected_experts = ggml_argsort_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
1039
0
    cb(selected_experts->src[0], "ffn_moe_argsort", il);
1040
0
    cb(selected_experts, "ffn_moe_topk", il);
1041
1042
0
    if (arch == LLM_ARCH_GROVEMOE && n_expert != hparams.n_expert) {
1043
        // TODO: Use scalar div instead when/if implemented
1044
0
        ggml_tensor * f_sel = ggml_cast(ctx0, selected_experts, GGML_TYPE_F32);
1045
0
        selected_experts = ggml_cast(ctx0, ggml_scale(ctx0, f_sel, 1.0f / float(hparams.n_group_experts)), GGML_TYPE_I32);
1046
0
        probs = ggml_reshape_3d(ctx0, probs, 1, hparams.n_expert, n_tokens);
1047
0
    } else {
1048
0
        probs = ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens);
1049
0
    }
1050
1051
0
    ggml_tensor * weights = ggml_get_rows(ctx0, probs, selected_experts); // [1, n_expert_used, n_tokens]
1052
0
    cb(weights, "ffn_moe_weights", il);
1053
1054
1055
0
    if (gating_op == LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT) {
1056
0
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);
1057
0
        weights = ggml_soft_max(ctx0, weights); // [n_expert_used, n_tokens]
1058
0
        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
1059
0
        cb(weights, "ffn_moe_weights_softmax", il);
1060
0
    }
1061
1062
0
    if (norm_w) {
1063
0
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);
1064
1065
0
        ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
1066
0
        cb(weights_sum, "ffn_moe_weights_sum", il);
1067
1068
        // Avoid division by zero, clamp to smallest number representable by F16
1069
0
        weights_sum = ggml_clamp(ctx0, weights_sum, 6.103515625e-5, INFINITY);
1070
0
        cb(weights_sum, "ffn_moe_weights_sum_clamped", il);
1071
1072
0
        weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
1073
0
        cb(weights, "ffn_moe_weights_norm", il);
1074
1075
0
        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
1076
0
    }
1077
0
    if (scale_w) {
1078
0
        weights = ggml_scale(ctx0, weights, w_scale);
1079
0
        cb(weights, "ffn_moe_weights_scaled", il);
1080
0
    }
1081
1082
    //call early so that topk-moe can be used
1083
0
    ggml_build_forward_expand(gf, weights);
1084
1085
0
    cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);
1086
1087
0
    if (weight_before_ffn) {
1088
        // repeat cur to [n_embd, n_expert_used, n_tokens]
1089
0
        ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1);
1090
0
        cur = ggml_mul(ctx0, repeated, weights);
1091
0
        cb(cur, "ffn_moe_weighted", il);
1092
0
    }
1093
1094
0
    ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
1095
0
    cb(up, "ffn_moe_up", il);
1096
1097
0
    if (up_exps_b) {
1098
0
        up = ggml_add_id(ctx0, up, up_exps_b, selected_experts);
1099
0
        cb(up, "ffn_moe_up_biased", il);
1100
0
    }
1101
1102
0
    ggml_tensor * experts = nullptr;
1103
0
    if (gate_exps) {
1104
0
        cur = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
1105
0
        cb(cur, "ffn_moe_gate", il);
1106
0
    } else {
1107
0
        cur = up;
1108
0
    }
1109
1110
0
    if (gate_exps_b) {
1111
0
        cur = ggml_add_id(ctx0, cur, gate_exps_b, selected_experts);
1112
0
        cb(cur, "ffn_moe_gate_biased", il);
1113
0
    }
1114
1115
0
    switch (type_op) {
1116
0
        case LLM_FFN_SILU:
1117
0
            if (gate_exps) {
1118
0
                cur = ggml_swiglu_split(ctx0, cur, up);
1119
0
                cb(cur, "ffn_moe_swiglu", il);
1120
0
            } else {
1121
0
                cur = ggml_silu(ctx0, cur);
1122
0
                cb(cur, "ffn_moe_silu", il);
1123
0
            } break;
1124
0
        case LLM_FFN_GELU:
1125
0
            if (gate_exps) {
1126
0
                cur = ggml_geglu_split(ctx0, cur, up);
1127
0
                cb(cur, "ffn_moe_geglu", il);
1128
0
            } else {
1129
0
                cur = ggml_gelu(ctx0, cur);
1130
0
                cb(cur, "ffn_moe_gelu", il);
1131
0
            } break;
1132
0
        case LLM_FFN_SWIGLU_OAI_MOE:
1133
0
            {
1134
                // TODO: move to hparams?
1135
0
                constexpr float alpha = 1.702f;
1136
0
                constexpr float limit = 7.0f;
1137
0
                cur = ggml_swiglu_oai(ctx0, cur, up, alpha, limit);
1138
0
                cb(cur, "ffn_moe_swiglu_oai", il);
1139
0
            } break;
1140
0
        case LLM_FFN_RELU:
1141
0
            if (gate_exps) {
1142
0
                cur = ggml_reglu_split(ctx0, cur, up);
1143
0
                cb(cur, "ffn_moe_reglu", il);
1144
0
            } else {
1145
0
                cur = ggml_relu(ctx0, cur);
1146
0
                cb(cur, "ffn_moe_relu", il);
1147
0
            } break;
1148
0
        case LLM_FFN_RELU_SQR:
1149
0
            if (gate_exps) {
1150
                // TODO: add support for gated squared relu
1151
0
                GGML_ABORT("fatal error: gated squared relu not implemented");
1152
0
            } else {
1153
0
                cur = ggml_relu(ctx0, cur);
1154
0
                cur = ggml_sqr(ctx0, cur);
1155
0
                cb(cur, "ffn_moe_relu_sqr", il);
1156
0
            } break;
1157
0
        default:
1158
0
            GGML_ABORT("fatal error");
1159
0
    }
1160
1161
0
    experts = build_lora_mm_id(down_exps, cur, selected_experts); // [n_embd, n_expert_used, n_tokens]
1162
0
    cb(experts, "ffn_moe_down", il);
1163
1164
0
    if (down_exps_b) {
1165
0
        experts = ggml_add_id(ctx0, experts, down_exps_b, selected_experts);
1166
0
        cb(experts, "ffn_moe_down_biased", il);
1167
0
    }
1168
1169
0
    if (!weight_before_ffn) {
1170
0
        experts = ggml_mul(ctx0, experts, weights);
1171
0
        cb(cur, "ffn_moe_weighted", il);
1172
0
    }
1173
1174
0
    ggml_tensor * cur_experts[LLAMA_MAX_EXPERTS] = { nullptr };
1175
1176
0
    assert(n_expert_used > 0);
1177
1178
    // order the views before the adds
1179
0
    for (uint32_t i = 0; i < hparams.n_expert_used; ++i) {
1180
0
        cur_experts[i] = ggml_view_2d(ctx0, experts, n_embd, n_tokens, experts->nb[2], i*experts->nb[1]);
1181
1182
0
        ggml_build_forward_expand(gf, cur_experts[i]);
1183
0
    }
1184
1185
    // aggregate experts
1186
    // note: here we explicitly use hparams.n_expert_used instead of n_expert_used
1187
    //       to avoid potentially a large number of add nodes during warmup
1188
    //       ref: https://github.com/ggml-org/llama.cpp/pull/14753
1189
0
    ggml_tensor * moe_out = cur_experts[0];
1190
1191
0
    for (uint32_t i = 1; i < hparams.n_expert_used; ++i) {
1192
0
        moe_out = ggml_add(ctx0, moe_out, cur_experts[i]);
1193
0
    }
1194
1195
0
    if (hparams.n_expert_used == 1) {
1196
        // avoid returning a non-contiguous tensor
1197
0
        moe_out = ggml_cont(ctx0, moe_out);
1198
0
    }
1199
1200
0
    cb(moe_out, "ffn_moe_out", il);
1201
1202
0
    return moe_out;
1203
0
}
1204
1205
// input embeddings with optional lora
1206
0
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
1207
0
    const int64_t n_embd = hparams.n_embd_inp();
1208
1209
0
    auto inp = std::make_unique<llm_graph_input_embd>();
1210
1211
0
    ggml_tensor * cur = nullptr;
1212
1213
0
    if (ubatch.token) {
1214
0
        inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens);
1215
        //cb(inp->tokens, "inp_tokens", -1);
1216
0
        ggml_set_input(inp->tokens);
1217
0
        res->t_tokens = inp->tokens;
1218
1219
0
        cur = ggml_get_rows(ctx0, tok_embd, inp->tokens);
1220
1221
        // apply lora for embedding tokens if needed
1222
0
        for (const auto & lora : *loras) {
1223
0
            llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd);
1224
0
            if (lw == nullptr) {
1225
0
                continue;
1226
0
            }
1227
1228
0
            const float adapter_scale = lora.second;
1229
0
            const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
1230
1231
0
            ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat(
1232
0
                        ctx0, lw->b, // non-transposed lora_b
1233
0
                        ggml_get_rows(ctx0, lw->a, inp->tokens)
1234
0
                        ), scale);
1235
1236
0
            cur = ggml_add(ctx0, cur, inpL_delta);
1237
0
        }
1238
0
    } else {
1239
0
        inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
1240
0
        ggml_set_input(inp->embd);
1241
1242
0
        cur = inp->embd;
1243
0
    }
1244
1245
    // For Granite architecture
1246
0
    if (hparams.f_embedding_scale != 0.0f) {
1247
0
        cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale);
1248
0
    }
1249
1250
0
    cb(cur, "inp_embd", -1);
1251
1252
0
    res->add_input(std::move(inp));
1253
1254
0
    return cur;
1255
0
}
1256
1257
0
ggml_tensor * llm_graph_context::build_inp_pos() const {
1258
0
    auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd());
1259
1260
0
    auto & cur = inp->pos;
1261
1262
0
    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd());
1263
0
    ggml_set_input(cur);
1264
1265
0
    res->add_input(std::move(inp));
1266
1267
0
    return cur;
1268
0
}
1269
1270
0
ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
1271
0
    auto inp = std::make_unique<llm_graph_input_attn_temp>(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale, hparams.f_attn_temp_offset);
1272
1273
0
    auto & cur = inp->attn_scale;
1274
1275
    // this needs to be 1x1xN for broadcasting
1276
0
    cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens);
1277
0
    ggml_set_input(cur);
1278
1279
0
    res->add_input(std::move(inp));
1280
1281
0
    return cur;
1282
0
}
1283
1284
0
ggml_tensor * llm_graph_context::build_inp_out_ids() const {
1285
    // note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls,
1286
    //       but this would make the graph topology depend on the number of output tokens, which can interfere with
1287
    //       features that require constant topology such as pipeline parallelism
1288
    //       ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471
1289
    //if (n_outputs < n_tokens) {
1290
    //    return nullptr;
1291
    //}
1292
1293
0
    auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);
1294
1295
0
    auto & cur = inp->out_ids;
1296
1297
0
    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
1298
0
    ggml_set_input(cur);
1299
1300
0
    res->add_input(std::move(inp));
1301
1302
0
    return cur;
1303
0
}
1304
1305
0
ggml_tensor * llm_graph_context::build_inp_mean() const {
1306
0
    auto inp = std::make_unique<llm_graph_input_mean>(cparams);
1307
1308
0
    auto & cur = inp->mean;
1309
1310
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq);
1311
0
    ggml_set_input(cur);
1312
1313
0
    res->add_input(std::move(inp));
1314
1315
0
    return cur;
1316
0
}
1317
1318
0
ggml_tensor * llm_graph_context::build_inp_cls() const {
1319
0
    auto inp = std::make_unique<llm_graph_input_cls>(cparams, arch);
1320
1321
0
    auto & cur = inp->cls;
1322
1323
0
    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq);
1324
0
    ggml_set_input(cur);
1325
1326
0
    res->add_input(std::move(inp));
1327
1328
0
    return cur;
1329
0
}
1330
1331
0
ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
1332
0
    auto inp = std::make_unique<llm_graph_input_cross_embd>(cross);
1333
1334
0
    auto & cur = inp->cross_embd;
1335
1336
    // if we have the output embeddings from the encoder, use them directly
1337
    // TODO: needs more work to be correct, for now just use the tensor shape
1338
    //if (cross->t_embd) {
1339
    //    cur = ggml_view_tensor(ctx0, cross->t_embd);
1340
1341
    //    return cur;
1342
    //}
1343
1344
0
    const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd_inp();
1345
0
    const auto n_enc  = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;
1346
1347
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc);
1348
0
    ggml_set_input(cur);
1349
1350
0
    res->add_input(std::move(inp));
1351
1352
0
    return cur;
1353
0
}
1354
1355
0
ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const {
1356
0
    auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams);
1357
1358
0
    auto & cur = inp->pos_bucket;
1359
1360
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens);
1361
0
    ggml_set_input(cur);
1362
1363
0
    res->add_input(std::move(inp));
1364
1365
0
    return cur;
1366
0
}
1367
1368
0
ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const {
1369
0
    const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx);
1370
1371
0
    auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, mctx_cur);
1372
1373
0
    const auto n_kv = mctx_cur->get_n_kv();
1374
1375
0
    auto & cur = inp->pos_bucket;
1376
1377
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
1378
0
    ggml_set_input(cur);
1379
1380
0
    res->add_input(std::move(inp));
1381
1382
0
    return cur;
1383
0
}
1384
1385
0
ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const {
1386
0
    ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]);
1387
0
    cb(pos_bucket_1d, "pos_bucket_1d", -1);
1388
1389
0
    ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d);
1390
1391
0
    pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]);
1392
0
    pos_bias = ggml_permute   (ctx0, pos_bias, 2, 0, 1, 3);
1393
0
    pos_bias = ggml_cont      (ctx0, pos_bias);
1394
1395
0
    cb(pos_bias, "pos_bias", -1);
1396
1397
0
    return pos_bias;
1398
0
}
1399
1400
ggml_tensor * llm_graph_context::build_attn_mha(
1401
         ggml_tensor * q,
1402
         ggml_tensor * k,
1403
         ggml_tensor * v,
1404
         ggml_tensor * kq_b,
1405
         ggml_tensor * kq_mask,
1406
         ggml_tensor * sinks,
1407
         ggml_tensor * v_mla,
1408
               float   kq_scale,
1409
0
                 int   il) const {
1410
0
    const bool v_trans = v->nb[1] > v->nb[2];
1411
1412
    // split the batch into streams if needed
1413
0
    const auto n_stream = k->ne[3];
1414
1415
0
    q = ggml_view_4d(ctx0, q, q->ne[0], q->ne[1], q->ne[2]/n_stream, n_stream, q->nb[1], q->nb[2], q->nb[3]/n_stream, 0);
1416
1417
0
    q = ggml_permute(ctx0, q, 0, 2, 1, 3);
1418
0
    k = ggml_permute(ctx0, k, 0, 2, 1, 3);
1419
0
    v = ggml_permute(ctx0, v, 0, 2, 1, 3);
1420
1421
0
    ggml_tensor * cur;
1422
1423
0
    if (cparams.flash_attn && kq_b == nullptr) {
1424
0
        GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet");
1425
1426
0
        if (v_trans) {
1427
0
            v = ggml_transpose(ctx0, v);
1428
0
        }
1429
1430
        // this can happen when KV cache is not used (e.g. an embedding model with non-causal attn)
1431
0
        if (k->type == GGML_TYPE_F32) {
1432
0
            k = ggml_cast(ctx0, k, GGML_TYPE_F16);
1433
0
        }
1434
1435
0
        if (v->type == GGML_TYPE_F32) {
1436
0
            v = ggml_cast(ctx0, v, GGML_TYPE_F16);
1437
0
        }
1438
1439
0
        cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
1440
0
                                  hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
1441
0
        cb(cur, LLAMA_TENSOR_NAME_FATTN, il);
1442
1443
0
        ggml_flash_attn_ext_add_sinks(cur, sinks);
1444
0
        ggml_flash_attn_ext_set_prec (cur, GGML_PREC_F32);
1445
1446
0
        if (v_mla) {
1447
#if 0
1448
            // v_mla can be applied as a matrix-vector multiplication with broadcasting across dimension 3 == n_tokens.
1449
            // However, the code is optimized for dimensions 0 and 1 being large, so this is inefficient.
1450
            cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens);
1451
            cur = ggml_mul_mat(ctx0, v_mla, cur);
1452
#else
1453
            // It's preferable to do the calculation as a matrix-matrix multiplication with n_tokens in dimension 1.
1454
            // The permutations are noops and only change how the tensor data is interpreted.
1455
0
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
1456
0
            cur = ggml_mul_mat(ctx0, v_mla, cur);
1457
0
            cb(cur, "fattn_mla", il);
1458
0
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
1459
0
            cur = ggml_cont(ctx0, cur); // Needed because ggml_reshape_2d expects contiguous inputs.
1460
0
#endif
1461
0
        }
1462
1463
0
        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
1464
0
    } else {
1465
0
        ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
1466
0
        cb(kq, "kq", il);
1467
1468
        // note: this op tends to require high floating point range
1469
        //       while for some models F16 is enough, for others it is not, so we default to F32 here
1470
0
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
1471
1472
0
        if (arch == LLM_ARCH_GROK) {
1473
            // Grok needs to:
1474
            // multiply by the attention output multiplier,
1475
            // then soft-cap the logits:
1476
            // kq = 30 * tanh(kq / 30)
1477
            // before the softmax below
1478
1479
0
            kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, hparams.f_attn_out_scale / hparams.f_attn_logit_softcapping));
1480
0
            cb(kq, "kq_tanh", il);
1481
0
            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
1482
0
            cb(kq, "kq_scaled", il);
1483
0
        }
1484
1485
0
        if (hparams.attn_soft_cap) {
1486
0
            kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping);
1487
0
            cb(kq, "kq_scaled_1", il);
1488
0
            kq = ggml_tanh (ctx0, kq);
1489
0
            cb(kq, "kq_tanh", il);
1490
0
            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
1491
0
            cb(kq, "kq_scaled_2", il);
1492
0
        }
1493
1494
0
        if (kq_b) {
1495
0
            kq = ggml_add(ctx0, kq, kq_b);
1496
0
            cb(kq, "kq_plus_kq_b", il);
1497
0
        }
1498
1499
0
        kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
1500
0
        ggml_soft_max_add_sinks(kq, sinks);
1501
0
        cb(kq, "kq_soft_max", il);
1502
1503
0
        if (!v_trans) {
1504
            // note: avoid this branch when possible (the transpose + cont adds an extra copy of V)
1505
0
            v = ggml_cont(ctx0, ggml_transpose(ctx0, v));
1506
0
            cb(v, "v_cont", il);
1507
0
        }
1508
1509
0
        ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
1510
0
        cb(kqv, "kqv", il);
1511
1512
        // for MLA with the absorption optimization, we need to "decompress" from MQA back to MHA
1513
0
        if (v_mla) {
1514
0
            kqv = ggml_mul_mat(ctx0, v_mla, kqv);
1515
0
            cb(kqv, "kqv_mla", il);
1516
0
        }
1517
1518
0
        cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
1519
1520
        // recombine streams
1521
0
        cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
1522
1523
0
        if (!cparams.offload_kqv) {
1524
            // all nodes between the KV store and the attention output are run on the CPU
1525
0
            ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu);
1526
0
        }
1527
0
    }
1528
1529
0
    ggml_build_forward_expand(gf, cur);
1530
1531
0
    return cur;
1532
0
}
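Both the GROK branch and the attn_soft_cap branch above apply logit soft-capping, kq <- cap * tanh(kq / cap), which smoothly bounds the attention logits to (-cap, cap) before the softmax. A standalone sketch of just that transform; the values are illustrative and cap stands in for f_attn_logit_softcapping:

#include <cmath>
#include <cstdio>

// Soft-capping: large logits saturate near +/-cap instead of growing without bound.
static float softcap(float x, float cap) {
    return cap * tanhf(x / cap);
}

int main() {
    const float cap = 30.0f; // e.g. hparams.f_attn_logit_softcapping
    const float xs[] = {1.0f, 10.0f, 50.0f, 500.0f};
    for (float x : xs) {
        printf("logit %6.1f -> capped %6.2f\n", x, softcap(x, cap));
    }
    return 0;
}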
1533
1534
0
llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const {
1535
0
    auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams);
1536
1537
    // note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch
1538
0
    inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens, 1, 1);
1539
0
    ggml_set_input(inp->self_kq_mask);
1540
1541
0
    inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
1542
1543
0
    if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
1544
0
        inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens, 1, 1);
1545
0
        ggml_set_input(inp->self_kq_mask_swa);
1546
1547
0
        inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
1548
0
    } else {
1549
0
        inp->self_kq_mask_swa     = nullptr;
1550
0
        inp->self_kq_mask_swa_cnv = nullptr;
1551
0
    }
1552
1553
0
    return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp));
1554
0
}
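The self_kq_mask allocated here is an [n_tokens, n_tokens] F32 tensor; its values are filled later by the input's set_input, which is not part of this listing. As a hedged sketch of the usual convention, a pair of tokens gets 0.0f when both belong to the same sequence and -INFINITY otherwise, so the softmax suppresses cross-sequence attention. The helper below is hypothetical, not llama.cpp code:

#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical fill for a non-causal, no-KV-cache mask: token i may attend to
// token j only when both carry the same sequence id.
std::vector<float> make_noncausal_mask(const std::vector<int> & seq_id) {
    const size_t n = seq_id.size();
    std::vector<float> mask(n * n, -INFINITY);
    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < n; ++j) {
            if (seq_id[i] == seq_id[j]) {
                mask[i * n + j] = 0.0f;
            }
        }
    }
    return mask;
}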
1555
1556
ggml_tensor * llm_graph_context::build_attn(
1557
        llm_graph_input_attn_no_cache * inp,
1558
        ggml_tensor * wo,
1559
        ggml_tensor * wo_b,
1560
        ggml_tensor * q_cur,
1561
        ggml_tensor * k_cur,
1562
        ggml_tensor * v_cur,
1563
        ggml_tensor * kq_b,
1564
        ggml_tensor * sinks,
1565
        ggml_tensor * v_mla,
1566
            float     kq_scale,
1567
0
            int       il) const {
1568
0
    GGML_UNUSED(n_tokens);
1569
1570
    // these nodes are added to the graph together so that they are not reordered
1571
    // by doing so, the number of splits in the graph is reduced
1572
0
    ggml_build_forward_expand(gf, q_cur);
1573
0
    ggml_build_forward_expand(gf, k_cur);
1574
0
    ggml_build_forward_expand(gf, v_cur);
1575
1576
0
    const bool is_swa = hparams.is_swa(il);
1577
1578
0
    const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();
1579
1580
    // [TAG_NO_CACHE_PAD]
1581
    // TODO: if ubatch.equal_seqs() == true, we can split the three tensors below into ubatch.n_seqs_unq streams
1582
    //       but it might not be worth it: https://github.com/ggml-org/llama.cpp/pull/15636
1583
    //assert(!ubatch.equal_seqs() || (k_cur->ne[3] == 1 && k_cur->ne[3] == ubatch.n_seqs_unq));
1584
1585
0
    ggml_tensor * q = q_cur;
1586
0
    ggml_tensor * k = k_cur;
1587
0
    ggml_tensor * v = v_cur;
1588
1589
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1590
0
    cb(cur, "kqv_out", il);
1591
1592
0
    if (wo) {
1593
0
        cur = build_lora_mm(wo, cur);
1594
0
    }
1595
1596
0
    if (wo_b) {
1597
        //cb(cur, "kqv_wo", il);
1598
0
    }
1599
1600
0
    if (wo_b) {
1601
0
        cur = ggml_add(ctx0, cur, wo_b);
1602
0
    }
1603
1604
0
    return cur;
1605
0
}
1606
1607
static std::unique_ptr<llm_graph_input_attn_kv> build_attn_inp_kv_impl(
1608
           ggml_context * ctx0,
1609
     const llama_ubatch & ubatch,
1610
    const llama_hparams & hparams,
1611
    const llama_cparams & cparams,
1612
0
    const llama_kv_cache_context * mctx_cur) {
1613
1614
0
    auto inp = std::make_unique<llm_graph_input_attn_kv>(hparams, cparams, mctx_cur);
1615
1616
0
    {
1617
0
        GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_iswa for SWA");
1618
1619
0
        const auto n_kv     = mctx_cur->get_n_kv();
1620
0
        const auto n_tokens = ubatch.n_tokens;
1621
0
        const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;
1622
1623
0
        inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch);
1624
0
        inp->self_v_idxs = mctx_cur->build_input_v_idxs(ctx0, ubatch);
1625
1626
0
        inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, n_tokens/n_stream, 1, n_stream);
1627
0
        ggml_set_input(inp->self_kq_mask);
1628
1629
0
        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
1630
0
    }
1631
1632
0
    return inp;
1633
0
}
1634
1635
0
llm_graph_input_attn_kv * llm_graph_context::build_attn_inp_kv() const {
1636
0
    const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx);
1637
1638
0
    auto inp = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur);
1639
1640
0
    return (llm_graph_input_attn_kv *) res->add_input(std::move(inp));
1641
0
}
1642
1643
ggml_tensor * llm_graph_context::build_attn(
1644
        llm_graph_input_attn_kv * inp,
1645
        ggml_tensor * wo,
1646
        ggml_tensor * wo_b,
1647
        ggml_tensor * q_cur,
1648
        ggml_tensor * k_cur,
1649
        ggml_tensor * v_cur,
1650
        ggml_tensor * kq_b,
1651
        ggml_tensor * sinks,
1652
        ggml_tensor * v_mla,
1653
            float     kq_scale,
1654
0
            int       il) const {
1655
    // these nodes are added to the graph together so that they are not reordered
1656
    // by doing so, the number of splits in the graph is reduced
1657
    // expand k later to enable rope fusion which directly writes into k-v cache
1658
0
    ggml_build_forward_expand(gf, q_cur);
1659
0
    ggml_build_forward_expand(gf, v_cur);
1660
0
    ggml_build_forward_expand(gf, k_cur);
1661
1662
0
    const auto * mctx_cur = inp->mctx;
1663
1664
    // store to KV cache
1665
0
    {
1666
0
        const auto & k_idxs = inp->get_k_idxs();
1667
0
        const auto & v_idxs = inp->get_v_idxs();
1668
1669
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
1670
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
1671
0
    }
1672
1673
0
    const auto & kq_mask = inp->get_kq_mask();
1674
1675
0
    ggml_tensor * q = q_cur;
1676
0
    ggml_tensor * k = mctx_cur->get_k(ctx0, il);
1677
0
    ggml_tensor * v = mctx_cur->get_v(ctx0, il);
1678
1679
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1680
0
    cb(cur, "kqv_out", il);
1681
1682
0
    if (wo) {
1683
0
        cur = build_lora_mm(wo, cur);
1684
0
        if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
1685
            // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
1686
0
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
1687
0
        }
1688
0
    }
1689
1690
0
    if (wo_b) {
1691
0
        cur = ggml_add(ctx0, cur, wo_b);
1692
0
    }
1693
1694
0
    return cur;
1695
0
}
1696
1697
ggml_tensor * llm_graph_context::build_attn(
1698
        llm_graph_input_attn_kv_iswa * inp,
1699
        ggml_tensor * wo,
1700
        ggml_tensor * wo_b,
1701
        ggml_tensor * q_cur,
1702
        ggml_tensor * k_cur,
1703
        ggml_tensor * v_cur,
1704
        ggml_tensor * kq_b,
1705
        ggml_tensor * sinks,
1706
        ggml_tensor * v_mla,
1707
            float     kq_scale,
1708
0
            int       il) const {
1709
    // these nodes are added to the graph together so that they are not reordered
1710
    // by doing so, the number of splits in the graph is reduced
1711
0
    ggml_build_forward_expand(gf, q_cur);
1712
1713
0
    if (k_cur) {
1714
0
        ggml_build_forward_expand(gf, k_cur);
1715
0
    }
1716
1717
0
    if (v_cur) {
1718
0
        ggml_build_forward_expand(gf, v_cur);
1719
0
    }
1720
1721
0
    const auto * mctx_iswa = inp->mctx;
1722
1723
0
    const bool is_swa = hparams.is_swa(il);
1724
1725
0
    const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base();
1726
1727
    // optionally store to KV cache
1728
0
    if (k_cur) {
1729
0
        const auto & k_idxs = is_swa ? inp->get_k_idxs_swa() : inp->get_k_idxs();
1730
1731
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
1732
0
    }
1733
1734
0
    if (v_cur) {
1735
0
        const auto & v_idxs = is_swa ? inp->get_v_idxs_swa() : inp->get_v_idxs();
1736
1737
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
1738
0
    }
1739
1740
0
    const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();
1741
1742
0
    ggml_tensor * q = q_cur;
1743
0
    ggml_tensor * k = mctx_cur->get_k(ctx0, il);
1744
0
    ggml_tensor * v = mctx_cur->get_v(ctx0, il);
1745
1746
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1747
0
    cb(cur, "kqv_out", il);
1748
1749
0
    if (wo) {
1750
0
        cur = build_lora_mm(wo, cur);
1751
0
    }
1752
1753
0
    if (wo_b) {
1754
        //cb(cur, "kqv_wo", il);
1755
0
    }
1756
1757
0
    if (wo_b) {
1758
0
        cur = ggml_add(ctx0, cur, wo_b);
1759
0
    }
1760
1761
0
    return cur;
1762
0
}
1763
1764
0
llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const {
1765
0
    auto inp = std::make_unique<llm_graph_input_attn_cross>(cross);
1766
1767
0
    const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;
1768
1769
0
    inp->cross_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_enc, n_tokens, 1, 1);
1770
0
    ggml_set_input(inp->cross_kq_mask);
1771
1772
0
    inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask;
1773
1774
0
    return (llm_graph_input_attn_cross *) res->add_input(std::move(inp));
1775
0
}
1776
1777
ggml_tensor * llm_graph_context::build_attn(
1778
        llm_graph_input_attn_cross * inp,
1779
        ggml_tensor * wo,
1780
        ggml_tensor * wo_b,
1781
        ggml_tensor * q_cur,
1782
        ggml_tensor * k_cur,
1783
        ggml_tensor * v_cur,
1784
        ggml_tensor * kq_b,
1785
        ggml_tensor * sinks,
1786
        ggml_tensor * v_mla,
1787
            float     kq_scale,
1788
0
            int       il) const {
1789
    // these nodes are added to the graph together so that they are not reordered
1790
    // by doing so, the number of splits in the graph is reduced
1791
0
    ggml_build_forward_expand(gf, q_cur);
1792
0
    ggml_build_forward_expand(gf, k_cur);
1793
0
    ggml_build_forward_expand(gf, v_cur);
1794
1795
0
    const auto & kq_mask = inp->get_kq_mask_cross();
1796
1797
0
    ggml_tensor * q = q_cur;
1798
0
    ggml_tensor * k = k_cur;
1799
0
    ggml_tensor * v = v_cur;
1800
1801
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1802
0
    cb(cur, "kqv_out", il);
1803
1804
0
    if (wo) {
1805
0
        cur = build_lora_mm(wo, cur);
1806
0
    }
1807
1808
0
    if (wo_b) {
1809
        //cb(cur, "kqv_wo", il);
1810
0
    }
1811
1812
0
    if (wo_b) {
1813
0
        cur = ggml_add(ctx0, cur, wo_b);
1814
0
    }
1815
1816
0
    return cur;
1817
0
}
1818
1819
// TODO: maybe split the inner implementation into its own function,
1820
//       like the non-sliding-window equivalent,
1821
//       once sliding-window hybrid caches are a thing.
1822
0
llm_graph_input_attn_kv_iswa * llm_graph_context::build_attn_inp_kv_iswa() const {
1823
0
    const auto * mctx_cur = static_cast<const llama_kv_cache_iswa_context *>(mctx);
1824
1825
0
    auto inp = std::make_unique<llm_graph_input_attn_kv_iswa>(hparams, cparams, mctx_cur);
1826
1827
0
    const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;
1828
1829
0
    {
1830
0
        const auto n_kv = mctx_cur->get_base()->get_n_kv();
1831
1832
0
        inp->self_k_idxs = mctx_cur->get_base()->build_input_k_idxs(ctx0, ubatch);
1833
0
        inp->self_v_idxs = mctx_cur->get_base()->build_input_v_idxs(ctx0, ubatch);
1834
1835
0
        inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, n_tokens/n_stream, 1, n_stream);
1836
0
        ggml_set_input(inp->self_kq_mask);
1837
1838
0
        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
1839
0
    }
1840
1841
0
    {
1842
0
        GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache for non-SWA");
1843
1844
0
        const auto n_kv = mctx_cur->get_swa()->get_n_kv();
1845
1846
0
        inp->self_k_idxs_swa = mctx_cur->get_swa()->build_input_k_idxs(ctx0, ubatch);
1847
0
        inp->self_v_idxs_swa = mctx_cur->get_swa()->build_input_v_idxs(ctx0, ubatch);
1848
1849
0
        inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, n_tokens/n_stream, 1, n_stream);
1850
0
        ggml_set_input(inp->self_kq_mask_swa);
1851
1852
0
        inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
1853
0
    }
1854
1855
0
    return (llm_graph_input_attn_kv_iswa *) res->add_input(std::move(inp));
1856
0
}
1857
1858
ggml_tensor * llm_graph_context::build_rs(
1859
        ggml_tensor * s,
1860
        ggml_tensor * state_copy_main,
1861
        ggml_tensor * state_copy_extra,
1862
            int32_t   state_size,
1863
            int32_t   n_seqs,
1864
           uint32_t   n_rs,
1865
           uint32_t   rs_head,
1866
           uint32_t   rs_size,
1867
            int32_t   rs_zero,
1868
0
        const llm_graph_get_rows_fn & get_state_rows) const {
1869
1870
0
    ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, rs_size);
1871
1872
    // Clear a single state which will then be copied to the other cleared states.
1873
    // Note that this is a no-op when the view is zero-sized.
1874
0
    ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0));
1875
0
    ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0));
1876
1877
    // copy states
1878
    // NOTE: assuming the copy destinations are ALL contained between rs_head and rs_head + n_rs
1879
    // {state_size, rs_size} -> {state_size, n_seqs}
1880
0
    ggml_tensor * output_states = get_state_rows(ctx0, states, state_copy_main);
1881
0
    ggml_build_forward_expand(gf, output_states);
1882
1883
    // copy extra states which won't be changed further (between n_seqs and n_rs)
1884
0
    ggml_tensor * states_extra = ggml_get_rows(ctx0, states, state_copy_extra);
1885
0
    ggml_build_forward_expand(gf,
1886
0
        ggml_cpy(ctx0,
1887
0
            states_extra,
1888
0
            ggml_view_1d(ctx0, s, state_size*(n_rs - n_seqs), (rs_head + n_seqs)*state_size*ggml_element_size(s))));
1889
1890
0
    return output_states;
1891
0
}
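Reading the graph ops above as plain loops may help: one state can be zero-cleared, the first n_seqs entries of the copy list gather the states the current ubatch will update, and the remaining n_rs - n_seqs entries are written back contiguously starting at slot rs_head + n_seqs so they survive untouched. A scalar sketch under those assumptions, not the ggml implementation:

#include <algorithm>
#include <cstddef>
#include <vector>

// Scalar sketch of build_rs: s holds rs_size states of state_size floats each.
std::vector<float> build_rs_sketch(
        std::vector<float> & s,
        const std::vector<int> & copy_main,   // n_seqs source slots
        const std::vector<int> & copy_extra,  // n_rs - n_seqs source slots
        int state_size, int n_seqs, int rs_head, int rs_zero) {
    // clear a single designated state (rs_zero < 0 means "none"); the cleared data
    // is then picked up by any sequence whose copy index points at it
    if (rs_zero >= 0) {
        std::fill_n(s.begin() + (size_t) rs_zero * state_size, state_size, 0.0f);
    }

    // {state_size, rs_size} -> {state_size, n_seqs}: gather the states to be updated
    std::vector<float> out((size_t) n_seqs * state_size);
    for (int i = 0; i < n_seqs; ++i) {
        std::copy_n(s.begin() + (size_t) copy_main[i] * state_size, state_size,
                    out.begin() + (size_t) i * state_size);
    }

    // copy the extra states, which won't change further, to just after the ubatch's slots
    for (size_t i = 0; i < copy_extra.size(); ++i) {
        std::copy_n(s.begin() + (size_t) copy_extra[i] * state_size, state_size,
                    s.begin() + ((size_t) rs_head + n_seqs + i) * state_size);
    }

    return out;
}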
1892
1893
static std::unique_ptr<llm_graph_input_rs> build_rs_inp_impl(
1894
           ggml_context * ctx0,
1895
     const llama_ubatch & ubatch,
1896
0
    const llama_memory_recurrent_context * mctx_cur) {
1897
1898
0
    auto inp = std::make_unique<llm_graph_input_rs>(mctx_cur);
1899
1900
0
    const int64_t n_rs   = mctx_cur->get_n_rs();
1901
0
    const int64_t n_seqs = ubatch.n_seqs;
1902
1903
0
    inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
1904
0
    ggml_set_input(inp->s_copy);
1905
1906
0
    inp->s_copy_main  = ggml_view_1d(ctx0, inp->s_copy, n_seqs, 0);
1907
0
    inp->s_copy_extra = ggml_view_1d(ctx0, inp->s_copy, n_rs - n_seqs, n_seqs * inp->s_copy->nb[0]);
1908
1909
0
    inp->head = mctx_cur->get_head();
1910
0
    inp->rs_z = mctx_cur->get_rs_z();
1911
1912
0
    return inp;
1913
0
}
1914
1915
0
llm_graph_input_rs * llm_graph_context::build_rs_inp() const {
1916
0
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
1917
1918
0
    auto inp = build_rs_inp_impl(ctx0, ubatch, mctx_cur);
1919
1920
0
    return (llm_graph_input_rs *) res->add_input(std::move(inp));
1921
0
}
1922
1923
ggml_tensor * llm_graph_context::build_rs(
1924
        llm_graph_input_rs * inp,
1925
        ggml_tensor * s,
1926
            int32_t   state_size,
1927
            int32_t   n_seqs,
1928
0
        const llm_graph_get_rows_fn & get_state_rows) const {
1929
0
    const auto * kv_state = inp->mctx;
1930
1931
0
    return build_rs(s, inp->s_copy_main, inp->s_copy_extra, state_size, n_seqs,
1932
0
                    kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(),
1933
0
                    get_state_rows);
1934
0
}
1935
1936
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
1937
    llm_graph_input_rs * inp,
1938
    const llama_ubatch & ubatch,
1939
0
                   int   il) const {
1940
0
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
1941
1942
0
    const auto token_shift_count = hparams.token_shift_count;
1943
1944
0
    const int64_t n_seqs  = ubatch.n_seqs;
1945
1946
0
    ggml_tensor * token_shift_all = mctx_cur->get_r_l(il);
1947
1948
0
    ggml_tensor * token_shift = build_rs(
1949
0
            inp, token_shift_all,
1950
0
            hparams.n_embd_r(), n_seqs);
1951
1952
0
    token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);
1953
1954
0
    return token_shift;
1955
0
}
1956
1957
ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
1958
         ggml_tensor * token_shift,
1959
  const llama_ubatch & ubatch,
1960
0
                 int   il) const {
1961
0
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
1962
1963
0
    const auto token_shift_count = hparams.token_shift_count;
1964
0
    const auto n_embd = hparams.n_embd;
1965
1966
0
    const int64_t n_seqs = ubatch.n_seqs;
1967
1968
0
    const auto kv_head = mctx_cur->get_head();
1969
1970
0
    return ggml_cpy(
1971
0
        ctx0,
1972
0
        ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
1973
0
        ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il)))
1974
0
    );
1975
0
}
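Concretely, the store above flattens the [n_embd, token_shift_count, n_seqs] token-shift tensor and copies it into the layer's recurrent buffer starting at slot kv_head, where each slot holds n_embd_r() = n_embd * token_shift_count values. A flat-array sketch under those assumptions, with f32 elements assumed for simplicity:

#include <cstddef>
#include <cstring>

// Flat-array equivalent of the ggml_cpy above (f32 assumed):
// token_shift holds n_embd * token_shift_count * n_seqs contiguous floats,
// r_l is the layer's recurrent buffer with one slot of n_embd_r floats per cell.
void store_token_shift_sketch(const float * token_shift, float * r_l,
                              int n_embd, int token_shift_count,
                              int n_seqs, int kv_head) {
    const size_t n_embd_r = (size_t) n_embd * token_shift_count; // floats per slot
    std::memcpy(r_l + n_embd_r * kv_head, token_shift,
                n_embd_r * n_seqs * sizeof(float));
}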
1976
1977
0
llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const {
1978
0
    const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx);
1979
1980
0
    auto inp_rs   = build_rs_inp_impl     (ctx0, ubatch, mctx_cur->get_recr());
1981
0
    auto inp_attn = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn());
1982
1983
0
    auto inp = std::make_unique<llm_graph_input_mem_hybrid>(cparams, std::move(inp_attn), std::move(inp_rs), mctx_cur);
1984
1985
0
    return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp));
1986
0
}
1987
1988
void llm_graph_context::build_dense_out(
1989
    ggml_tensor * dense_2,
1990
0
    ggml_tensor * dense_3) const {
1991
0
    if (!cparams.embeddings || dense_2 == nullptr || dense_3 == nullptr) {
1992
0
        return;
1993
0
    }
1994
0
    ggml_tensor * cur = res->t_embd_pooled != nullptr ? res->t_embd_pooled : res->t_embd;
1995
0
    GGML_ASSERT(cur != nullptr && "missing t_embd_pooled/t_embd");
1996
1997
0
    cur = ggml_mul_mat(ctx0, dense_2, cur);
1998
0
    cur = ggml_mul_mat(ctx0, dense_3, cur);
1999
0
    cb(cur, "result_embd_pooled", -1);
2000
0
    res->t_embd_pooled = cur;
2001
0
    ggml_build_forward_expand(gf, cur);
2002
0
}
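build_dense_out chains two projections on the pooled embedding: cur = dense_3 * (dense_2 * pooled). A minimal row-major mat-vec sketch of that chain; the dimension names are illustrative, not the ggml layout:

#include <cstddef>
#include <vector>

// y = W * x for a row-major W of shape [rows, cols]
static std::vector<float> matvec(const std::vector<float> & W,
                                 const std::vector<float> & x,
                                 int rows, int cols) {
    std::vector<float> y(rows, 0.0f);
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            y[r] += W[(size_t) r * cols + c] * x[c];
        }
    }
    return y;
}

// pooled [n_embd] -> hidden [n_hidden] -> out [n_out], mirroring dense_2 then dense_3
std::vector<float> dense_out_sketch(const std::vector<float> & pooled,
                                    const std::vector<float> & dense_2, int n_hidden,
                                    const std::vector<float> & dense_3, int n_out,
                                    int n_embd) {
    const std::vector<float> hidden = matvec(dense_2, pooled, n_hidden, n_embd);
    return matvec(dense_3, hidden, n_out, n_hidden);
}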
2003
2004
2005
void llm_graph_context::build_pooling(
2006
        ggml_tensor * cls,
2007
        ggml_tensor * cls_b,
2008
        ggml_tensor * cls_out,
2009
0
        ggml_tensor * cls_out_b) const {
2010
0
    if (!cparams.embeddings) {
2011
0
        return;
2012
0
    }
2013
2014
0
    ggml_tensor * inp = res->t_embd;
2015
2016
    //// find result_norm tensor for input
2017
    //for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
2018
    //    inp = ggml_graph_node(gf, i);
2019
    //    if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) {
2020
    //        break;
2021
    //    }
2022
2023
    //    inp = nullptr;
2024
    //}
2025
2026
0
    GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor");
2027
2028
0
    ggml_tensor * cur;
2029
2030
0
    switch (pooling_type) {
2031
0
        case LLAMA_POOLING_TYPE_NONE:
2032
0
            {
2033
0
                cur = inp;
2034
0
            } break;
2035
0
        case LLAMA_POOLING_TYPE_MEAN:
2036
0
            {
2037
0
                ggml_tensor * inp_mean = build_inp_mean();
2038
0
                cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean);
2039
0
            } break;
2040
0
        case LLAMA_POOLING_TYPE_CLS:
2041
0
        case LLAMA_POOLING_TYPE_LAST:
2042
0
            {
2043
0
                ggml_tensor * inp_cls = build_inp_cls();
2044
0
                cur = ggml_get_rows(ctx0, inp, inp_cls);
2045
0
            } break;
2046
0
        case LLAMA_POOLING_TYPE_RANK:
2047
0
            {
2048
0
                ggml_tensor * inp_cls = build_inp_cls();
2049
0
                cur = ggml_get_rows(ctx0, inp, inp_cls);
2050
2051
                // classification head
2052
                // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566
2053
0
                if (cls) {
2054
0
                    cur = ggml_mul_mat(ctx0, cls, cur);
2055
0
                    if (cls_b) {
2056
0
                        cur = ggml_add(ctx0, cur, cls_b);
2057
0
                    }
2058
0
                    cur = ggml_tanh(ctx0, cur);
2059
0
                }
2060
2061
                // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
2062
                // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896
2063
                // Single layer classification head (direct projection)
2064
                // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476
2065
0
                if (cls_out) {
2066
0
                    cur = ggml_mul_mat(ctx0, cls_out, cur);
2067
0
                    if (cls_out_b) {
2068
0
                        cur = ggml_add(ctx0, cur, cls_out_b);
2069
0
                    }
2070
0
                }
2071
2072
                // softmax for qwen3 reranker
2073
0
                if (arch == LLM_ARCH_QWEN3) {
2074
0
                    cur = ggml_soft_max(ctx0, cur);
2075
0
                }
2076
0
            } break;
2077
0
        default:
2078
0
            {
2079
0
                GGML_ABORT("unknown pooling type");
2080
0
            }
2081
0
    }
2082
2083
0
    cb(cur, "result_embd_pooled", -1);
2084
0
    res->t_embd_pooled = cur;
2085
2086
0
    ggml_build_forward_expand(gf, cur);
2087
0
}
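The pooling cases above reduce the [n_embd, n_tokens] embeddings to one vector per sequence: MEAN multiplies by per-token weights of 1/n (the inp_mean matmul), while CLS/LAST/RANK pick a single row via ggml_get_rows. A single-sequence scalar sketch of those two reductions, illustrative only:

#include <cstddef>
#include <vector>

// embd is row-major [n_tokens][n_embd]; both helpers return one n_embd vector
// and assume at least one token.

// LLAMA_POOLING_TYPE_MEAN: average over tokens (equivalent to the inp_mean matmul)
std::vector<float> pool_mean(const std::vector<std::vector<float>> & embd) {
    const size_t n_tokens = embd.size(), n_embd = embd[0].size();
    std::vector<float> out(n_embd, 0.0f);
    for (const auto & row : embd) {
        for (size_t i = 0; i < n_embd; ++i) {
            out[i] += row[i] / n_tokens;
        }
    }
    return out;
}

// LLAMA_POOLING_TYPE_CLS / LAST: select one token's embedding (equivalent to ggml_get_rows)
std::vector<float> pool_select(const std::vector<std::vector<float>> & embd, size_t idx) {
    return embd[idx];
}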
2088
2089
0
int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
2090
    // TODO move to hparams if a T5 variant appears that uses a different value
2091
0
    const int64_t max_distance = 128;
2092
2093
0
    if (bidirectional) {
2094
0
        n_buckets >>= 1;
2095
0
    }
2096
2097
0
    const int64_t max_exact = n_buckets >> 1;
2098
2099
0
    int32_t relative_position = x - y;
2100
0
    int32_t relative_bucket = 0;
2101
2102
0
    if (bidirectional) {
2103
0
        relative_bucket += (relative_position > 0) * n_buckets;
2104
0
        relative_position = std::abs(relative_position);
2105
0
    } else {
2106
0
        relative_position = -std::min<int32_t>(relative_position, 0);
2107
0
    }
2108
2109
0
    int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
2110
0
    relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1);
2111
0
    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);
2112
2113
0
    return relative_bucket;
2114
0
}
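For intuition about the bucketing: with n_buckets = 32 and bidirectional = true, half of the buckets encode the sign of the offset, offsets below max_exact get their own bucket, and larger offsets are compressed logarithmically up to max_distance = 128. A small usage sketch that prints a few buckets; it assumes the function above is linked in, and llama_pos is a 32-bit position type:

#include <cstdint>
#include <cstdio>

// forward declaration of the function above (llama_pos is an int32_t)
int32_t llama_relative_position_bucket(int32_t x, int32_t y, uint64_t n_buckets, bool bidirectional);

int main() {
    // buckets for a few position pairs (d, 0), T5-style bidirectional bucketing
    const int32_t ds[] = {0, 1, 7, 16, 64, 200};
    for (int32_t d : ds) {
        printf("relative position %4d -> bucket %2d\n", d,
               llama_relative_position_bucket(d, 0, /*n_buckets=*/32, /*bidirectional=*/true));
    }
    return 0;
}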