Coverage Report

Created: 2025-11-24 06:10

/src/llama.cpp/src/llama-graph.cpp
Line  Count  Source
1
#include "llama-graph.h"
2
3
#include "llama-impl.h"
4
#include "llama-batch.h"
5
#include "llama-cparams.h"
6
7
#include "llama-kv-cache.h"
8
#include "llama-kv-cache-iswa.h"
9
#include "llama-memory-hybrid.h"
10
#include "llama-memory-recurrent.h"
11
12
#include <cassert>
13
#include <cmath>
14
#include <cstring>
15
16
0
void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) {
17
0
    if (ubatch->token) {
18
0
        const int64_t n_tokens = ubatch->n_tokens;
19
20
0
        ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens));
21
0
    }
22
23
0
    if (ubatch->embd) {
24
0
        const int64_t n_embd   = embd->ne[0];
25
0
        const int64_t n_tokens = ubatch->n_tokens;
26
27
0
        ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd));
28
0
    }
29
0
}
30
31
0
bool llm_graph_input_embd::can_reuse(const llm_graph_params & params) {
32
0
    bool res = true;
33
34
0
    res &= (!tokens && !params.ubatch.token) || (tokens && tokens->ne[0] == params.ubatch.n_tokens);
35
0
    res &= (!embd   && !params.ubatch.embd)  || (embd   &&   embd->ne[0] == params.ubatch.n_tokens);
36
37
0
    return res;
38
0
}
39
40
0
void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) {
41
0
    if (ubatch->pos && pos) {
42
0
        const int64_t n_tokens = ubatch->n_tokens;
43
44
0
        if (ubatch->token && n_pos_per_embd == 4) {
45
            // in case we're using M-RoPE with text tokens, convert the 1D positions to 4D
46
            // the first 3 dims are the same, and the 4th dim is all 0
47
0
            std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd);
48
            // copy the first dimension
49
0
            for (int i = 0; i < n_tokens; ++i) {
50
0
                pos_data[               i] = ubatch->pos[i];
51
0
                pos_data[    n_tokens + i] = ubatch->pos[i];
52
0
                pos_data[2 * n_tokens + i] = ubatch->pos[i];
53
0
                pos_data[3 * n_tokens + i] = 0; // 4th dim is 0
54
0
            }
55
0
            ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos));
56
0
        } else {
57
0
            ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_embd*ggml_element_size(pos));
58
0
        }
59
0
    }
60
0
}
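// Worked example of the M-RoPE layout above: for n_tokens = 2 and positions {5, 6},
// pos_data is laid out as {5, 6,  5, 6,  5, 6,  0, 0} -- three copies of the 1D
// positions followed by zeros for the 4th dimension.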
61
62
0
bool llm_graph_input_pos::can_reuse(const llm_graph_params & params) {
63
0
    bool res = true;
64
65
0
    res &= pos->ne[0] == params.ubatch.n_tokens;
66
67
0
    return res;
68
0
}
69
70
0
void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) {
71
0
    if (ubatch->pos && attn_scale) {
72
0
        const int64_t n_tokens = ubatch->n_tokens;
73
74
0
        std::vector<float> attn_scale_data(n_tokens, 0.0f);
75
0
        for (int i = 0; i < n_tokens; ++i) {
76
0
            const float pos = ubatch->pos[i];
77
0
            attn_scale_data[i] = std::log(
78
0
                std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0
79
0
            ) * f_attn_temp_scale + 1.0;
80
0
        }
81
82
0
        ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*ggml_element_size(attn_scale));
83
0
    }
84
0
}
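// A minimal sketch of the temperature-scaling formula above, using hypothetical
// values for n_attn_temp_floor_scale and f_attn_temp_scale (the real values come
// from the model hparams):
//
//   float attn_temp_scale(llama_pos pos, int n_floor, float f_scale) {
//       return std::log(std::floor((pos + 1.0f) / n_floor) + 1.0f) * f_scale + 1.0f;
//   }
//
//   // e.g. n_floor = 8192, f_scale = 0.1, pos = 16384:
//   //      floor(16385/8192) + 1 = 3, so the scale is 0.1*log(3) + 1 ≈ 1.11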
85
86
0
void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) {
87
0
    if (pos_bucket) {
88
0
        const int64_t n_tokens = ubatch->n_tokens;
89
90
0
        GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer));
91
0
        GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
92
93
0
        int32_t * data = (int32_t *) pos_bucket->data;
94
95
0
        for (int h = 0; h < 1; ++h) {
96
0
            for (int j = 0; j < n_tokens; ++j) {
97
0
                for (int i = 0; i < n_tokens; ++i) {
98
0
                    data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true);
99
0
                }
100
0
            }
101
0
        }
102
0
    }
103
0
}
104
105
0
void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) {
106
0
    if (pos_bucket) {
107
0
        mctx->set_input_pos_bucket(pos_bucket, ubatch);
108
0
    }
109
0
}
110
111
0
void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) {
112
0
    GGML_ASSERT(out_ids);
113
114
0
    const int64_t n_tokens = ubatch->n_tokens;
115
116
0
    GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer));
117
0
    int32_t * data = (int32_t *) out_ids->data;
118
119
0
    if (n_outputs == n_tokens) {
120
0
        for (int i = 0; i < n_tokens; ++i) {
121
0
            data[i] = i;
122
0
        }
123
124
0
        return;
125
0
    }
126
127
0
    GGML_ASSERT(ubatch->output);
128
129
0
    int n_outputs = 0;
130
131
0
    for (int i = 0; i < n_tokens; ++i) {
132
0
        if (ubatch->output[i]) {
133
0
            data[n_outputs++] = i;
134
0
        }
135
0
    }
136
0
}
137
138
0
bool llm_graph_input_out_ids::can_reuse(const llm_graph_params & params) {
139
0
    bool res = true;
140
141
0
    res &= n_outputs == params.n_outputs;
142
143
0
    return res;
144
0
}
145
146
0
void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) {
147
0
    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
148
0
        const int64_t n_tokens     = ubatch->n_tokens;
149
0
        const int64_t n_seq_tokens = ubatch->n_seq_tokens;
150
0
        const int64_t n_seqs_unq   = ubatch->n_seqs_unq;
151
152
0
        GGML_ASSERT(mean);
153
0
        GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer));
154
155
0
        float * data = (float *) mean->data;
156
0
        memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean));
157
158
0
        std::vector<uint64_t> sums(n_seqs_unq, 0);
159
0
        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
160
0
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
161
0
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
162
0
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];
163
164
0
                sums[seq_idx] += ubatch->n_seq_tokens;
165
0
            }
166
0
        }
167
168
0
        std::vector<float> div(n_seqs_unq, 0.0f);
169
0
        for (int s = 0; s < n_seqs_unq; ++s) {
170
0
            const uint64_t sum = sums[s];
171
0
            if (sum > 0) {
172
0
                div[s] = 1.0f/float(sum);
173
0
            }
174
0
        }
175
176
0
        for (int i = 0; i < n_tokens; i += n_seq_tokens) {
177
0
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
178
0
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
179
0
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];
180
181
0
                for (int j = 0; j < n_seq_tokens; ++j) {
182
0
                    data[seq_idx*n_tokens + i + j] = div[seq_idx];
183
0
                }
184
0
            }
185
0
        }
186
0
    }
187
0
}
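// Worked example of the mean-pooling weights above: a single sequence with
// n_seq_tokens = 4 gives sums[seq] = 4 and div[seq] = 0.25, so each of its 4 rows
// in `mean` is filled with 0.25 and the later matmul over the token embeddings
// produces the per-sequence average.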
188
189
0
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) {
190
0
    const int64_t n_tokens     = ubatch->n_tokens;
191
0
    const int64_t n_seqs_unq   = ubatch->n_seqs_unq;
192
193
0
    if (cparams.embeddings && (
194
0
        cparams.pooling_type == LLAMA_POOLING_TYPE_CLS  ||
195
0
        cparams.pooling_type == LLAMA_POOLING_TYPE_RANK ||
196
0
        cparams.pooling_type == LLAMA_POOLING_TYPE_LAST
197
0
    )) {
198
0
        GGML_ASSERT(cls);
199
0
        GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer));
200
201
0
        uint32_t * data = (uint32_t *) cls->data;
202
0
        memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls));
203
204
0
        std::vector<int> target_pos(n_seqs_unq, -1);
205
0
        std::vector<int> target_row(n_seqs_unq, -1);
206
207
0
        const bool last = (
208
0
             cparams.pooling_type == LLAMA_POOLING_TYPE_LAST ||
209
0
            (cparams.pooling_type == LLAMA_POOLING_TYPE_RANK && arch == LLM_ARCH_QWEN3) // qwen3 reranking & embedding models use last token
210
0
        );
211
212
0
        for (int i = 0; i < n_tokens; ++i) {
213
0
            const llama_pos pos = ubatch->pos[i];
214
215
0
            for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
216
0
                const llama_seq_id seq_id  = ubatch->seq_id[i][s];
217
0
                const int32_t      seq_idx = ubatch->seq_idx[seq_id];
218
219
0
                if (
220
0
                    (target_pos[seq_idx] == -1) ||
221
0
                    ( last && pos >= target_pos[seq_idx]) ||
222
0
                    (!last && pos <  target_pos[seq_idx])
223
0
                ) {
224
0
                    target_pos[seq_idx] = pos;
225
0
                    target_row[seq_idx] = i;
226
0
                }
227
0
            }
228
0
        }
229
230
0
        for (int s = 0; s < n_seqs_unq; ++s) {
231
0
            if (target_row[s] >= 0) {
232
0
                data[s] = target_row[s];
233
0
            }
234
0
        }
235
0
    }
236
0
}
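// In the selection above, last == true (LAST pooling, or RANK with Qwen3) keeps the
// row with the greatest position per sequence, while last == false (CLS, or RANK
// otherwise) keeps the row with the smallest position, i.e. the first token of each
// sequence in the ubatch.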
237
238
0
void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) {
239
0
    GGML_UNUSED(ubatch);
240
241
0
    const int64_t n_rs = mctx->get_n_rs();
242
243
0
    if (s_copy) {
244
0
        GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer));
245
0
        int32_t * data = (int32_t *) s_copy->data;
246
247
        // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
248
0
        for (uint32_t i = 0; i < n_rs; ++i) {
249
0
            data[i] = mctx->s_copy(i);
250
0
        }
251
0
    }
252
0
}
253
254
0
void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) {
255
0
    GGML_UNUSED(ubatch);
256
257
0
    if (cross_embd && !cross->v_embd.empty()) {
258
0
        assert(cross_embd->type == GGML_TYPE_F32);
259
260
0
        ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd));
261
0
    }
262
0
}
263
264
0
static void print_mask(const float * data, int64_t n_tokens, int64_t n_kv, int64_t n_swa, llama_swa_type swa_type) {
265
0
    LLAMA_LOG_DEBUG("%s: === Attention mask ===\n", __func__);
266
0
    const char * swa_type_str = "unknown";
267
268
0
    switch (swa_type) {
269
0
        case LLAMA_SWA_TYPE_NONE:      swa_type_str = "LLAMA_SWA_TYPE_NONE"; break;
270
0
        case LLAMA_SWA_TYPE_STANDARD:  swa_type_str = "LLAMA_SWA_TYPE_STANDARD"; break;
271
0
        case LLAMA_SWA_TYPE_CHUNKED:   swa_type_str = "LLAMA_SWA_TYPE_CHUNKED"; break;
272
0
        case LLAMA_SWA_TYPE_SYMMETRIC: swa_type_str = "LLAMA_SWA_TYPE_SYMMETRIC"; break;
273
0
    };
274
275
0
    LLAMA_LOG_DEBUG("%s: n_swa: %d, n_kv: %d, swa_type: %s\n", __func__, (int)n_swa, (int)n_kv, swa_type_str);
276
0
    LLAMA_LOG_DEBUG("%s: '0' = can attend, '∞' = masked\n", __func__);
277
0
    LLAMA_LOG_DEBUG("%s: Rows = query tokens, Columns = key/value tokens\n\n", __func__);
278
279
0
    LLAMA_LOG_DEBUG("    ");
280
0
    for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) {
281
0
        LLAMA_LOG_DEBUG("%2d", j);
282
0
    }
283
0
    LLAMA_LOG_DEBUG("\n");
284
285
0
    for (int i = 0; i < std::min((int64_t)20, n_tokens); ++i) {
286
0
        LLAMA_LOG_DEBUG(" %2d ", i);
287
0
        for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) {
288
0
            float val = data[i * n_kv + j];
289
0
            if (val == -INFINITY) {
290
0
                LLAMA_LOG_DEBUG(" ∞");
291
0
            } else {
292
0
                LLAMA_LOG_DEBUG(" 0");
293
0
            }
294
0
        }
295
0
        LLAMA_LOG_DEBUG("\n");
296
0
    }
297
0
}
298
299
0
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) {
300
0
    const int64_t n_kv     = ubatch->n_tokens;
301
0
    const int64_t n_tokens = ubatch->n_tokens;
302
303
0
    const auto fill_mask = [&](float * data, int n_swa, llama_swa_type swa_type) {
304
0
        for (int h = 0; h < 1; ++h) {
305
0
            for (int i1 = 0; i1 < n_tokens; ++i1) {
306
0
                const llama_seq_id s1 = ubatch->seq_id[i1][0];
307
0
                const llama_pos    p1 = ubatch->pos[i1];
308
309
0
                const uint64_t idst = h*(n_kv*n_tokens) + i1*n_kv;
310
311
0
                for (int i0 = 0; i0 < n_tokens; ++i0) {
312
0
                    const llama_seq_id s0 = ubatch->seq_id[i0][0];
313
0
                    const llama_pos p0    = ubatch->pos[i0];
314
315
                    // mask different sequences
316
0
                    if (s0 != s1) {
317
0
                        continue;
318
0
                    }
319
320
                    // mask future tokens
321
0
                    if (cparams.causal_attn && p0 > p1) {
322
0
                        continue;
323
0
                    }
324
325
                    // apply SWA if any
326
0
                    if (llama_hparams::is_masked_swa(n_swa, swa_type, p0, p1)) {
327
0
                        continue;
328
0
                    }
329
330
0
                    data[idst + i0] = hparams.use_alibi ? -std::abs(p0 - p1) : 0.0f;
331
0
                }
332
0
            }
333
0
        }
334
0
    };
335
336
0
    {
337
0
        GGML_ASSERT(self_kq_mask);
338
0
        GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask->buffer));
339
340
0
        float * data = (float *) self_kq_mask->data;
341
342
0
        std::fill(data, data + ggml_nelements(self_kq_mask), -INFINITY);
343
344
0
        fill_mask(data, 0, LLAMA_SWA_TYPE_NONE);
345
346
0
        if (debug) {
347
0
            print_mask(data, n_tokens, n_kv, 0, LLAMA_SWA_TYPE_NONE);
348
0
        }
349
0
    }
350
351
0
    if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
352
0
        GGML_ASSERT(self_kq_mask_swa);
353
0
        GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask_swa->buffer));
354
355
0
        float * data = (float *) self_kq_mask_swa->data;
356
357
0
        std::fill(data, data + ggml_nelements(self_kq_mask_swa), -INFINITY);
358
359
0
        fill_mask(data, hparams.n_swa, hparams.swa_type);
360
361
0
        if (debug) {
362
0
            print_mask(data, n_tokens, n_kv, hparams.n_swa, hparams.swa_type);
363
0
        }
364
0
    }
365
0
}
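// Summary of the mask built above: entry (i1, i0) stays at -INFINITY unless token i0
// belongs to the same sequence as i1, is not in the future when causal attention is
// enabled, and passes the SWA window check; attendable entries are set to 0, or to
// -|p0 - p1| when ALiBi is used.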
366
367
0
void llm_graph_input_attn_kv::set_input(const llama_ubatch * ubatch) {
368
0
    mctx->set_input_k_idxs(self_k_idxs, ubatch);
369
0
    mctx->set_input_v_idxs(self_v_idxs, ubatch);
370
371
0
    mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
372
0
}
373
374
0
bool llm_graph_input_attn_kv::can_reuse(const llm_graph_params & params) {
375
0
    const auto * mctx = static_cast<const llama_kv_cache_context *>(params.mctx);
376
377
0
    this->mctx = mctx;
378
379
0
    bool res = true;
380
381
0
    res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
382
  //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
383
384
0
    res &= self_kq_mask->ne[0] == mctx->get_n_kv();
385
0
    res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);
386
387
0
    return res;
388
0
}
389
390
0
void llm_graph_input_attn_kv_iswa::set_input(const llama_ubatch * ubatch) {
391
0
    mctx->get_base()->set_input_k_idxs(self_k_idxs, ubatch);
392
0
    mctx->get_base()->set_input_v_idxs(self_v_idxs, ubatch);
393
394
0
    mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn);
395
396
0
    mctx->get_swa()->set_input_k_idxs(self_k_idxs_swa, ubatch);
397
0
    mctx->get_swa()->set_input_v_idxs(self_v_idxs_swa, ubatch);
398
399
0
    mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn);
400
0
}
401
402
0
bool llm_graph_input_attn_kv_iswa::can_reuse(const llm_graph_params & params) {
403
0
    const auto * mctx = static_cast<const llama_kv_cache_iswa_context *>(params.mctx);
404
405
0
    this->mctx = mctx;
406
407
0
    bool res = true;
408
409
0
    res &= self_k_idxs->ne[0] == params.ubatch.n_tokens;
410
  //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
411
412
0
    res &= self_k_idxs_swa->ne[0] == params.ubatch.n_tokens;
413
  //res &= self_v_idxs_swa->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there
414
415
0
    res &= self_kq_mask->ne[0] == mctx->get_base()->get_n_kv();
416
0
    res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);
417
418
0
    res &= self_kq_mask_swa->ne[0] == mctx->get_swa()->get_n_kv();
419
0
    res &= self_kq_mask_swa->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD);
420
421
0
    return res;
422
0
}
423
424
0
void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) {
425
0
    GGML_ASSERT(cross_kq_mask);
426
427
0
    const int64_t n_enc    = cross_kq_mask->ne[0];
428
0
    const int64_t n_tokens = ubatch->n_tokens;
429
430
0
    GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer));
431
0
    GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
432
433
0
    float * data = (float *) cross_kq_mask->data;
434
435
0
    for (int h = 0; h < 1; ++h) {
436
0
        for (int i = 0; i < n_tokens; ++i) {
437
0
            for (int j = 0; j < n_enc; ++j) {
438
0
                float f = -INFINITY;
439
440
0
                for (int s = 0; s < ubatch->n_seq_id[i]; ++s) {
441
0
                    const llama_seq_id seq_id = ubatch->seq_id[i][s];
442
443
0
                    if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) {
444
0
                        f = 0.0f;
445
0
                    }
446
0
                }
447
448
0
                data[h*(n_enc*n_tokens) + i*n_enc + j] = f;
449
0
            }
450
0
        }
451
452
0
        for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
453
0
            for (int j = 0; j < n_enc; ++j) {
454
0
                data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY;
455
0
            }
456
0
        }
457
0
    }
458
0
}
459
460
0
void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
461
0
    inp_attn->set_input(ubatch);
462
0
    inp_rs->set_input(ubatch);
463
0
}
464
465
//
466
// llm_graph_result
467
//
468
469
0
llm_graph_result::llm_graph_result(int64_t max_nodes) : max_nodes(max_nodes) {
470
0
    reset();
471
472
0
    const char * LLAMA_GRAPH_RESULT_DEBUG = getenv("LLAMA_GRAPH_RESULT_DEBUG");
473
0
    debug = LLAMA_GRAPH_RESULT_DEBUG ? atoi(LLAMA_GRAPH_RESULT_DEBUG) : 0;
474
0
}
475
476
0
int64_t llm_graph_result::get_max_nodes() const {
477
0
    return max_nodes;
478
0
}
479
480
0
void llm_graph_result::reset() {
481
0
    t_tokens      = nullptr;
482
0
    t_logits      = nullptr;
483
0
    t_embd        = nullptr;
484
0
    t_embd_pooled = nullptr;
485
486
0
    params = {};
487
488
0
    inputs.clear();
489
490
0
    buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false));
491
492
0
    ggml_init_params params = {
493
0
        /*.mem_size   =*/ buf_compute_meta.size(),
494
0
        /*.mem_buffer =*/ buf_compute_meta.data(),
495
0
        /*.no_alloc   =*/ true,
496
0
    };
497
498
0
    ctx_compute.reset(ggml_init(params));
499
500
0
    gf = ggml_new_graph_custom(ctx_compute.get(), max_nodes, false);
501
0
}
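// A minimal standalone sketch of the "meta" context pattern used above: the buffer
// only holds tensor and graph metadata (no_alloc = true) and is sized from the ggml
// overhead helpers. MAX_NODES is a hypothetical constant.
//
//   std::vector<uint8_t> buf(ggml_tensor_overhead()*MAX_NODES +
//                            ggml_graph_overhead_custom(MAX_NODES, false));
//   ggml_init_params ip = {
//       /*.mem_size   =*/ buf.size(),
//       /*.mem_buffer =*/ buf.data(),
//       /*.no_alloc   =*/ true,
//   };
//   ggml_context * ctx = ggml_init(ip);
//   ggml_cgraph  * gf  = ggml_new_graph_custom(ctx, MAX_NODES, false);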
502
503
0
void llm_graph_result::set_inputs(const llama_ubatch * ubatch) {
504
0
    for (auto & input : inputs) {
505
0
        input->set_input(ubatch);
506
0
    }
507
0
}
508
509
0
bool llm_graph_result::can_reuse(const llm_graph_params & params) {
510
0
    if (!this->params.allow_reuse(params)) {
511
0
        if (debug > 1) {
512
0
            LLAMA_LOG_DEBUG("%s: cannot reuse graph due to incompatible graph parameters\n", __func__);
513
0
        }
514
515
0
        return false;
516
0
    }
517
518
0
    if (debug > 1) {
519
0
        LLAMA_LOG_DEBUG("%s: checking compatibility of %d inputs:\n", __func__, (int) inputs.size());
520
0
    }
521
522
0
    bool res = true;
523
524
0
    for (auto & input : inputs) {
525
0
        const bool cur = input->can_reuse(params);
526
527
0
        if (debug > 1) {
528
0
            LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", "placeholder", cur);
529
0
        }
530
531
0
        res = res && cur;
532
0
    }
533
534
0
    if (debug > 0) {
535
0
        LLAMA_LOG_DEBUG("%s: can reuse graph = %d\n", __func__, res);
536
0
    }
537
538
0
    return res;
539
0
}
540
541
0
llm_graph_input_i * llm_graph_result::add_input(llm_graph_input_ptr input) {
542
0
    inputs.emplace_back(std::move(input));
543
0
    return inputs.back().get();
544
0
}
545
546
0
void llm_graph_result::set_params(const llm_graph_params & params) {
547
0
    this->params = params;
548
0
}
549
550
//
551
// llm_graph_context
552
//
553
554
llm_graph_context::llm_graph_context(const llm_graph_params & params) :
555
0
    arch             (params.arch),
556
0
    hparams          (params.hparams),
557
0
    cparams          (params.cparams),
558
0
    ubatch           (params.ubatch),
559
0
    n_embd           (hparams.n_embd),
560
0
    n_layer          (hparams.n_layer),
561
0
    n_rot            (hparams.n_rot),
562
0
    n_ctx            (cparams.n_ctx),
563
0
    n_head           (hparams.n_head()),
564
0
    n_head_kv        (hparams.n_head_kv()),
565
0
    n_embd_head_k    (hparams.n_embd_head_k),
566
0
    n_embd_k_gqa     (hparams.n_embd_k_gqa()),
567
0
    n_embd_head_v    (hparams.n_embd_head_v),
568
0
    n_embd_v_gqa     (hparams.n_embd_v_gqa()),
569
0
    n_expert         (hparams.n_expert),
570
0
    n_expert_used    (cparams.warmup ? hparams.n_expert : hparams.n_expert_used),
571
0
    freq_base        (cparams.rope_freq_base),
572
0
    freq_scale       (cparams.rope_freq_scale),
573
0
    ext_factor       (cparams.yarn_ext_factor),
574
0
    attn_factor      (cparams.yarn_attn_factor),
575
0
    beta_fast        (cparams.yarn_beta_fast),
576
0
    beta_slow        (cparams.yarn_beta_slow),
577
0
    norm_eps         (hparams.f_norm_eps),
578
0
    norm_rms_eps     (hparams.f_norm_rms_eps),
579
0
    n_tokens         (ubatch.n_tokens),
580
0
    n_outputs        (params.n_outputs),
581
0
    n_ctx_orig       (cparams.n_ctx_orig_yarn),
582
0
    pooling_type     (cparams.pooling_type),
583
0
    rope_type        (hparams.rope_type),
584
0
    sched            (params.sched),
585
0
    backend_cpu      (params.backend_cpu),
586
0
    cvec             (params.cvec),
587
0
    loras            (params.loras),
588
0
    mctx             (params.mctx),
589
0
    cross            (params.cross),
590
0
    cb_func          (params.cb),
591
0
    res              (params.res),
592
0
    ctx0             (res->get_ctx()),
593
0
    gf               (res->get_gf()) {
594
0
        res->set_params(params);
595
0
    }
596
597
0
void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const {
598
0
    if (cb_func) {
599
0
        cb_func(ubatch, cur, name, il);
600
0
    }
601
0
}
602
603
ggml_tensor * llm_graph_context::build_cvec(
604
         ggml_tensor * cur,
605
0
                 int   il) const {
606
0
    return cvec->apply_to(ctx0, cur, il);
607
0
}
608
609
ggml_tensor * llm_graph_context::build_lora_mm(
610
          ggml_tensor * w,
611
0
          ggml_tensor * cur) const {
612
0
    ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
613
614
0
    for (const auto & lora : *loras) {
615
0
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
616
0
        if (lw == nullptr) {
617
0
            continue;
618
0
        }
619
620
0
        const float adapter_scale = lora.second;
621
0
        const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
622
623
0
        ggml_tensor * ab_cur = ggml_mul_mat(
624
0
                ctx0, lw->b,
625
0
                ggml_mul_mat(ctx0, lw->a, cur)
626
0
                );
627
628
0
        ab_cur = ggml_scale(ctx0, ab_cur, scale);
629
0
        res = ggml_add(ctx0, res, ab_cur);
630
0
    }
631
632
0
    return res;
633
0
}
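// The loop above applies the usual LoRA update: for each adapter,
//   res = W*cur + scale * B*(A*cur),  with  scale = adapter_scale * alpha / rank
// (or just adapter_scale when alpha == 0; see lw->get_scale and the explicit formula
// in build_lora_mm_id below), so adapters contribute low-rank deltas on top of the
// base matmul.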
634
635
ggml_tensor * llm_graph_context::build_lora_mm_id(
636
          ggml_tensor * w,   // ggml_tensor * as
637
          ggml_tensor * cur, // ggml_tensor * b
638
0
          ggml_tensor * ids) const {
639
0
    ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);
640
0
    for (const auto & lora : *loras) {
641
0
        llama_adapter_lora_weight * lw = lora.first->get_weight(w);
642
0
        if (lw == nullptr) {
643
0
            continue;
644
0
        }
645
646
0
        const float alpha = lora.first->alpha;
647
0
        const float rank  = (float) lw->b->ne[0];
648
0
        const float scale = alpha ? lora.second * alpha / rank : lora.second;
649
650
0
        ggml_tensor * ab_cur = ggml_mul_mat_id(
651
0
                ctx0, lw->b,
652
0
                ggml_mul_mat_id(ctx0, lw->a, cur, ids),
653
0
                ids
654
0
                );
655
656
0
        ab_cur = ggml_scale(ctx0, ab_cur, scale);
657
0
        res = ggml_add(ctx0, res, ab_cur);
658
0
    }
659
660
0
    return res;
661
0
}
662
663
ggml_tensor * llm_graph_context::build_norm(
664
         ggml_tensor * cur,
665
         ggml_tensor * mw,
666
         ggml_tensor * mb,
667
       llm_norm_type   type,
668
0
                 int   il) const {
669
0
    switch (type) {
670
0
        case LLM_NORM:       cur = ggml_norm    (ctx0, cur, hparams.f_norm_eps);     break;
671
0
        case LLM_NORM_RMS:   cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break;
672
0
        case LLM_NORM_GROUP:
673
0
            {
674
0
                cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]);
675
0
                cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps);
676
0
                cur = ggml_reshape_2d(ctx0, cur, cur->ne[0],    cur->ne[2]);
677
0
            } break;
678
0
    }
679
680
0
    if (mw || mb) {
681
0
        cb(cur, "norm", il);
682
0
    }
683
684
0
    if (mw) {
685
0
        cur = ggml_mul(ctx0, cur, mw);
686
0
        if (mb) {
687
0
            cb(cur, "norm_w", il);
688
0
        }
689
0
    }
690
691
0
    if (mb) {
692
0
        cur = ggml_add(ctx0, cur, mb);
693
0
    }
694
695
0
    return cur;
696
0
}
697
698
ggml_tensor * llm_graph_context::build_ffn(
699
         ggml_tensor * cur,
700
         ggml_tensor * up,
701
         ggml_tensor * up_b,
702
         ggml_tensor * up_s,
703
         ggml_tensor * gate,
704
         ggml_tensor * gate_b,
705
         ggml_tensor * gate_s,
706
         ggml_tensor * down,
707
         ggml_tensor * down_b,
708
         ggml_tensor * down_s,
709
         ggml_tensor * act_scales,
710
     llm_ffn_op_type   type_op,
711
   llm_ffn_gate_type   type_gate,
712
0
                 int   il) const {
713
0
    ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur;
714
0
    cb(tmp, "ffn_up", il);
715
716
0
    if (up_b) {
717
0
        tmp = ggml_add(ctx0, tmp, up_b);
718
0
        cb(tmp, "ffn_up_b", il);
719
0
    }
720
721
0
    if (up_s) {
722
0
        tmp = ggml_mul(ctx0, tmp, up_s);
723
0
        cb(tmp, "ffn_up_s", il);
724
0
    }
725
726
0
    if (gate) {
727
0
        switch (type_gate) {
728
0
            case LLM_FFN_SEQ:
729
0
                {
730
0
                    cur = build_lora_mm(gate, tmp);
731
0
                    cb(cur, "ffn_gate", il);
732
0
                } break;
733
0
            case LLM_FFN_PAR:
734
0
                {
735
0
                    cur = build_lora_mm(gate, cur);
736
0
                    cb(cur, "ffn_gate", il);
737
0
                } break;
738
0
        }
739
740
0
        if (gate_b) {
741
0
            cur = ggml_add(ctx0, cur, gate_b);
742
0
            cb(cur, "ffn_gate_b", il);
743
0
        }
744
745
0
        if (gate_s) {
746
0
            cur = ggml_mul(ctx0, cur, gate_s);
747
0
            cb(cur, "ffn_gate_s", il);
748
0
        }
749
750
0
    } else {
751
0
        cur = tmp;
752
0
    }
753
754
0
    switch (type_op) {
755
0
        case LLM_FFN_SILU:
756
0
            if (gate && type_gate == LLM_FFN_PAR) {
757
0
                cur = ggml_swiglu_split(ctx0, cur, tmp);
758
0
                cb(cur, "ffn_swiglu", il);
759
0
                type_gate = LLM_FFN_SEQ;
760
0
            } else {
761
0
                cur = ggml_silu(ctx0, cur);
762
0
                cb(cur, "ffn_silu", il);
763
0
            } break;
764
0
        case LLM_FFN_GELU:
765
0
            if (gate && type_gate == LLM_FFN_PAR) {
766
0
                cur = ggml_geglu_split(ctx0, cur, tmp);
767
0
                cb(cur, "ffn_geglu", il);
768
0
                type_gate = LLM_FFN_SEQ;
769
0
            } else {
770
0
                cur = ggml_gelu(ctx0, cur);
771
0
                cb(cur, "ffn_gelu", il);
772
0
                if (act_scales != NULL) {
773
0
                    cur = ggml_div(ctx0, cur, act_scales);
774
0
                    cb(cur, "ffn_act", il);
775
0
                }
776
0
            } break;
777
0
        case LLM_FFN_RELU:
778
0
            if (gate && type_gate == LLM_FFN_PAR) {
779
0
                cur = ggml_reglu_split(ctx0, cur, tmp);
780
0
                cb(cur, "ffn_reglu", il);
781
0
                type_gate = LLM_FFN_SEQ;
782
0
            } else {
783
0
                cur = ggml_relu(ctx0, cur);
784
0
                cb(cur, "ffn_relu", il);
785
0
            } break;
786
0
        case LLM_FFN_RELU_SQR:
787
0
            {
788
0
                cur = ggml_relu(ctx0, cur);
789
0
                cb(cur, "ffn_relu", il);
790
791
0
                cur = ggml_sqr(ctx0, cur);
792
0
                cb(cur, "ffn_sqr(relu)", il);
793
0
            } break;
794
0
        case LLM_FFN_SWIGLU:
795
0
            {
796
0
                cur = ggml_swiglu(ctx0, cur);
797
0
                cb(cur, "ffn_swiglu", il);
798
0
            } break;
799
0
        case LLM_FFN_GEGLU:
800
0
            {
801
0
                cur = ggml_geglu(ctx0, cur);
802
0
                cb(cur, "ffn_geglu", il);
803
0
            } break;
804
0
        case LLM_FFN_REGLU:
805
0
            {
806
0
                cur = ggml_reglu(ctx0, cur);
807
0
                cb(cur, "ffn_reglu", il);
808
0
            } break;
809
0
        default:
810
0
            GGML_ABORT("fatal error");
811
0
    }
812
813
    // expand here so that we can fuse ffn gate
814
0
    ggml_build_forward_expand(gf, cur);
815
816
0
    if (gate && type_gate == LLM_FFN_PAR) {
817
0
        cur = ggml_mul(ctx0, cur, tmp);
818
0
        cb(cur, "ffn_gate_par", il);
819
0
    }
820
821
0
    if (down) {
822
0
        cur = build_lora_mm(down, cur);
823
0
        if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
824
            // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
825
0
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
826
0
        }
827
0
    }
828
829
0
    if (down_b) {
830
0
        cb(cur, "ffn_down", il);
831
0
    }
832
833
0
    if (down_b) {
834
0
        cur = ggml_add(ctx0, cur, down_b);
835
0
    }
836
837
0
    if (down_s) {
838
0
        cur = ggml_mul(ctx0, cur, down_s);
839
0
        cb(cur, "ffn_down_s", il);
840
0
    }
841
842
0
    return cur;
843
0
}
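// For the common SwiGLU case (type_op == LLM_FFN_SILU with a parallel gate), the
// code above computes, per token:
//   ffn(x) = down( silu(gate*x) * (up*x) )   (+ optional biases and scales)
// where the element-wise product is fused into ggml_swiglu_split; the separate
// ggml_mul "ffn_gate_par" path only runs when no fused *glu op was applied.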
844
845
ggml_tensor * llm_graph_context::build_moe_ffn(
846
         ggml_tensor * cur,
847
         ggml_tensor * gate_inp,
848
         ggml_tensor * up_exps,
849
         ggml_tensor * gate_exps,
850
         ggml_tensor * down_exps,
851
         ggml_tensor * exp_probs_b,
852
             int64_t   n_expert,
853
             int64_t   n_expert_used,
854
     llm_ffn_op_type   type_op,
855
                bool   norm_w,
856
                bool   scale_w,
857
               float   w_scale,
858
         llama_expert_gating_func_type gating_op,
859
                 int   il,
860
0
         ggml_tensor * probs_in) const {
861
0
    return build_moe_ffn(
862
0
        cur,
863
0
        gate_inp,  /* gate_inp_b  */ nullptr,
864
0
        up_exps,   /* up_exps_b   */ nullptr,
865
0
        gate_exps, /* gate_exps_b */ nullptr,
866
0
        down_exps, /* down_exps_b */ nullptr,
867
0
        exp_probs_b,
868
0
        n_expert,
869
0
        n_expert_used,
870
0
        type_op,
871
0
        norm_w,
872
0
        scale_w,
873
0
        w_scale,
874
0
        gating_op,
875
0
        il,
876
0
        probs_in
877
0
    );
878
0
}
879
880
ggml_tensor * llm_graph_context::build_moe_ffn(
881
         ggml_tensor * cur,
882
         ggml_tensor * gate_inp,
883
         ggml_tensor * gate_inp_b,
884
         ggml_tensor * up_exps,
885
         ggml_tensor * up_exps_b,
886
         ggml_tensor * gate_exps,
887
         ggml_tensor * gate_exps_b,
888
         ggml_tensor * down_exps,
889
         ggml_tensor * down_exps_b,
890
         ggml_tensor * exp_probs_b,
891
             int64_t   n_expert,
892
             int64_t   n_expert_used,
893
     llm_ffn_op_type   type_op,
894
                bool   norm_w,
895
                bool   scale_w,
896
               float   w_scale,
897
        llama_expert_gating_func_type gating_op,
898
                 int   il,
899
0
         ggml_tensor * probs_in) const {
900
0
    const int64_t n_embd   = cur->ne[0];
901
0
    const int64_t n_tokens = cur->ne[1];
902
0
    const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, we apply the sigmoid-ed weights before the FFN
903
904
0
    ggml_tensor * logits = nullptr;
905
906
0
    if (probs_in == nullptr) {
907
0
        logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens]
908
0
        cb(logits, "ffn_moe_logits", il);
909
0
    } else {
910
0
        logits = probs_in;
911
0
    }
912
913
0
    if (gate_inp_b) {
914
0
        logits = ggml_add(ctx0, logits, gate_inp_b);
915
0
        cb(logits, "ffn_moe_logits_biased", il);
916
0
    }
917
918
0
    ggml_tensor * probs = nullptr;
919
0
    switch (gating_op) {
920
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
921
0
            {
922
0
                probs = ggml_soft_max(ctx0, logits); // [n_expert, n_tokens]
923
0
            } break;
924
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
925
0
            {
926
0
                probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
927
0
            } break;
928
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT:
929
0
            {
930
0
                probs = logits; // [n_expert, n_tokens]
931
0
            } break;
932
0
        default:
933
0
            GGML_ABORT("fatal error");
934
0
    }
935
0
    cb(probs, "ffn_moe_probs", il);
936
937
    // add expert selection bias - introduced in DeepSeek V3
938
    // leave probs unbiased as it's later used to get expert weights
939
0
    ggml_tensor * selection_probs = probs;
940
0
    if (exp_probs_b != nullptr) {
941
0
        selection_probs = ggml_add(ctx0, probs, exp_probs_b);
942
0
        cb(selection_probs, "ffn_moe_probs_biased", il);
943
0
    }
944
945
    // llama4 doesn't have exp_probs_b, and sigmoid is only used after top_k
946
    // see: https://github.com/meta-llama/llama-models/blob/699a02993512fb36936b1b0741e13c06790bcf98/models/llama4/moe.py#L183-L198
947
0
    if (arch == LLM_ARCH_LLAMA4) {
948
0
        selection_probs = logits;
949
0
    }
950
951
0
    if (arch == LLM_ARCH_GROVEMOE) {
952
0
        selection_probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens]
953
0
        cb(selection_probs, "ffn_moe_probs_biased", il);
954
0
    }
955
956
    // select top n_group_used expert groups
957
    // https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/e815299b0bcbac849fa540c768ef21845365c9eb/modeling_deepseek.py#L440-L457
958
0
    if (hparams.n_expert_groups > 1 && n_tokens > 0) {
959
0
        const int64_t n_exp_per_group = n_expert / hparams.n_expert_groups;
960
961
        // organize experts into n_expert_groups
962
0
        ggml_tensor * selection_groups = ggml_reshape_3d(ctx0, selection_probs, n_exp_per_group, hparams.n_expert_groups, n_tokens); // [n_exp_per_group, n_expert_groups, n_tokens]
963
964
0
        ggml_tensor * group_scores = ggml_top_k(ctx0, selection_groups, 2); // [2, n_expert_groups, n_tokens]
965
0
        group_scores = ggml_get_rows(ctx0, ggml_reshape_4d(ctx0, selection_groups, 1, selection_groups->ne[0], selection_groups->ne[1], selection_groups->ne[2]), group_scores); // [1, 2, n_expert_groups, n_tokens]
966
967
        // get top n_group_used expert groups
968
0
        group_scores = ggml_sum_rows(ctx0, ggml_reshape_3d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2], group_scores->ne[3])); // [1, n_expert_groups, n_tokens]
969
0
        group_scores = ggml_reshape_2d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2]); // [n_expert_groups, n_tokens]
970
971
0
        ggml_tensor * expert_groups = ggml_top_k(ctx0, group_scores, hparams.n_group_used); // [n_group_used, n_tokens]
972
0
        cb(expert_groups, "ffn_moe_group_topk", il);
973
974
        // mask out the other groups
975
0
        selection_probs = ggml_get_rows(ctx0, selection_groups, expert_groups); // [n_exp_per_group, n_group_used, n_tokens]
976
0
        selection_probs = ggml_set_rows(ctx0, ggml_scale_bias(ctx0, selection_groups, 0.0f, -INFINITY), selection_probs, expert_groups); // [n_exp_per_group, n_expert_groups, n_tokens]
977
0
        selection_probs = ggml_reshape_2d(ctx0, selection_probs, n_expert, n_tokens); // [n_expert, n_tokens]
978
0
        cb(selection_probs, "ffn_moe_probs_masked", il);
979
0
    }
980
981
    // select experts
982
0
    ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
983
0
    cb(selected_experts->src[0], "ffn_moe_argsort", il);
984
0
    cb(selected_experts, "ffn_moe_topk", il);
985
986
0
    if (arch == LLM_ARCH_GROVEMOE && n_expert != hparams.n_expert) {
987
        // TODO: Use scalar div instead when/if implemented
988
0
        ggml_tensor * f_sel = ggml_cast(ctx0, selected_experts, GGML_TYPE_F32);
989
0
        selected_experts = ggml_cast(ctx0, ggml_scale(ctx0, f_sel, 1.0f / float(hparams.n_group_experts)), GGML_TYPE_I32);
990
0
        probs = ggml_reshape_3d(ctx0, probs, 1, hparams.n_expert, n_tokens);
991
0
    } else {
992
0
        probs = ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens);
993
0
    }
994
995
0
    ggml_tensor * weights = ggml_get_rows(ctx0, probs, selected_experts); // [1, n_expert_used, n_tokens]
996
0
    cb(weights, "ffn_moe_weights", il);
997
998
999
0
    if (gating_op == LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT) {
1000
0
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);
1001
0
        weights = ggml_soft_max(ctx0, weights); // [n_expert_used, n_tokens]
1002
0
        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
1003
0
        cb(weights, "ffn_moe_weights_softmax", il);
1004
0
    }
1005
1006
0
    if (norm_w) {
1007
0
        weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens);
1008
1009
0
        ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); // [1, n_tokens]
1010
0
        cb(weights_sum, "ffn_moe_weights_sum", il);
1011
1012
        // Avoid division by zero, clamp to smallest number representable by F16
1013
0
        weights_sum = ggml_clamp(ctx0, weights_sum, 6.103515625e-5, INFINITY);
1014
0
        cb(weights_sum, "ffn_moe_weights_sum_clamped", il);
1015
1016
0
        weights = ggml_div(ctx0, weights, weights_sum); // [n_expert_used, n_tokens]
1017
0
        cb(weights, "ffn_moe_weights_norm", il);
1018
1019
0
        weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens);
1020
0
    }
1021
0
    if (scale_w) {
1022
0
        weights = ggml_scale(ctx0, weights, w_scale);
1023
0
        cb(weights, "ffn_moe_weights_scaled", il);
1024
0
    }
1025
1026
    // call early so that topk-moe can be used
1027
0
    ggml_build_forward_expand(gf, weights);
1028
1029
0
    cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens);
1030
1031
0
    if (weight_before_ffn) {
1032
        // repeat cur to [n_embd, n_expert_used, n_tokens]
1033
0
        ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1);
1034
0
        cur = ggml_mul(ctx0, repeated, weights);
1035
0
        cb(cur, "ffn_moe_weighted", il);
1036
0
    }
1037
1038
0
    ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
1039
0
    cb(up, "ffn_moe_up", il);
1040
1041
0
    if (up_exps_b) {
1042
0
        up = ggml_add_id(ctx0, up, up_exps_b, selected_experts);
1043
0
        cb(up, "ffn_moe_up_biased", il);
1044
0
    }
1045
1046
0
    ggml_tensor * experts = nullptr;
1047
0
    if (gate_exps) {
1048
0
        cur = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens]
1049
0
        cb(cur, "ffn_moe_gate", il);
1050
0
    } else {
1051
0
        cur = up;
1052
0
    }
1053
1054
0
    if (gate_exps_b) {
1055
0
        cur = ggml_add_id(ctx0, cur, gate_exps_b, selected_experts);
1056
0
        cb(cur, "ffn_moe_gate_biased", il);
1057
0
    }
1058
1059
0
    switch (type_op) {
1060
0
        case LLM_FFN_SILU:
1061
0
            if (gate_exps) {
1062
0
                cur = ggml_swiglu_split(ctx0, cur, up);
1063
0
                cb(cur, "ffn_moe_swiglu", il);
1064
0
            } else {
1065
0
                cur = ggml_silu(ctx0, cur);
1066
0
                cb(cur, "ffn_moe_silu", il);
1067
0
            } break;
1068
0
        case LLM_FFN_GELU:
1069
0
            if (gate_exps) {
1070
0
                cur = ggml_geglu_split(ctx0, cur, up);
1071
0
                cb(cur, "ffn_moe_geglu", il);
1072
0
            } else {
1073
0
                cur = ggml_gelu(ctx0, cur);
1074
0
                cb(cur, "ffn_moe_gelu", il);
1075
0
            } break;
1076
0
        case LLM_FFN_SWIGLU_OAI_MOE:
1077
0
            {
1078
                // TODO: move to hparams?
1079
0
                constexpr float alpha = 1.702f;
1080
0
                constexpr float limit = 7.0f;
1081
0
                cur = ggml_swiglu_oai(ctx0, cur, up, alpha, limit);
1082
0
                cb(cur, "ffn_moe_swiglu_oai", il);
1083
0
            } break;
1084
0
        case LLM_FFN_RELU:
1085
0
            if (gate_exps) {
1086
0
                cur = ggml_reglu_split(ctx0, cur, up);
1087
0
                cb(cur, "ffn_moe_reglu", il);
1088
0
            } else {
1089
0
                cur = ggml_relu(ctx0, cur);
1090
0
                cb(cur, "ffn_moe_relu", il);
1091
0
            } break;
1092
0
        default:
1093
0
            GGML_ABORT("fatal error");
1094
0
    }
1095
1096
    // expand here so that we can fuse ffn gate
1097
0
    ggml_build_forward_expand(gf, cur);
1098
1099
0
    experts = build_lora_mm_id(down_exps, cur, selected_experts); // [n_embd, n_expert_used, n_tokens]
1100
0
    cb(experts, "ffn_moe_down", il);
1101
1102
0
    if (down_exps_b) {
1103
0
        experts = ggml_add_id(ctx0, experts, down_exps_b, selected_experts);
1104
0
        cb(experts, "ffn_moe_down_biased", il);
1105
0
    }
1106
1107
0
    if (!weight_before_ffn) {
1108
0
        experts = ggml_mul(ctx0, experts, weights);
1109
0
        cb(experts, "ffn_moe_weighted", il);
1110
0
    }
1111
1112
0
    ggml_tensor * cur_experts[LLAMA_MAX_EXPERTS] = { nullptr };
1113
1114
0
    assert(n_expert_used > 0);
1115
1116
    // order the views before the adds
1117
0
    for (uint32_t i = 0; i < hparams.n_expert_used; ++i) {
1118
0
        cur_experts[i] = ggml_view_2d(ctx0, experts, n_embd, n_tokens, experts->nb[2], i*experts->nb[1]);
1119
1120
0
        ggml_build_forward_expand(gf, cur_experts[i]);
1121
0
    }
1122
1123
    // aggregate experts
1124
    // note: here we explicitly use hparams.n_expert_used instead of n_expert_used
1125
    //       to avoid a potentially large number of add nodes during warmup
1126
    //       ref: https://github.com/ggml-org/llama.cpp/pull/14753
1127
0
    ggml_tensor * moe_out = cur_experts[0];
1128
1129
0
    for (uint32_t i = 1; i < hparams.n_expert_used; ++i) {
1130
0
        moe_out = ggml_add(ctx0, moe_out, cur_experts[i]);
1131
0
    }
1132
1133
0
    if (hparams.n_expert_used == 1) {
1134
        // avoid returning a non-contiguous tensor
1135
0
        moe_out = ggml_cont(ctx0, moe_out);
1136
0
    }
1137
1138
0
    cb(moe_out, "ffn_moe_out", il);
1139
1140
0
    return moe_out;
1141
0
}
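// Worked routing example for the code above, assuming softmax gating, n_expert = 4,
// n_expert_used = 2 and norm_w = true: logits that softmax to
// probs = {0.50, 0.30, 0.15, 0.05} select experts {0, 1} via top-k with raw weights
// {0.50, 0.30}; renormalizing by their sum (0.80) gives the final expert weights
// {0.625, 0.375} used to mix the expert outputs.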
1142
1143
// input embeddings with optional lora
1144
0
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const {
1145
0
    const int64_t n_embd = hparams.n_embd_inp();
1146
1147
0
    auto inp = std::make_unique<llm_graph_input_embd>();
1148
1149
0
    ggml_tensor * cur = nullptr;
1150
1151
0
    if (ubatch.token) {
1152
0
        inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens);
1153
        //cb(inp->tokens, "inp_tokens", -1);
1154
0
        ggml_set_input(inp->tokens);
1155
0
        res->t_tokens = inp->tokens;
1156
1157
0
        cur = ggml_get_rows(ctx0, tok_embd, inp->tokens);
1158
1159
        // apply lora for embedding tokens if needed
1160
0
        for (const auto & lora : *loras) {
1161
0
            llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd);
1162
0
            if (lw == nullptr) {
1163
0
                continue;
1164
0
            }
1165
1166
0
            const float adapter_scale = lora.second;
1167
0
            const float scale = lw->get_scale(lora.first->alpha, adapter_scale);
1168
1169
0
            ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat(
1170
0
                        ctx0, lw->b, // non-transposed lora_b
1171
0
                        ggml_get_rows(ctx0, lw->a, inp->tokens)
1172
0
                        ), scale);
1173
1174
0
            cur = ggml_add(ctx0, cur, inpL_delta);
1175
0
        }
1176
0
    } else {
1177
0
        inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
1178
0
        ggml_set_input(inp->embd);
1179
1180
0
        cur = inp->embd;
1181
0
    }
1182
1183
    // For Granite architecture
1184
0
    if (hparams.f_embedding_scale != 0.0f) {
1185
0
        cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale);
1186
0
    }
1187
1188
0
    cb(cur, "inp_embd", -1);
1189
1190
0
    res->add_input(std::move(inp));
1191
1192
0
    return cur;
1193
0
}
1194
1195
0
ggml_tensor * llm_graph_context::build_inp_pos() const {
1196
0
    auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd());
1197
1198
0
    auto & cur = inp->pos;
1199
1200
0
    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd());
1201
0
    ggml_set_input(cur);
1202
1203
0
    res->add_input(std::move(inp));
1204
1205
0
    return cur;
1206
0
}
1207
1208
0
ggml_tensor * llm_graph_context::build_inp_attn_scale() const {
1209
0
    auto inp = std::make_unique<llm_graph_input_attn_temp>(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale);
1210
1211
0
    auto & cur = inp->attn_scale;
1212
1213
    // this needs to be 1x1xN for broadcasting
1214
0
    cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens);
1215
0
    ggml_set_input(cur);
1216
1217
0
    res->add_input(std::move(inp));
1218
1219
0
    return cur;
1220
0
}
1221
1222
0
ggml_tensor * llm_graph_context::build_inp_out_ids() const {
1223
    // note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls,
1224
    //       but this would make the graph topology depend on the number of output tokens, which can interfere with
1225
    //       features that require constant topology such as pipeline parallelism
1226
    //       ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471
1227
    //if (n_outputs < n_tokens) {
1228
    //    return nullptr;
1229
    //}
1230
1231
0
    auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs);
1232
1233
0
    auto & cur = inp->out_ids;
1234
1235
0
    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs);
1236
0
    ggml_set_input(cur);
1237
1238
0
    res->add_input(std::move(inp));
1239
1240
0
    return cur;
1241
0
}
1242
1243
0
ggml_tensor * llm_graph_context::build_inp_mean() const {
1244
0
    auto inp = std::make_unique<llm_graph_input_mean>(cparams);
1245
1246
0
    auto & cur = inp->mean;
1247
1248
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq);
1249
0
    ggml_set_input(cur);
1250
1251
0
    res->add_input(std::move(inp));
1252
1253
0
    return cur;
1254
0
}
1255
1256
0
ggml_tensor * llm_graph_context::build_inp_cls() const {
1257
0
    auto inp = std::make_unique<llm_graph_input_cls>(cparams, arch);
1258
1259
0
    auto & cur = inp->cls;
1260
1261
0
    cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq);
1262
0
    ggml_set_input(cur);
1263
1264
0
    res->add_input(std::move(inp));
1265
1266
0
    return cur;
1267
0
}
1268
1269
0
ggml_tensor * llm_graph_context::build_inp_cross_embd() const {
1270
0
    auto inp = std::make_unique<llm_graph_input_cross_embd>(cross);
1271
1272
0
    auto & cur = inp->cross_embd;
1273
1274
    // if we have the output embeddings from the encoder, use them directly
1275
    // TODO: needs more work to be correct, for now just use the tensor shape
1276
    //if (cross->t_embd) {
1277
    //    cur = ggml_view_tensor(ctx0, cross->t_embd);
1278
1279
    //    return cur;
1280
    //}
1281
1282
0
    const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd_inp();
1283
0
    const auto n_enc  = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;
1284
1285
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc);
1286
0
    ggml_set_input(cur);
1287
1288
0
    res->add_input(std::move(inp));
1289
1290
0
    return cur;
1291
0
}
1292
1293
0
ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const {
1294
0
    auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams);
1295
1296
0
    auto & cur = inp->pos_bucket;
1297
1298
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens);
1299
0
    ggml_set_input(cur);
1300
1301
0
    res->add_input(std::move(inp));
1302
1303
0
    return cur;
1304
0
}
1305
1306
0
ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const {
1307
0
    const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx);
1308
1309
0
    auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, mctx_cur);
1310
1311
0
    const auto n_kv = mctx_cur->get_n_kv();
1312
1313
0
    auto & cur = inp->pos_bucket;
1314
1315
0
    cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
1316
0
    ggml_set_input(cur);
1317
1318
0
    res->add_input(std::move(inp));
1319
1320
0
    return cur;
1321
0
}
1322
1323
0
ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const {
1324
0
    ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]);
1325
0
    cb(pos_bucket_1d, "pos_bucket_1d", -1);
1326
1327
0
    ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d);
1328
1329
0
    pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]);
1330
0
    pos_bias = ggml_permute   (ctx0, pos_bias, 2, 0, 1, 3);
1331
0
    pos_bias = ggml_cont      (ctx0, pos_bias);
1332
1333
0
    cb(pos_bias, "pos_bias", -1);
1334
1335
0
    return pos_bias;
1336
0
}
1337
1338
ggml_tensor * llm_graph_context::build_attn_mha(
1339
         ggml_tensor * q,
1340
         ggml_tensor * k,
1341
         ggml_tensor * v,
1342
         ggml_tensor * kq_b,
1343
         ggml_tensor * kq_mask,
1344
         ggml_tensor * sinks,
1345
         ggml_tensor * v_mla,
1346
               float   kq_scale,
1347
0
                 int   il) const {
1348
0
    const bool v_trans = v->nb[1] > v->nb[2];
1349
1350
    // split the batch into streams if needed
1351
0
    const auto n_stream = k->ne[3];
1352
1353
0
    q = ggml_view_4d(ctx0, q, q->ne[0], q->ne[1], q->ne[2]/n_stream, n_stream, q->nb[1], q->nb[2], q->nb[3]/n_stream, 0);
1354
1355
0
    q = ggml_permute(ctx0, q, 0, 2, 1, 3);
1356
0
    k = ggml_permute(ctx0, k, 0, 2, 1, 3);
1357
0
    v = ggml_permute(ctx0, v, 0, 2, 1, 3);
1358
1359
0
    ggml_tensor * cur;
1360
1361
0
    if (cparams.flash_attn && kq_b == nullptr) {
1362
0
        GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet");
1363
1364
0
        if (v_trans) {
1365
0
            v = ggml_transpose(ctx0, v);
1366
0
        }
1367
1368
        // this can happen when KV cache is not used (e.g. an embedding model with non-causal attn)
1369
0
        if (k->type == GGML_TYPE_F32) {
1370
0
            k = ggml_cast(ctx0, k, GGML_TYPE_F16);
1371
0
        }
1372
1373
0
        if (v->type == GGML_TYPE_F32) {
1374
0
            v = ggml_cast(ctx0, v, GGML_TYPE_F16);
1375
0
        }
1376
1377
0
        cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias,
1378
0
                                  hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f);
1379
0
        cb(cur, LLAMA_TENSOR_NAME_FATTN, il);
1380
1381
0
        ggml_flash_attn_ext_add_sinks(cur, sinks);
1382
0
        ggml_flash_attn_ext_set_prec (cur, GGML_PREC_F32);
1383
1384
0
        if (v_mla) {
1385
#if 0
1386
            // v_mla can be applied as a matrix-vector multiplication with broadcasting across dimension 3 == n_tokens.
1387
            // However, the code is optimized for dimensions 0 and 1 being large, so this is inefficient.
1388
            cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens);
1389
            cur = ggml_mul_mat(ctx0, v_mla, cur);
1390
#else
1391
            // It's preferable to do the calculation as a matrix-matrix multiplication with n_tokens in dimension 1.
1392
            // The permutations are noops and only change how the tensor data is interpreted.
1393
0
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
1394
0
            cur = ggml_mul_mat(ctx0, v_mla, cur);
1395
0
            cb(cur, "fattn_mla", il);
1396
0
            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
1397
0
            cur = ggml_cont(ctx0, cur); // Needed because ggml_reshape_2d expects contiguous inputs.
1398
0
#endif
1399
0
        }
1400
1401
0
        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
1402
0
    } else {
1403
0
        ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
1404
0
        cb(kq, "kq", il);
1405
1406
        // note: this op tends to require high floating point range
1407
        //       while for some models F16 is enough, for others it is not, so we default to F32 here
1408
0
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
1409
1410
0
        if (arch == LLM_ARCH_GROK) {
1411
            // need to do the following:
1412
            // multiply by attn_output_multiplier
1413
            // and then :
1414
            // kq = 30 * tanh(kq / 30)
1415
            // before the softmax below
1416
1417
0
            kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, hparams.f_attn_out_scale / hparams.f_attn_logit_softcapping));
1418
0
            cb(kq, "kq_tanh", il);
1419
0
            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
1420
0
            cb(kq, "kq_scaled", il);
1421
0
        }
1422
1423
0
        if (hparams.attn_soft_cap) {
1424
0
            kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping);
1425
0
            cb(kq, "kq_scaled_1", il);
1426
0
            kq = ggml_tanh (ctx0, kq);
1427
0
            cb(kq, "kq_tanh", il);
1428
0
            kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping);
1429
0
            cb(kq, "kq_scaled_2", il);
1430
0
        }
1431
1432
0
        if (kq_b) {
1433
0
            kq = ggml_add(ctx0, kq, kq_b);
1434
0
            cb(kq, "kq_plus_kq_b", il);
1435
0
        }
1436
1437
0
        kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
1438
0
        ggml_soft_max_add_sinks(kq, sinks);
1439
0
        cb(kq, "kq_soft_max", il);
1440
1441
0
        if (!v_trans) {
1442
            // note: avoid this branch
1443
0
            v = ggml_cont(ctx0, ggml_transpose(ctx0, v));
1444
0
            cb(v, "v_cont", il);
1445
0
        }
1446
1447
0
        ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
1448
0
        cb(kqv, "kqv", il);
1449
1450
        // for MLA with the absorption optimization, we need to "decompress" from MQA back to MHA
1451
0
        if (v_mla) {
1452
0
            kqv = ggml_mul_mat(ctx0, v_mla, kqv);
1453
0
            cb(kqv, "kqv_mla", il);
1454
0
        }
1455
1456
0
        cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
1457
1458
        // recombine streams
1459
0
        cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]);
1460
1461
0
        if (!cparams.offload_kqv) {
1462
            // all nodes between the KV store and the attention output are run on the CPU
1463
0
            ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu);
1464
0
        }
1465
0
    }
1466
1467
0
    ggml_build_forward_expand(gf, cur);
1468
1469
0
    return cur;
1470
0
}
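
Stripped of the KV cache, soft-capping, sinks and backend scheduling, the non-flash-attention branch above is ordinary scaled dot-product attention: kq = softmax(kq_scale * K*q + mask), and the output is the kq-weighted sum of the value rows. Below is a tiny single-head, single-query sketch with made-up numbers.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    // one head, one query, three cached key/value rows; all values are made up
    const int d = 2, n_kv = 3;
    float q[d]       = {1.0f, 0.0f};
    float k[n_kv][d] = {{1, 0}, {0, 1}, {1, 1}};
    float v[n_kv][d] = {{1, 2}, {3, 4}, {5, 6}};
    const float kq_scale = 1.0f / sqrtf((float) d);

    // kq = K * q, scaled (the ggml_mul_mat(k, q) + kq_scale step)
    float kq[n_kv];
    for (int i = 0; i < n_kv; ++i) {
        kq[i] = 0.0f;
        for (int j = 0; j < d; ++j) kq[i] += k[i][j] * q[j];
        kq[i] *= kq_scale;
    }

    // softmax over the KV dimension; a causal/padding mask would add -INFINITY to blocked slots first
    float mx  = *std::max_element(kq, kq + n_kv);
    float sum = 0.0f;
    for (int i = 0; i < n_kv; ++i) { kq[i] = expf(kq[i] - mx); sum += kq[i]; }
    for (int i = 0; i < n_kv; ++i) { kq[i] /= sum; }

    // kqv: the attention output is a convex combination of the value rows
    float out[d] = {0.0f, 0.0f};
    for (int i = 0; i < n_kv; ++i)
        for (int j = 0; j < d; ++j)
            out[j] += kq[i] * v[i][j];

    printf("attention output: %.3f %.3f\n", out[0], out[1]);
    return 0;
}
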
1471
1472
0
llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const {
1473
0
    auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams);
1474
1475
    // note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch
1476
0
    inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1);
1477
0
    ggml_set_input(inp->self_kq_mask);
1478
1479
0
    inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
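
The second mask dimension is padded with GGML_PAD, i.e. n_tokens rounded up to a multiple of GGML_KQ_MASK_PAD, presumably so the attention kernels can operate on whole aligned row blocks. A small sketch of that round-up arithmetic follows; the helper name and the granularity of 64 are illustrative and not taken from the headers.

#include <cstdio>
#include <initializer_list>

// round x up to the next multiple of n - the usual GGML_PAD-style idiom
static int round_up(int x, int n) {
    return (x + n - 1) / n * n;
}

int main() {
    const int pad = 64;   // illustrative granularity
    for (int n_tokens : {1, 63, 64, 65, 200}) {
        printf("n_tokens = %3d -> padded mask rows = %3d\n", n_tokens, round_up(n_tokens, pad));
    }
    return 0;
}
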
1480
1481
0
    if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
1482
0
        inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1);
1483
0
        ggml_set_input(inp->self_kq_mask_swa);
1484
1485
0
        inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
1486
0
    } else {
1487
0
        inp->self_kq_mask_swa     = nullptr;
1488
0
        inp->self_kq_mask_swa_cnv = nullptr;
1489
0
    }
1490
1491
0
    return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp));
1492
0
}
1493
1494
ggml_tensor * llm_graph_context::build_attn(
1495
        llm_graph_input_attn_no_cache * inp,
1496
        ggml_tensor * wo,
1497
        ggml_tensor * wo_b,
1498
        ggml_tensor * q_cur,
1499
        ggml_tensor * k_cur,
1500
        ggml_tensor * v_cur,
1501
        ggml_tensor * kq_b,
1502
        ggml_tensor * sinks,
1503
        ggml_tensor * v_mla,
1504
            float     kq_scale,
1505
0
            int       il) const {
1506
0
    GGML_UNUSED(n_tokens);
1507
1508
    // these nodes are added to the graph together so that they are not reordered
1509
    // by doing so, the number of splits in the graph is reduced
1510
0
    ggml_build_forward_expand(gf, q_cur);
1511
0
    ggml_build_forward_expand(gf, k_cur);
1512
0
    ggml_build_forward_expand(gf, v_cur);
1513
1514
0
    const bool is_swa = hparams.is_swa(il);
1515
1516
0
    const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();
1517
1518
    // [TAG_NO_CACHE_PAD]
1519
    // TODO: if ubatch.equal_seqs() == true, we can split the three tensors below into ubatch.n_seqs_unq streams
1520
    //       but it might not be worth it: https://github.com/ggml-org/llama.cpp/pull/15636
1521
    //assert(!ubatch.equal_seqs() || (k_cur->ne[3] == 1 && k_cur->ne[3] == ubatch.n_seqs_unq));
1522
1523
0
    ggml_tensor * q = q_cur;
1524
0
    ggml_tensor * k = k_cur;
1525
0
    ggml_tensor * v = v_cur;
1526
1527
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1528
0
    cb(cur, "kqv_out", il);
1529
1530
0
    if (wo) {
1531
0
        cur = build_lora_mm(wo, cur);
1532
0
    }
1533
1534
0
    if (wo_b) {
1535
        //cb(cur, "kqv_wo", il);
1536
0
    }
1537
1538
0
    if (wo_b) {
1539
0
        cur = ggml_add(ctx0, cur, wo_b);
1540
0
    }
1541
1542
0
    return cur;
1543
0
}
1544
1545
static std::unique_ptr<llm_graph_input_attn_kv> build_attn_inp_kv_impl(
1546
           ggml_context * ctx0,
1547
     const llama_ubatch & ubatch,
1548
    const llama_hparams & hparams,
1549
    const llama_cparams & cparams,
1550
0
    const llama_kv_cache_context * mctx_cur) {
1551
1552
0
    auto inp = std::make_unique<llm_graph_input_attn_kv>(hparams, cparams, mctx_cur);
1553
1554
0
    {
1555
0
        GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_iswa for SWA");
1556
1557
0
        const auto n_kv     = mctx_cur->get_n_kv();
1558
0
        const auto n_tokens = ubatch.n_tokens;
1559
0
        const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;
1560
1561
0
        inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch);
1562
0
        inp->self_v_idxs = mctx_cur->build_input_v_idxs(ctx0, ubatch);
1563
1564
0
        inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
1565
0
        ggml_set_input(inp->self_kq_mask);
1566
1567
0
        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
1568
0
    }
1569
1570
0
    return inp;
1571
0
}
1572
1573
0
llm_graph_input_attn_kv * llm_graph_context::build_attn_inp_kv() const {
1574
0
    const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx);
1575
1576
0
    auto inp = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur);
1577
1578
0
    return (llm_graph_input_attn_kv *) res->add_input(std::move(inp));
1579
0
}
1580
1581
ggml_tensor * llm_graph_context::build_attn(
1582
        llm_graph_input_attn_kv * inp,
1583
        ggml_tensor * wo,
1584
        ggml_tensor * wo_b,
1585
        ggml_tensor * q_cur,
1586
        ggml_tensor * k_cur,
1587
        ggml_tensor * v_cur,
1588
        ggml_tensor * kq_b,
1589
        ggml_tensor * sinks,
1590
        ggml_tensor * v_mla,
1591
            float     kq_scale,
1592
0
            int       il) const {
1593
    // these nodes are added to the graph together so that they are not reordered
1594
    // by doing so, the number of splits in the graph is reduced
1595
    // expand k later to enable rope fusion, which writes directly into the KV cache
1596
0
    ggml_build_forward_expand(gf, q_cur);
1597
0
    ggml_build_forward_expand(gf, v_cur);
1598
0
    ggml_build_forward_expand(gf, k_cur);
1599
1600
0
    const auto * mctx_cur = inp->mctx;
1601
1602
    // store to KV cache
1603
0
    {
1604
0
        const auto & k_idxs = inp->get_k_idxs();
1605
0
        const auto & v_idxs = inp->get_v_idxs();
1606
1607
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
1608
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
1609
0
    }
1610
1611
0
    const auto & kq_mask = inp->get_kq_mask();
1612
1613
0
    ggml_tensor * q = q_cur;
1614
0
    ggml_tensor * k = mctx_cur->get_k(ctx0, il);
1615
0
    ggml_tensor * v = mctx_cur->get_v(ctx0, il);
1616
1617
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1618
0
    cb(cur, "kqv_out", il);
1619
1620
0
    if (wo) {
1621
0
        cur = build_lora_mm(wo, cur);
1622
0
        if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) {
1623
            // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators
1624
0
            ggml_mul_mat_set_prec(cur, GGML_PREC_F32);
1625
0
        }
1626
0
    }
1627
1628
0
    if (wo_b) {
1629
0
        cur = ggml_add(ctx0, cur, wo_b);
1630
0
    }
1631
1632
0
    return cur;
1633
0
}
1634
1635
ggml_tensor * llm_graph_context::build_attn(
1636
        llm_graph_input_attn_kv_iswa * inp,
1637
        ggml_tensor * wo,
1638
        ggml_tensor * wo_b,
1639
        ggml_tensor * q_cur,
1640
        ggml_tensor * k_cur,
1641
        ggml_tensor * v_cur,
1642
        ggml_tensor * kq_b,
1643
        ggml_tensor * sinks,
1644
        ggml_tensor * v_mla,
1645
            float     kq_scale,
1646
0
            int       il) const {
1647
    // these nodes are added to the graph together so that they are not reordered
1648
    // by doing so, the number of splits in the graph is reduced
1649
0
    ggml_build_forward_expand(gf, q_cur);
1650
1651
0
    if (k_cur) {
1652
0
        ggml_build_forward_expand(gf, k_cur);
1653
0
    }
1654
1655
0
    if (v_cur) {
1656
0
        ggml_build_forward_expand(gf, v_cur);
1657
0
    }
1658
1659
0
    const auto * mctx_iswa = inp->mctx;
1660
1661
0
    const bool is_swa = hparams.is_swa(il);
1662
1663
0
    const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base();
1664
1665
    // optionally store to KV cache
1666
0
    if (k_cur) {
1667
0
        const auto & k_idxs = is_swa ? inp->get_k_idxs_swa() : inp->get_k_idxs();
1668
1669
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il));
1670
0
    }
1671
1672
0
    if (v_cur) {
1673
0
        const auto & v_idxs = is_swa ? inp->get_v_idxs_swa() : inp->get_v_idxs();
1674
1675
0
        ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il));
1676
0
    }
1677
1678
0
    const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask();
1679
1680
0
    ggml_tensor * q = q_cur;
1681
0
    ggml_tensor * k = mctx_cur->get_k(ctx0, il);
1682
0
    ggml_tensor * v = mctx_cur->get_v(ctx0, il);
1683
1684
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1685
0
    cb(cur, "kqv_out", il);
1686
1687
0
    if (wo) {
1688
0
        cur = build_lora_mm(wo, cur);
1689
0
    }
1690
1691
0
    if (wo_b) {
1692
        //cb(cur, "kqv_wo", il);
1693
0
    }
1694
1695
0
    if (wo_b) {
1696
0
        cur = ggml_add(ctx0, cur, wo_b);
1697
0
    }
1698
1699
0
    return cur;
1700
0
}
1701
1702
0
llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const {
1703
0
    auto inp = std::make_unique<llm_graph_input_attn_cross>(cross);
1704
1705
0
    const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train;
1706
1707
0
    inp->cross_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1);
1708
0
    ggml_set_input(inp->cross_kq_mask);
1709
1710
0
    inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask;
1711
1712
0
    return (llm_graph_input_attn_cross *) res->add_input(std::move(inp));
1713
0
}
1714
1715
ggml_tensor * llm_graph_context::build_attn(
1716
        llm_graph_input_attn_cross * inp,
1717
        ggml_tensor * wo,
1718
        ggml_tensor * wo_b,
1719
        ggml_tensor * q_cur,
1720
        ggml_tensor * k_cur,
1721
        ggml_tensor * v_cur,
1722
        ggml_tensor * kq_b,
1723
        ggml_tensor * sinks,
1724
        ggml_tensor * v_mla,
1725
            float     kq_scale,
1726
0
            int       il) const {
1727
    // these nodes are added to the graph together so that they are not reordered
1728
    // by doing so, the number of splits in the graph is reduced
1729
0
    ggml_build_forward_expand(gf, q_cur);
1730
0
    ggml_build_forward_expand(gf, k_cur);
1731
0
    ggml_build_forward_expand(gf, v_cur);
1732
1733
0
    const auto & kq_mask = inp->get_kq_mask_cross();
1734
1735
0
    ggml_tensor * q = q_cur;
1736
0
    ggml_tensor * k = k_cur;
1737
0
    ggml_tensor * v = v_cur;
1738
1739
0
    ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il);
1740
0
    cb(cur, "kqv_out", il);
1741
1742
0
    if (wo) {
1743
0
        cur = build_lora_mm(wo, cur);
1744
0
    }
1745
1746
0
    if (wo_b) {
1747
        //cb(cur, "kqv_wo", il);
1748
0
    }
1749
1750
0
    if (wo_b) {
1751
0
        cur = ggml_add(ctx0, cur, wo_b);
1752
0
    }
1753
1754
0
    return cur;
1755
0
}
1756
1757
// TODO: maybe factor the inner implementation into a separate function
1758
//       like with the non-sliding window equivalent
1759
//       once sliding-window hybrid caches are a thing.
1760
0
llm_graph_input_attn_kv_iswa * llm_graph_context::build_attn_inp_kv_iswa() const {
1761
0
    const auto * mctx_cur = static_cast<const llama_kv_cache_iswa_context *>(mctx);
1762
1763
0
    auto inp = std::make_unique<llm_graph_input_attn_kv_iswa>(hparams, cparams, mctx_cur);
1764
1765
0
    const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq;
1766
1767
0
    {
1768
0
        const auto n_kv = mctx_cur->get_base()->get_n_kv();
1769
1770
0
        inp->self_k_idxs = mctx_cur->get_base()->build_input_k_idxs(ctx0, ubatch);
1771
0
        inp->self_v_idxs = mctx_cur->get_base()->build_input_v_idxs(ctx0, ubatch);
1772
1773
0
        inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
1774
0
        ggml_set_input(inp->self_kq_mask);
1775
1776
0
        inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask;
1777
0
    }
1778
1779
0
    {
1780
0
        GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache for non-SWA");
1781
1782
0
        const auto n_kv = mctx_cur->get_swa()->get_n_kv();
1783
1784
0
        inp->self_k_idxs_swa = mctx_cur->get_swa()->build_input_k_idxs(ctx0, ubatch);
1785
0
        inp->self_v_idxs_swa = mctx_cur->get_swa()->build_input_v_idxs(ctx0, ubatch);
1786
1787
0
        inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream);
1788
0
        ggml_set_input(inp->self_kq_mask_swa);
1789
1790
0
        inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa;
1791
0
    }
1792
1793
0
    return (llm_graph_input_attn_kv_iswa *) res->add_input(std::move(inp));
1794
0
}
1795
1796
ggml_tensor * llm_graph_context::build_rs(
1797
        ggml_tensor * s,
1798
        ggml_tensor * state_copy_main,
1799
        ggml_tensor * state_copy_extra,
1800
            int32_t   state_size,
1801
            int32_t   n_seqs,
1802
           uint32_t   n_rs,
1803
           uint32_t   rs_head,
1804
           uint32_t   rs_size,
1805
            int32_t   rs_zero,
1806
0
        const llm_graph_get_rows_fn & get_state_rows) const {
1807
1808
0
    ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, rs_size);
1809
1810
    // Clear a single state, which the row-gather below then copies into every other slot that needs clearing.
1811
    // Note that this is a no-op when the view is zero-sized.
1812
0
    ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0));
1813
0
    ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0));
1814
1815
    // copy states
1816
    // NOTE: assuming the copy destinations are ALL contained between rs_head and rs_head + n_rs
1817
    // {state_size, rs_size} -> {state_size, n_seqs}
1818
0
    ggml_tensor * output_states = get_state_rows(ctx0, states, state_copy_main);
1819
0
    ggml_build_forward_expand(gf, output_states);
1820
1821
    // copy extra states which won't be changed further (between n_seqs and n_rs)
1822
0
    ggml_tensor * states_extra = ggml_get_rows(ctx0, states, state_copy_extra);
1823
0
    ggml_build_forward_expand(gf,
1824
0
        ggml_cpy(ctx0,
1825
0
            states_extra,
1826
0
            ggml_view_1d(ctx0, s, state_size*(n_rs - n_seqs), (rs_head + n_seqs)*state_size*ggml_element_size(s))));
1827
1828
0
    return output_states;
1829
0
}
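
The view/gather choreography in build_rs is easier to see on a flat array: the recurrent buffer holds rs_size states of state_size floats, the rs_zero slot is cleared first, and the rows named by the copy indices are gathered into the per-sequence working set (sequences that need a fresh state simply point at the cleared slot). The plain-C++ sketch below uses made-up sizes and indices; the trailing states_extra/ggml_cpy write-back of the untouched slots is omitted for brevity.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const int state_size = 4;   // illustrative
    const int rs_size    = 6;   // total recurrent slots
    const int rs_zero    = 2;   // slot to clear; -1 would mean "none" (the zero-sized view above)

    // the recurrent buffer: rs_size rows of state_size floats
    std::vector<float> states(rs_size * state_size);
    for (size_t i = 0; i < states.size(); ++i) states[i] = (float) i;

    // clear the designated slot
    if (rs_zero >= 0) {
        std::fill_n(states.begin() + rs_zero * state_size, state_size, 0.0f);
    }

    // s_copy_main: for each working sequence, which slot to read its state from
    const std::vector<int> s_copy_main = {0, 2, 5};   // made-up indices; "2" picks up the cleared slot
    const int n_seqs = (int) s_copy_main.size();

    // gather {state_size, rs_size} -> {state_size, n_seqs}, like get_state_rows/ggml_get_rows
    std::vector<float> output(n_seqs * state_size);
    for (int s = 0; s < n_seqs; ++s) {
        std::copy_n(states.begin() + s_copy_main[s] * state_size, state_size,
                    output.begin() + s * state_size);
    }

    for (int s = 0; s < n_seqs; ++s) {
        printf("seq %d <- slot %d :", s, s_copy_main[s]);
        for (int e = 0; e < state_size; ++e) printf(" %g", output[s*state_size + e]);
        printf("\n");
    }
    return 0;
}
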
1830
1831
static std::unique_ptr<llm_graph_input_rs> build_rs_inp_impl(
1832
           ggml_context * ctx0,
1833
     const llama_ubatch & ubatch,
1834
0
    const llama_memory_recurrent_context * mctx_cur) {
1835
1836
0
    auto inp = std::make_unique<llm_graph_input_rs>(mctx_cur);
1837
1838
0
    const int64_t n_rs   = mctx_cur->get_n_rs();
1839
0
    const int64_t n_seqs = ubatch.n_seqs;
1840
1841
0
    inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs);
1842
0
    ggml_set_input(inp->s_copy);
1843
1844
0
    inp->s_copy_main  = ggml_view_1d(ctx0, inp->s_copy, n_seqs, 0);
1845
0
    inp->s_copy_extra = ggml_view_1d(ctx0, inp->s_copy, n_rs - n_seqs, n_seqs * inp->s_copy->nb[0]);
1846
1847
0
    return inp;
1848
0
}
1849
1850
0
llm_graph_input_rs * llm_graph_context::build_rs_inp() const {
1851
0
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
1852
1853
0
    auto inp = build_rs_inp_impl(ctx0, ubatch, mctx_cur);
1854
1855
0
    return (llm_graph_input_rs *) res->add_input(std::move(inp));
1856
0
}
1857
1858
ggml_tensor * llm_graph_context::build_rs(
1859
        llm_graph_input_rs * inp,
1860
        ggml_tensor * s,
1861
            int32_t   state_size,
1862
            int32_t   n_seqs,
1863
0
        const llm_graph_get_rows_fn & get_state_rows) const {
1864
0
    const auto * kv_state = inp->mctx;
1865
1866
0
    return build_rs(s, inp->s_copy_main, inp->s_copy_extra, state_size, n_seqs,
1867
0
                    kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(),
1868
0
                    get_state_rows);
1869
0
}
1870
1871
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load(
1872
    llm_graph_input_rs * inp,
1873
    const llama_ubatch & ubatch,
1874
0
                   int   il) const {
1875
0
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
1876
1877
0
    const auto token_shift_count = hparams.token_shift_count;
1878
1879
0
    const int64_t n_seqs  = ubatch.n_seqs;
1880
1881
0
    ggml_tensor * token_shift_all = mctx_cur->get_r_l(il);
1882
1883
0
    ggml_tensor * token_shift = build_rs(
1884
0
            inp, token_shift_all,
1885
0
            hparams.n_embd_r(), n_seqs);
1886
1887
0
    token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs);
1888
1889
0
    return token_shift;
1890
0
}
1891
1892
ggml_tensor * llm_graph_context::build_rwkv_token_shift_store(
1893
         ggml_tensor * token_shift,
1894
  const llama_ubatch & ubatch,
1895
0
                 int   il) const {
1896
0
    const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx);
1897
1898
0
    const auto token_shift_count = hparams.token_shift_count;
1899
0
    const auto n_embd = hparams.n_embd;
1900
1901
0
    const int64_t n_seqs = ubatch.n_seqs;
1902
1903
0
    const auto kv_head = mctx_cur->get_head();
1904
1905
0
    return ggml_cpy(
1906
0
        ctx0,
1907
0
        ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0),
1908
0
        ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il)))
1909
0
    );
1910
0
}
1911
1912
0
llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const {
1913
0
    const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx);
1914
1915
0
    auto inp_rs   = build_rs_inp_impl(ctx0, ubatch, mctx_cur->get_recr());
1916
0
    auto inp_attn = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn());
1917
1918
0
    auto inp = std::make_unique<llm_graph_input_mem_hybrid>(std::move(inp_attn), std::move(inp_rs), mctx_cur);
1919
1920
0
    return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp));
1921
0
}
1922
1923
void llm_graph_context::build_dense_out(
1924
    ggml_tensor * dense_2,
1925
0
    ggml_tensor * dense_3) const {
1926
0
    if (!cparams.embeddings || dense_2 == nullptr || dense_3 == nullptr) {
1927
0
        return;
1928
0
    }
1929
0
    ggml_tensor * cur = res->t_embd_pooled != nullptr ? res->t_embd_pooled : res->t_embd;
1930
0
    GGML_ASSERT(cur != nullptr && "missing t_embd_pooled/t_embd");
1931
1932
0
    cur = ggml_mul_mat(ctx0, dense_2, cur);
1933
0
    cur = ggml_mul_mat(ctx0, dense_3, cur);
1934
0
    cb(cur, "result_embd_pooled", -1);
1935
0
    res->t_embd_pooled = cur;
1936
0
    ggml_build_forward_expand(gf, cur);
1937
0
}
1938
1939
1940
void llm_graph_context::build_pooling(
1941
        ggml_tensor * cls,
1942
        ggml_tensor * cls_b,
1943
        ggml_tensor * cls_out,
1944
0
        ggml_tensor * cls_out_b) const {
1945
0
    if (!cparams.embeddings) {
1946
0
        return;
1947
0
    }
1948
1949
0
    ggml_tensor * inp = res->t_embd;
1950
1951
    //// find result_norm tensor for input
1952
    //for (int i = ggml_graph_n_nodes(gf) - 1; i >= 0; --i) {
1953
    //    inp = ggml_graph_node(gf, i);
1954
    //    if (strcmp(inp->name, "result_norm") == 0 || strcmp(inp->name, "result_embd") == 0) {
1955
    //        break;
1956
    //    }
1957
1958
    //    inp = nullptr;
1959
    //}
1960
1961
0
    GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor");
1962
1963
0
    ggml_tensor * cur;
1964
1965
0
    switch (pooling_type) {
1966
0
        case LLAMA_POOLING_TYPE_NONE:
1967
0
            {
1968
0
                cur = inp;
1969
0
            } break;
1970
0
        case LLAMA_POOLING_TYPE_MEAN:
1971
0
            {
1972
0
                ggml_tensor * inp_mean = build_inp_mean();
1973
0
                cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean);
1974
0
            } break;
1975
0
        case LLAMA_POOLING_TYPE_CLS:
1976
0
        case LLAMA_POOLING_TYPE_LAST:
1977
0
            {
1978
0
                ggml_tensor * inp_cls = build_inp_cls();
1979
0
                cur = ggml_get_rows(ctx0, inp, inp_cls);
1980
0
            } break;
1981
0
        case LLAMA_POOLING_TYPE_RANK:
1982
0
            {
1983
0
                ggml_tensor * inp_cls = build_inp_cls();
1984
0
                cur = ggml_get_rows(ctx0, inp, inp_cls);
1985
1986
                // classification head
1987
                // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566
1988
0
                if (cls) {
1989
0
                    cur = ggml_mul_mat(ctx0, cls, cur);
1990
0
                    if (cls_b) {
1991
0
                        cur = ggml_add(ctx0, cur, cls_b);
1992
0
                    }
1993
0
                    cur = ggml_tanh(ctx0, cur);
1994
0
                }
1995
1996
                // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en
1997
                // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896
1998
                // Single layer classification head (direct projection)
1999
                // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476
2000
0
                if (cls_out) {
2001
0
                    cur = ggml_mul_mat(ctx0, cls_out, cur);
2002
0
                    if (cls_out_b) {
2003
0
                        cur = ggml_add(ctx0, cur, cls_out_b);
2004
0
                    }
2005
0
                }
2006
2007
                // softmax for qwen3 reranker
2008
0
                if (arch == LLM_ARCH_QWEN3) {
2009
0
                    cur = ggml_soft_max(ctx0, cur);
2010
0
                }
2011
0
            } break;
2012
0
        default:
2013
0
            {
2014
0
                GGML_ABORT("unknown pooling type");
2015
0
            }
2016
0
    }
2017
2018
0
    cb(cur, "result_embd_pooled", -1);
2019
0
    res->t_embd_pooled = cur;
2020
2021
0
    ggml_build_forward_expand(gf, cur);
2022
0
}
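
Two of the pooling cases above reduce to very small pieces of arithmetic: MEAN pooling is a matrix product with per-sequence averaging weights (assuming inp_mean is filled with 1/len for each sequence's tokens, which is how the mean-pooling input is normally set up), and RANK applies an optional two-step head y = cls_out * tanh(cls * x + cls_b) + cls_out_b. Here is a scalar-sized sketch of both, with made-up weights.

#include <cmath>
#include <cstdio>

int main() {
    // --- MEAN pooling as a matrix product ---
    const int n_embd = 2, n_tokens = 3;                       // illustrative sizes
    float inp[n_tokens][n_embd] = {{1, 2}, {3, 4}, {5, 6}};   // one embedding per token
    float inp_mean[n_tokens]    = {1.0f/3, 1.0f/3, 1.0f/3};   // one sequence spanning all tokens

    float pooled[n_embd] = {0.0f, 0.0f};
    for (int t = 0; t < n_tokens; ++t)
        for (int e = 0; e < n_embd; ++e)
            pooled[e] += inp[t][e] * inp_mean[t];
    printf("mean-pooled embedding: %.3f %.3f\n", pooled[0], pooled[1]);   // 3.000 4.000

    // --- RANK classification head, collapsed to scalars for illustration ---
    const float cls = 0.5f, cls_b = 0.1f, cls_out = 2.0f, cls_out_b = -0.2f;   // made-up weights
    const float score = cls_out * tanhf(cls * pooled[0] + cls_b) + cls_out_b;
    printf("rank score: %.3f\n", score);
    return 0;
}
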
2023
2024
0
int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
2025
    // TODO move to hparams if a T5 variant appears that uses a different value
2026
0
    const int64_t max_distance = 128;
2027
2028
0
    if (bidirectional) {
2029
0
        n_buckets >>= 1;
2030
0
    }
2031
2032
0
    const int64_t max_exact = n_buckets >> 1;
2033
2034
0
    int32_t relative_position = x - y;
2035
0
    int32_t relative_bucket = 0;
2036
2037
0
    if (bidirectional) {
2038
0
        relative_bucket += (relative_position > 0) * n_buckets;
2039
0
        relative_position = std::abs(relative_position);
2040
0
    } else {
2041
0
        relative_position = -std::min<int32_t>(relative_position, 0);
2042
0
    }
2043
2044
0
    int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
2045
0
    relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1);
2046
0
    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);
2047
2048
0
    return relative_bucket;
2049
0
}
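
llama_relative_position_bucket above implements the T5 bucketing scheme: after the optional bidirectional split, distances below max_exact = n_buckets/2 map one-to-one to buckets, larger distances are binned logarithmically up to max_distance = 128, and anything beyond shares the last bucket. The small standalone driver below makes the shape of the mapping visible; it carries a condensed, slightly restructured copy of the logic so it compiles on its own, and the sample distances are arbitrary.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// condensed copy of the bucketing logic above, restructured with an early return
static int32_t rel_bucket(int32_t x, int32_t y, int64_t n_buckets, bool bidirectional) {
    const int64_t max_distance = 128;

    if (bidirectional) {
        n_buckets >>= 1;
    }

    const int64_t max_exact = n_buckets >> 1;

    int32_t relative_position = x - y;
    int32_t relative_bucket   = 0;

    if (bidirectional) {
        relative_bucket  += (relative_position > 0) * n_buckets;
        relative_position = std::abs(relative_position);
    } else {
        relative_position = -std::min<int32_t>(relative_position, 0);
    }

    // small distances map directly to their own bucket
    if (relative_position < max_exact) {
        return relative_bucket + relative_position;
    }

    // larger distances are binned logarithmically, capped at the last bucket
    int32_t if_large = (int32_t) floorf(max_exact + logf(1.0f * relative_position / max_exact) * (n_buckets - max_exact) / logf(1.0f * max_distance / max_exact));
    if_large = std::min<int32_t>(if_large, n_buckets - 1);

    return relative_bucket + if_large;
}

int main() {
    // causal (non-bidirectional) case with 32 buckets: exact below 16, logarithmic above
    for (int d : {0, 1, 8, 15, 16, 32, 64, 127, 500}) {
        printf("distance %4d -> bucket %2d\n", d, rel_bucket(0, d, 32, /*bidirectional=*/false));
    }
    return 0;
}
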