Coverage Report

Created: 2026-04-12 06:40

/src/llama.cpp/src/llama-memory-recurrent.cpp
All executable lines in this file have an execution count of 0 (the file is entirely uncovered).
#include "llama-memory-recurrent.h"

#include "ggml-backend.h"
#include "llama-impl.h"
#include "llama-io.h"
#include "llama-batch.h"
#include "llama-model.h"

#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <map>
#include <stdexcept>

//
// llama_memory_recurrent
//

llama_memory_recurrent::llama_memory_recurrent(
        const llama_model & model,
                ggml_type   type_r,
                ggml_type   type_s,
                     bool   offload,
                 uint32_t   mem_size,
                 uint32_t   n_seq_max,
    const layer_filter_cb & filter) : hparams(model.hparams), n_seq_max(n_seq_max) {
    const int32_t n_layer = hparams.n_layer;

    head = 0;
    size = mem_size;
    used = 0;

    cells.clear();
    cells.resize(mem_size);

    // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
    struct ggml_backend_buft_comparator {
        bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
            return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0;
        }
    };
    std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;

    // create a context for each buffer type
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
        auto it = ctx_map.find(buft);
        if (it == ctx_map.end()) {
            ggml_init_params params = {
                /*.mem_size   =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };

            ggml_context * ctx = ggml_init(params);
            if (!ctx) {
                return nullptr;
            }

            ctx_map.emplace(buft, ctx);

            return ctx;
        }

        return it->second.get();
    };

    r_l.resize(n_layer);
    s_l.resize(n_layer);

    for (int i = 0; i < n_layer; i++) {
        if (filter && !filter(i)) {
            LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, i);
            continue;
        }

        const char * dev_name = "CPU";

        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();

        if (offload) {
            auto * dev = model.dev_layer(i);
            buft = ggml_backend_dev_buffer_type(dev);

            dev_name = ggml_backend_dev_name(dev);
        }

        LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, i, dev_name);

        ggml_context * ctx = ctx_for_buft(buft);
        if (!ctx) {
            throw std::runtime_error("failed to create ggml context for rs cache");
        }

        ggml_tensor * r = ggml_new_tensor_2d(ctx, type_r, hparams.n_embd_r(), mem_size);
        ggml_tensor * s = ggml_new_tensor_2d(ctx, type_s, hparams.n_embd_s(), mem_size);
        ggml_format_name(r, "cache_r_l%d", i);
        ggml_format_name(s, "cache_s_l%d", i);
        r_l[i] = r;
        s_l[i] = s;
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto & [buft, ctx] : ctx_map) {
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx.get(), buft);
        if (!buf) {
            throw std::runtime_error("failed to allocate buffer for rs cache");
        }
        ggml_backend_buffer_clear(buf, 0);
        LLAMA_LOG_INFO("%s: %10s RS buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
        ctxs_bufs.emplace_back(std::move(ctx), buf);
    }

    {
        const size_t memory_size_r = size_r_bytes();
        const size_t memory_size_s = size_s_bytes();

        LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u seqs), R (%s): %7.2f MiB, S (%s): %7.2f MiB\n", __func__,
                (float)(memory_size_r + memory_size_s) / (1024.0f * 1024.0f), mem_size, n_layer, n_seq_max,
                ggml_type_name(type_r), (float)memory_size_r / (1024.0f * 1024.0f),
                ggml_type_name(type_s), (float)memory_size_s / (1024.0f * 1024.0f));
    }
}
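// note (editorial): each of the mem_size cells stores one full recurrent state per
// layer, so the total allocation is roughly
//   n_layer * mem_size * (row_size(type_r, n_embd_r()) + row_size(type_s, n_embd_s())).
// As a purely hypothetical example, a 64-layer model with an F32 S row of 32768
// elements and mem_size = 8 would stage 64 * 8 * 128 KiB = 64 MiB for the S cache alone.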

void llama_memory_recurrent::clear(bool data) {
    for (int32_t i = 0; i < (int32_t) size; ++i) {
        cells[i].pos = -1;
        cells[i].seq_id.clear();
        cells[i].src = -1;
        cells[i].tail = -1;
    }

    head = 0;
    used = 0;

    if (data) {
        for (auto & [_, buf] : ctxs_bufs) {
            ggml_backend_buffer_clear(buf.get(), 0);
        }
    }
}
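// note (editorial): clear(false) only resets the cell metadata (pos/src/tail/seq_id),
// while clear(true) additionally zero-fills the backing R/S buffers on the backend.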

bool llama_memory_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    //printf("[DEBUG] calling `llama_memory_recurrent::seq_rm` with `seq_id=%d, p0=%d, p1=%d`\n", seq_id, p0, p1);
    uint32_t new_head = size;

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // models like Mamba or RWKV can't have a state partially erased at the end
    // of the sequence because their state isn't preserved for previous tokens
    if (seq_id >= (int64_t) size) {
        // could be fatal
        return false;
    }
    if (0 <= seq_id) {
        int32_t & tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            const auto & cell = cells[tail_id];
            // partial intersection is invalid if it includes the final pos
            if (0 < p0 && p0 <= cell.pos && p1 > cell.pos) {
                //printf("[DEBUG] inside `llama_memory_recurrent::seq_rm`: partial intersection is invalid, so returning false, p0 = %d, cell.pos = %d, p1 = %d\n", p0, cell.pos, p1);
                return false;
            }
            // invalidate tails which will be cleared
            if (p0 <= cell.pos && cell.pos < p1) {
                tail_id = -1;
            }
        }
    } else {
        // when seq_id is negative, the range should include everything or nothing
        if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
            //printf("[DEBUG] inside `llama_memory_recurrent::seq_rm`: `seq_id` is negative, so returning false\n");
            return false;
        }
    }

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].pos >= p0 && cells[i].pos < p1) {
            if (seq_id < 0) {
                cells[i].seq_id.clear();
            } else if (cells[i].has_seq_id(seq_id)) {
                cells[i].seq_id.erase(seq_id);
            } else {
                continue;
            }
            if (cells[i].is_empty()) {
                // keep count of the number of used cells
                if (cells[i].pos >= 0) {
                    used--;
                }
                cells[i].pos = -1;
                cells[i].src = -1;
                if (new_head == size) {
                    new_head = i;
                }
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }

    return true;
}
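// note (editorial): because only the final state of each sequence is kept, a
// suffix erase that starts mid-sequence cannot be honored. E.g. with the tail
// cell at pos = 9, seq_rm(0, 5, -1) returns false (the states for pos 5..8 no
// longer exist to roll back to), while seq_rm(0, 0, -1) removes the whole
// sequence and succeeds.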

void llama_memory_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
        auto & tail_src = cells[seq_id_src];
        auto & tail_dst = cells[seq_id_dst];
        if (tail_dst.tail >= 0) {
            // clear destination seq_id if it wasn't empty
            auto & cell_dst = cells[tail_dst.tail];

            cell_dst.seq_id.erase(seq_id_dst);
            tail_dst.tail = -1;
            if (cell_dst.seq_id.empty()) {
                cell_dst.pos = -1;
                cell_dst.src = -1;
                used -= 1;
            }
        }
        if (tail_src.tail >= 0) {
            auto & cell_src = cells[tail_src.tail];

            cell_src.seq_id.insert(seq_id_dst);
            tail_dst.tail = tail_src.tail;
        }
    }
}
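// note (editorial): copying a sequence is O(1) here: the destination seq_id is
// simply added to the source's tail cell, so both sequences share one state
// until find_slot() later splits them into separate cells (copy-on-write).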

void llama_memory_recurrent::seq_keep(llama_seq_id seq_id) {
    uint32_t new_head = size;

    for (uint32_t i = 0; i < size; ++i) {
        if ((llama_seq_id) i != seq_id) {
            cells[i].tail = -1;
        }

        if (!cells[i].has_seq_id(seq_id)) {
            if (cells[i].pos >= 0) {
                used--;
            }

            cells[i].pos = -1;
            cells[i].src = -1;
            cells[i].seq_id.clear();

            if (new_head == size) {
                new_head = i;
            }
        } else {
            cells[i].seq_id.clear();
            cells[i].seq_id.insert(seq_id);
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }
}

void llama_memory_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
    if (shift == 0) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be shifted
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            auto & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos += shift;
            }
        }
    }
}

void llama_memory_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    if (d == 1) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be changed
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            auto & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos /= d;
            }
        }
    }
}
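// note (editorial): unlike a KV cache, seq_add()/seq_div() never touch stored
// data. The recurrent state is position-independent, so shifting or dividing
// only rewrites the pos of the single tail cell owned by the sequence.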

llama_pos llama_memory_recurrent::seq_pos_min(llama_seq_id seq_id) const {
    llama_pos result = std::numeric_limits<llama_pos>::max();

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id)) {
            result = std::min(result, cells[i].pos);
        }
    }

    if (result == std::numeric_limits<llama_pos>::max()) {
        result = -1;
    }

    return result;
}

llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const {
    llama_pos result = -1;

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id)) {
            result = std::max(result, cells[i].pos);
        }
    }

    return result;
}

std::map<ggml_backend_buffer_type_t, size_t> llama_memory_recurrent::memory_breakdown() const {
    std::map<ggml_backend_buffer_type_t, size_t> ret;
    for (const auto & [_, buf] : ctxs_bufs) {
        ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get());
    }
    return ret;
}

llama_memory_context_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
    do {
        balloc.split_reset();

        std::vector<llama_ubatch> ubatches;
        while (true) {
            llama_ubatch ubatch;

            if (embd_all) {
                // if all tokens are output, split by sequence
                ubatch = balloc.split_seq(n_ubatch);
            } else {
                // TODO: non-sequential equal split can be done if using unified KV cache
                //       for simplicity, we always use sequential equal split for now
                ubatch = balloc.split_equal(n_ubatch, true);
            }

            if (ubatch.n_tokens == 0) {
                break;
            }

            ubatches.push_back(std::move(ubatch)); // NOLINT
        }

        if (balloc.get_n_used() < balloc.get_n_tokens()) {
            // failed to find a suitable split
            break;
        }

        if (!prepare(ubatches)) {
            break;
        }

        return std::make_unique<llama_memory_recurrent_context>(this, std::move(ubatches));
    } while (false);

    return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}
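// note (editorial): the do { ... } while (false) wrapper gives the three failure
// points a common exit: any `break` falls through to returning a context with
// LLAMA_MEMORY_STATUS_FAILED_PREPARE instead of a usable one.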

llama_memory_context_ptr llama_memory_recurrent::init_full() {
    return std::make_unique<llama_memory_recurrent_context>(this);
}

llama_memory_context_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) {
    GGML_UNUSED(lctx);
    GGML_UNUSED(optimize);

    return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_NO_UPDATE);
}

bool llama_memory_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
    // simply remember the full state because it is very small for this type of cache
    // TODO: optimize
    auto org_cells = cells;
    auto org_used = used;
    auto org_head = head;

    bool success = true;

    for (const auto & ubatch : ubatches) {
        if (!find_slot(ubatch)) {
            success = false;
            break;
        }
    }

    // restore the original state
    cells = std::move(org_cells);
    used = org_used;
    head = org_head;

    return success;
}
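// note (editorial): prepare() is a dry run. It snapshots the (small) cell
// metadata, checks that every ubatch can be placed via find_slot(), and then
// rolls the metadata back; the real placement happens later in apply().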

bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
    const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
    const uint32_t n_seqs       = ubatch.n_seqs;

    // if we have enough unused cells before the current head ->
    //   better to start searching from the beginning of the cache, hoping to fill it
    if (head > used + 2*n_seqs) {
        head = 0;
    }

    // For recurrent state architectures (like Mamba or RWKV),
    // each cache cell can store the state for a whole sequence.
    // A slot should always be contiguous.

    // can only process batches with an equal number of new tokens in each sequence
    GGML_ASSERT(ubatch.equal_seqs());

    int32_t min = size - 1;
    int32_t max = 0;

    // everything should fit if all seq_ids are smaller than the max
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens; // first token of sequence set s
        const uint32_t n_seq_id = ubatch.n_seq_id[i];

        for (uint32_t j = 0; j < n_seq_id; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[i][j];

            if (seq_id < 0 || (uint32_t) seq_id >= size) {
                // too big seq_id
                // TODO: would it be possible to resize the cache instead?
                LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u. Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max);
                return false;
            }
            if (j > 0) {
                auto & seq = cells[seq_id];
                if (seq.tail >= 0) {
                    auto & cell = cells[seq.tail];
                    // clear cells from seq_ids that become shared
                    // (should not normally happen, but let's handle it anyway)
                    cell.seq_id.erase(seq_id);
                    seq.tail = -1;
                    if (cell.seq_id.empty()) {
                        cell.pos = -1;
                        cell.src = -1;
                        used -= 1;
                    }
                }
            }
        }
    }

#ifndef NDEBUG
    {
        std::vector<int32_t> tails_verif;
        tails_verif.assign(size, -1);
        for (uint32_t i = 0; i < size; ++i) {
            auto & cell = cells[i];
            for (llama_seq_id seq_id : cell.seq_id) {
                if (tails_verif[seq_id] != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
                }
                tails_verif[seq_id] = i;
            }
        }
        for (uint32_t i = 0; i < size; ++i) {
            if (tails_verif[i] != cells[i].tail) {
                LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
            }
        }
    }
#endif

    // find next empty cell
    uint32_t next_empty_cell = head;

    for (uint32_t i = 0; i < size; ++i) {
        if (next_empty_cell >= size) { next_empty_cell -= size; }
        auto & cell = cells[next_empty_cell];
        if (cell.is_empty()) { break; }
        next_empty_cell += 1;
    }

    // find usable cell range
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens;
        const llama_seq_id seq_id = ubatch.seq_id[i][0];
        auto & seq_meta = cells[seq_id];
        bool has_cell = false;
        if (seq_meta.tail >= 0) {
            auto & cell = cells[seq_meta.tail];
            GGML_ASSERT(cell.has_seq_id(seq_id));
            // does this seq_id "own" the cell?
            if (cell.seq_id.size() == 1) { has_cell = true; }
        }
        if (!has_cell) {
            auto & empty_cell = cells[next_empty_cell];
            GGML_ASSERT(empty_cell.is_empty());
            // copy old tail into the empty cell
            if (seq_meta.tail >= 0) {
                auto & orig_cell = cells[seq_meta.tail];
                empty_cell.pos = orig_cell.pos;
                empty_cell.src = orig_cell.src;
                orig_cell.seq_id.erase(seq_id);
                empty_cell.seq_id.insert(seq_id); // will be overwritten
                GGML_ASSERT(!orig_cell.is_empty()); // has at least one remaining seq_id
            }
            seq_meta.tail = next_empty_cell;
            // find next empty cell
            if (s + 1 < n_seqs) {
                for (uint32_t j = 0; j < size; ++j) {
                    next_empty_cell += 1;
                    if (next_empty_cell >= size) { next_empty_cell -= size; }
                    auto & cell = cells[next_empty_cell];
                    if (cell.is_empty()) { break; }
                }
            }
        }
        if (min > seq_meta.tail) { min = seq_meta.tail; }
        if (max < seq_meta.tail) { max = seq_meta.tail; }
    }

    // gather and re-order
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens;
        const int32_t dst_id = s + min;
        const int32_t src_id = cells[ubatch.seq_id[i][0]].tail;
        if (dst_id != src_id) {
            auto & dst_cell = cells[dst_id];
            auto & src_cell = cells[src_id];

            std::swap(dst_cell.pos, src_cell.pos);
            std::swap(dst_cell.src, src_cell.src);
            std::swap(dst_cell.seq_id, src_cell.seq_id);

            // swap tails
            for (uint32_t j = 0; j < size; ++j) {
                int32_t & tail = cells[j].tail;
                if (tail == src_id) {
                    tail = dst_id;
                } else if (tail == dst_id) {
                    tail = src_id;
                }
            }
        }
    }

    // update the pos of the used seqs
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens;
        const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1];
        const int32_t cell_id = s + min;
        auto & cell = cells[cell_id];

        if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
            // What should happen when the pos backtracks or skips a value?
            // Clearing the state mid-batch would require special-casing which isn't done.
            LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
                __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens);
        }
        cell.pos = last_pos;
        cell.seq_id.clear();
        for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[i][j];
            cell.seq_id.insert(seq_id);
            cells[seq_id].tail = cell_id;
        }
    }

    // Find first cell without src refs, to use as the zero-ed state
    {
        // TODO: bake-in src refcounts in the cell metadata
        std::vector<int32_t> refcounts(size, 0);
        for (size_t i = 0; i < size; ++i) {
            const int32_t src = cells[i].src;
            if (src >= 0) {
                refcounts[src] += 1;
            }
        }

        rs_z = -1;
        for (int i = min; i <= max; ++i) {
            if (refcounts[i] == 0) {
                rs_z = i;
                break;
            }
        }

        for (int i = min; i <= max; ++i) {
            if (cells[i].src < 0) {
                GGML_ASSERT(rs_z >= 0);
                cells[i].src0 = rs_z;
            } else {
                // Stage the source ids for all used cells to allow correct seq_* behavior
                // and still make these values available when setting the inputs
                cells[i].src0 = cells[i].src;
            }
            cells[i].src = i; // avoid moving or clearing twice
        }
    }

    // allow getting the range of used cells, from head to head + n
    head = min;
    n    = max - min + 1;
    used = std::count_if(cells.begin(), cells.end(),
        [](const mem_cell & cell){ return !cell.is_empty(); });

    // sanity check
    return n >= n_seqs;
}
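// note (editorial): find_slot() works in three phases: (1) give every sequence
// in the ubatch its own tail cell, copying shared tails out (copy-on-write);
// (2) swap cells so the selected tails form one contiguous block [min, max];
// (3) pick rs_z, the first cell in that block that no other cell reads from, to
// serve as the zeroed initial state. head and n then describe the block.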

bool llama_memory_recurrent::get_can_shift() const {
    // shifting the pos is trivial for recurrent models
    return true;
}

size_t llama_memory_recurrent::total_size() const {
    size_t size = 0;
    for (const auto & [_, buf] : ctxs_bufs) {
        size += ggml_backend_buffer_get_size(buf.get());
    }

    return size;
}

size_t llama_memory_recurrent::size_r_bytes() const {
    size_t size_r_bytes = 0;

    for (const auto & r : r_l) {
        if (r != nullptr) {
            size_r_bytes += ggml_nbytes(r);
        }
    }

    return size_r_bytes;
}

size_t llama_memory_recurrent::size_s_bytes() const {
    size_t size_s_bytes = 0;

    for (const auto & s : s_l) {
        if (s != nullptr) {
            size_s_bytes += ggml_nbytes(s);
        }
    }

    return size_s_bytes;
}

void llama_memory_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) const {
    GGML_UNUSED(flags);

    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;

    // Count the number of cells with the specified seq_id
    // Find all the ranges of cells with this seq id (or all, when -1)
    uint32_t cell_range_begin = size;
    for (uint32_t i = 0; i < size; ++i) {
        const auto & cell = cells[i];
        if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
            ++cell_count;
            if (cell_range_begin == size) {
                cell_range_begin = i;
            }
        } else {
            if (cell_range_begin != size) {
                cell_ranges.emplace_back(cell_range_begin, i);
                cell_range_begin = size;
            }
        }
    }
    if (cell_range_begin != size) {
        cell_ranges.emplace_back(cell_range_begin, size);
    }

    // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
    uint32_t cell_count_check = 0;
    for (const auto & range : cell_ranges) {
        cell_count_check += range.second - range.first;
    }
    GGML_ASSERT(cell_count == cell_count_check);

    io.write(&cell_count, sizeof(cell_count));

    state_write_meta(io, cell_ranges, seq_id);
    state_write_data(io, cell_ranges);
}

void llama_memory_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
    GGML_UNUSED(flags);

    uint32_t cell_count;
    io.read_to(&cell_count, sizeof(cell_count));

    bool res = true;

    res = res && state_read_meta(io, cell_count, seq_id);
    res = res && state_read_data(io, cell_count);

    if (!res) {
        if (seq_id == -1) {
            clear(true);
        } else {
            seq_rm(seq_id, -1, -1);
        }
        throw std::runtime_error("failed to restore kv cache");
    }
}

void llama_memory_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
    for (const auto & range : cell_ranges) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            const auto & cell = cells[i];
            const llama_pos pos      = cell.pos;
            const uint32_t  n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;

            io.write(&pos,      sizeof(pos));
            io.write(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id) {
                for (auto seq_id : cell.seq_id) {
                    io.write(&seq_id, sizeof(seq_id));
                }
            }
        }
    }
}
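// note (editorial): per-cell metadata is serialized as: pos (llama_pos), then
// n_seq_id (uint32_t), then that many seq ids. When a single sequence is being
// exported (seq_id != -1), n_seq_id is written as 0, since the destination
// sequence id is supplied by the reader instead.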

void llama_memory_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
    const uint32_t s_trans = 0;
    const uint32_t n_layer = hparams.n_layer;

    io.write(&s_trans, sizeof(s_trans));
    io.write(&n_layer, sizeof(n_layer));

    // Iterate and write all the R tensors first, each row is a cell
    // Get whole range at a time
    for (uint32_t il = 0; il < n_layer; ++il) {
        // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null)
        if (r_l[il] == nullptr) continue;

        // Write R tensor type
        const int32_t r_type_i = (int32_t)r_l[il]->type;
        io.write(&r_type_i, sizeof(r_type_i));

        // Write row size of R tensor
        const uint64_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
        io.write(&r_size_row, sizeof(r_size_row));

        // Write each range of cells of r_size_row length
        for (const auto & range : cell_ranges) {
            const size_t range_size = range.second - range.first;
            const size_t buf_size = range_size * r_size_row;
            io.write_tensor(r_l[il], range.first * r_size_row, buf_size);
        }
    }

    if (!s_trans) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null)
            if (s_l[il] == nullptr) continue;

            // Write S tensor type
            const int32_t s_type_i = (int32_t)s_l[il]->type;
            io.write(&s_type_i, sizeof(s_type_i));

            // Write row size of S tensor
            const uint64_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
            io.write(&s_size_row, sizeof(s_size_row));

            // Write each range of S tensor rows
            for (const auto & range : cell_ranges) {
                const size_t range_size = range.second - range.first;
                const size_t buf_size = range_size * s_size_row;
                io.write_tensor(s_l[il], range.first * s_size_row, buf_size);
            }
        }
    } else {
        // When the S tensor is transposed, we also need the element size and get the element ranges from each row
        const uint32_t mem_size = size;
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null)
            if (s_l[il] == nullptr) continue;

            const uint32_t n_embd_s = hparams.n_embd_s();

            // Write S tensor type
            const int32_t s_type_i = (int32_t)s_l[il]->type;
            io.write(&s_type_i, sizeof(s_type_i));

            // Write element size
            const uint32_t s_size_el = ggml_type_size(s_l[il]->type);
            io.write(&s_size_el, sizeof(s_size_el));

            // Write state embedding size
            io.write(&n_embd_s, sizeof(n_embd_s));

            // For each row, we get the element values of each cell
            for (uint32_t j = 0; j < n_embd_s; ++j) {
                // Write each range of cells of s_size_el length
                for (const auto & range : cell_ranges) {
                    const size_t range_size = range.second - range.first;
                    const size_t src_offset = (range.first + j * mem_size) * s_size_el;
                    const size_t buf_size = range_size * s_size_el;
                    io.write_tensor(s_l[il], src_offset, buf_size);
                }
            }
        }
    }
}
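// note (editorial): the serialized data section is therefore: the s_trans flag
// (always 0 here), n_layer, then per layer the R tensor type + row size + row
// data, followed by the same for the S tensors. The transposed branch is kept
// for format compatibility but is unreachable while s_trans is a constant 0.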

bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
    if (dest_seq_id != -1) {
        // single sequence
        seq_rm(dest_seq_id, -1, -1);

        if (cell_count == 0) {
            return true;
        }

        llama_batch_allocr balloc(hparams.n_pos_per_embd());

        llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id != 0) {
                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
                return false;
            }

            ubatch.pos[i] = pos;
        }
        ubatch.n_seq_id[0] = 1;
        ubatch.seq_id[0] = &dest_seq_id;

        if (!find_slot(ubatch)) {
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return false;
        }

        // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(head + cell_count <= size);
        GGML_ASSERT(cells[head].pos == ubatch.pos[0]);
        GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]);
        GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
        GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
    } else {
        // whole KV cache restore

        if (cell_count > size) {
            LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
            return false;
        }

        clear(true);

        for (uint32_t i = 0; i < cell_count; ++i) {
            auto & cell = cells[i];

            llama_pos pos;
            uint32_t  n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            cell.pos = pos;

            for (uint32_t j = 0; j < n_seq_id; ++j) {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));

                if (seq_id < 0 || (uint32_t) seq_id >= this->n_seq_max) {
                    LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, this->n_seq_max);
                    return false;
                }

                cell.seq_id.insert(seq_id);

                int32_t & tail = cells[seq_id].tail;
                if (tail != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
                    return false;
                }
                tail = i;
            }
        }

        head = 0;
        used = cell_count;
    }

    for (uint32_t i = 0; i < cell_count; ++i) {
        uint32_t cell_id = head + i;
        // make sure the recurrent states will keep their restored state
        cells[cell_id].src = cell_id;
    }

    return true;
}
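// note (editorial): the single-sequence path rebuilds placement by synthesizing
// a temporary ubatch over the saved positions and routing it through find_slot(),
// so restore reuses exactly the same allocation logic as normal decoding.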

bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
    uint32_t s_trans;
    uint32_t n_layer;
    io.read_to(&s_trans, sizeof(s_trans));
    io.read_to(&n_layer, sizeof(n_layer));

    if (n_layer != hparams.n_layer) {
        LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
        return false;
    }
    if (cell_count > size) {
        LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
        return false;
    }
    if (s_trans != 0) {
        LLAMA_LOG_ERROR("%s: incompatible s transposition\n", __func__);
        return false;
    }

    // For each layer, read the R rows for each cell, one row is one cell, read as one contiguous block
    for (uint32_t il = 0; il < n_layer; ++il) {
        // skip null layers
        if (r_l[il] == nullptr) continue;

        // Read R tensor type
        int32_t r_type_i_ref;
        io.read_to(&r_type_i_ref, sizeof(r_type_i_ref));
        const int32_t r_type_i = (int32_t) r_l[il]->type;
        if (r_type_i != r_type_i_ref) {
            LLAMA_LOG_ERROR("%s: mismatched r type (%d != %d, layer %d)\n", __func__, r_type_i, r_type_i_ref, il);
            return false;
        }

        // Read row size of R tensor
        uint64_t r_size_row_ref;
        io.read_to(&r_size_row_ref, sizeof(r_size_row_ref));
        const size_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
        if (r_size_row != r_size_row_ref) {
            LLAMA_LOG_ERROR("%s: mismatched r row size (%zu != %zu, layer %d)\n", __func__, r_size_row, (size_t) r_size_row_ref, il);
            return false;
        }

        if (cell_count) {
            // Read and set the R rows for the whole cell range
            ggml_backend_tensor_set(r_l[il], io.read(cell_count * r_size_row), head * r_size_row, cell_count * r_size_row);
        }
    }

    if (!s_trans) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers
            if (s_l[il] == nullptr) continue;

            // Read S tensor type
            int32_t s_type_i_ref;
            io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
            const int32_t s_type_i = (int32_t)s_l[il]->type;

            if (s_type_i != s_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
                return false;
            }

            // Read row size of S tensor
            uint64_t s_size_row_ref;
            io.read_to(&s_size_row_ref, sizeof(s_size_row_ref));
            const size_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
            if (s_size_row != s_size_row_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s row size (%zu != %zu, layer %d)\n", __func__, s_size_row, (size_t) s_size_row_ref, il);
                return false;
            }

            if (cell_count) {
                // Read and set the S rows for the whole cell range
                ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_row), head * s_size_row, cell_count * s_size_row);
            }
        }
    } else {
        // For each layer, read the S values for each cell (transposed)
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers
            if (s_l[il] == nullptr) continue;

            const uint32_t n_embd_s = hparams.n_embd_s();

            // Read S tensor type
            int32_t s_type_i_ref;
            io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
            const int32_t s_type_i = (int32_t)s_l[il]->type;
            if (s_type_i != s_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
                return false;
            }

            // Read element size of S tensor
            uint32_t s_size_el_ref;
            io.read_to(&s_size_el_ref, sizeof(s_size_el_ref));
            const size_t s_size_el = ggml_type_size(s_l[il]->type);
            if (s_size_el != s_size_el_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s element size (%zu != %zu, layer %d)\n", __func__, s_size_el, (size_t) s_size_el_ref, il);
                return false;
            }

            // Read state embedding size
            uint32_t n_embd_s_ref;
            io.read_to(&n_embd_s_ref, sizeof(n_embd_s_ref));
            if (n_embd_s != n_embd_s_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s embedding size (%u != %u, layer %d)\n", __func__, n_embd_s, n_embd_s_ref, il);
                return false;
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_s; ++j) {
                    const size_t dst_offset = (head + j * size) * s_size_el;
                    ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_el), dst_offset, cell_count * s_size_el);
                }
            }
        }
    }

    return true;
}

//
// llama_memory_recurrent_context
//

llama_memory_recurrent_context::llama_memory_recurrent_context(llama_memory_status status) : status(status) {}

llama_memory_recurrent_context::llama_memory_recurrent_context(
        llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) {
}

llama_memory_recurrent_context::llama_memory_recurrent_context(
        llama_memory_recurrent * mem,
        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {}

llama_memory_recurrent_context::~llama_memory_recurrent_context() = default;

bool llama_memory_recurrent_context::next() {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    if (++i_next >= ubatches.size()) {
        return false;
    }

    return true;
}

bool llama_memory_recurrent_context::apply() {
    assert(!llama_memory_status_is_fail(status));

    // no ubatches -> this is an update
    if (ubatches.empty()) {
        // recurrent cache never performs updates
        assert(status == LLAMA_MEMORY_STATUS_NO_UPDATE);

        return true;
    }

    mem->find_slot(ubatches[i_next]);

    return true;
}

llama_memory_status llama_memory_recurrent_context::get_status() const {
    return status;
}

const llama_ubatch & llama_memory_recurrent_context::get_ubatch() const {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    return ubatches[i_next];
}

uint32_t llama_memory_recurrent_context::get_n_rs() const {
    return is_full ? mem->size : mem->n;
}

uint32_t llama_memory_recurrent_context::get_head() const {
    return is_full ? 0 : mem->head;
}

int32_t llama_memory_recurrent_context::get_rs_z() const {
    return is_full ? 0 : mem->rs_z;
}

uint32_t llama_memory_recurrent_context::get_size() const {
    return mem->size;
}

ggml_tensor * llama_memory_recurrent_context::get_r_l(int32_t il) const {
    return mem->r_l[il];
}

ggml_tensor * llama_memory_recurrent_context::get_s_l(int32_t il) const {
    return mem->s_l[il];
}

int32_t llama_memory_recurrent_context::s_copy(int i) const {
    return mem->cells[i + mem->head].src0;
}
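// note (editorial): the context object is the read-side view used while building
// the compute graph. When constructed via init_full() (is_full == true) it exposes
// the whole cache (head 0, n_rs = size, rs_z 0); otherwise it exposes just the
// contiguous slot that find_slot() reserved, and s_copy(i) maps stream i to the
// staged src0 of its cell.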