Coverage Report

Created: 2025-12-28 06:25

/src/llama.cpp/src/llama-kv-cache.cpp
Line
Count
Source
1
#include "llama-kv-cache.h"
2
3
#include "llama-impl.h"
4
#include "llama-io.h"
5
#include "llama-model.h"
6
#include "llama-context.h"
7
8
#include <algorithm>
9
#include <cassert>
10
#include <cmath>
11
#include <cstring>
12
#include <limits>
13
#include <map>
14
#include <stdexcept>
15
16
//
17
// llama_kv_cache
18
//
19
20
llama_kv_cache::llama_kv_cache(
21
        const llama_model & model,
22
                ggml_type   type_k,
23
                ggml_type   type_v,
24
                     bool   v_trans,
25
                     bool   offload,
26
                     bool   unified,
27
                 uint32_t   kv_size,
28
                 uint32_t   n_seq_max,
29
                 uint32_t   n_pad,
30
                 uint32_t   n_swa,
31
           llama_swa_type   swa_type,
32
    const layer_filter_cb & filter,
33
    const  layer_reuse_cb & reuse) :
34
0
    model(model), hparams(model.hparams), v_trans(v_trans),
35
0
    n_seq_max(n_seq_max), n_stream(unified ? 1 : n_seq_max), n_pad(n_pad), n_swa(n_swa), swa_type(swa_type) {
36
37
0
    GGML_ASSERT(kv_size % n_pad == 0);
38
39
0
    const uint32_t n_layer_kv = hparams.n_layer_kv();
40
41
    // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
42
0
    struct ggml_backend_buft_comparator {
43
0
        bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
44
0
            return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0;
45
0
        }
46
0
    };
47
0
    std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;
48
49
    // create a context for each buffer type
50
0
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
51
0
        auto it = ctx_map.find(buft);
52
0
        if (it == ctx_map.end()) {
53
0
            ggml_init_params params = {
54
0
                /*.mem_size   =*/ size_t(2u*(1 + n_stream)*n_layer_kv*ggml_tensor_overhead()),
55
0
                /*.mem_buffer =*/ NULL,
56
0
                /*.no_alloc   =*/ true,
57
0
            };
58
59
0
            ggml_context * ctx = ggml_init(params);
60
0
            if (!ctx) {
61
0
                return nullptr;
62
0
            }
63
64
0
            ctx_map.emplace(buft, ctx);
65
66
0
            return ctx;
67
0
        }
68
69
0
        return it->second.get();
70
0
    };
71
72
0
    GGML_ASSERT(n_stream == 1 || n_stream == n_seq_max);
73
74
0
    v_heads.resize(n_stream);
75
0
    for (uint32_t s = 0; s < n_stream; ++s) {
76
0
        v_heads[s] = 0;
77
0
    }
78
79
0
    v_cells.resize(n_stream);
80
0
    for (uint32_t s = 0; s < n_stream; ++s) {
81
0
        v_cells[s].resize(kv_size);
82
0
    }
83
84
    // by default, all sequence ids are mapped to the 0th stream
85
0
    seq_to_stream.resize(LLAMA_MAX_SEQ, 0);
86
87
0
    if (n_stream > 1) {
88
0
        seq_to_stream.resize(n_stream, 0);
89
0
        for (uint32_t s = 0; s < n_stream; ++s) {
90
0
            seq_to_stream[s] = s;
91
0
        }
92
0
    }
93
94
    // [TAG_V_CACHE_VARIABLE]
95
0
    if (v_trans && hparams.is_n_embd_v_gqa_variable()) {
96
0
        LLAMA_LOG_WARN("%s: the V embeddings have different sizes across layers and FA is not enabled - padding V cache to %d\n",
97
0
                __func__, hparams.n_embd_v_gqa_max());
98
0
    }
99
100
0
    for (uint32_t il = 0; il < hparams.n_layer; il++) {
101
0
        if (!hparams.has_kv(il)) {
102
0
            LLAMA_LOG_DEBUG("%s: layer %3d: does not have KV cache\n", __func__, il);
103
0
            continue;
104
0
        }
105
106
0
        if (filter && !filter(il)) {
107
0
            LLAMA_LOG_DEBUG("%s: layer %3d: filtered\n", __func__, il);
108
0
            continue;
109
0
        }
110
111
        // [TAG_V_CACHE_VARIABLE]
112
0
        const uint32_t n_embd_k_gqa =            hparams.n_embd_k_gqa(il);
113
0
        const uint32_t n_embd_v_gqa = !v_trans ? hparams.n_embd_v_gqa(il) : hparams.n_embd_v_gqa_max();
114
115
0
        const char * dev_name = "CPU";
116
117
0
        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
118
119
0
        if (offload) {
120
0
            auto * dev = model.dev_layer(il);
121
0
            buft = ggml_backend_dev_buffer_type(dev);
122
123
0
            dev_name = ggml_backend_dev_name(dev);
124
0
        }
125
126
0
        LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, il, dev_name);
127
128
0
        ggml_context * ctx = ctx_for_buft(buft);
129
0
        if (!ctx) {
130
0
            throw std::runtime_error("failed to create ggml context for kv cache");
131
0
        }
132
133
0
        ggml_tensor * k = ggml_new_tensor_3d(ctx, type_k, n_embd_k_gqa, kv_size, n_stream);
134
0
        ggml_tensor * v = ggml_new_tensor_3d(ctx, type_v, n_embd_v_gqa, kv_size, n_stream);
135
136
0
        ggml_format_name(k, "cache_k_l%d", il);
137
0
        ggml_format_name(v, "cache_v_l%d", il);
138
139
0
        std::vector<ggml_tensor *> k_stream;
140
0
        std::vector<ggml_tensor *> v_stream;
141
142
0
        for (uint32_t s = 0; s < n_stream; ++s) {
143
0
            k_stream.push_back(ggml_view_2d(ctx, k, n_embd_k_gqa, kv_size, k->nb[1], s*k->nb[2]));
144
0
            v_stream.push_back(ggml_view_2d(ctx, v, n_embd_v_gqa, kv_size, v->nb[1], s*v->nb[2]));
145
0
        }
146
147
0
        map_layer_ids[il] = layers.size();
148
149
0
        layers.push_back({ il, k, v, k_stream, v_stream, });
150
0
    }
151
152
0
    if (reuse) {
153
0
        LLAMA_LOG_DEBUG("%s: reusing layers:\n", __func__);
154
155
0
        for (uint32_t il = 0; il < hparams.n_layer; il++) {
156
0
            const int32_t il_reuse = reuse(il);
157
158
0
            if (il_reuse < 0) {
159
0
                LLAMA_LOG_DEBUG("%s: - layer %3d: no reuse\n", __func__, il);
160
0
                continue;
161
0
            }
162
163
0
            if (filter && !filter(il)) {
164
0
                LLAMA_LOG_DEBUG("%s: - layer %3d: filtered\n", __func__, il);
165
0
                continue;
166
0
            }
167
168
0
            GGML_ASSERT(map_layer_ids.find(il_reuse) != map_layer_ids.end());
169
170
0
            map_layer_ids[il] = map_layer_ids[il_reuse];
171
172
0
            LLAMA_LOG_DEBUG("%s: - layer %3d: reuse layer %d, is_swa = %d\n", __func__, il, il_reuse, hparams.is_swa(il));
173
0
        }
174
0
    }
175
176
    // allocate tensors and initialize the buffers to avoid NaNs in the padding
177
0
    for (auto & [buft, ctx] : ctx_map) {
178
0
        ggml_backend_buffer_t buf;
179
0
        if (model.hparams.no_alloc) {
180
0
            buf = ggml_backend_buft_alloc_buffer(buft, /*size =*/ 0); // dummy buffer
181
0
            for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != nullptr; t = ggml_get_next_tensor(ctx.get(), t)) {
182
0
                t->buffer = buf; // set dummy buffer for KV cache so that the backend scheduler won't try to allocate it
183
0
            }
184
0
        } else {
185
0
            buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx.get(), buft); // real buffer
186
0
        }
187
0
        if (!buf) {
188
0
            throw std::runtime_error("failed to allocate buffer for kv cache");
189
0
        }
190
191
0
        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
192
193
0
        ggml_backend_buffer_clear(buf, 0);
194
0
        ctxs_bufs.emplace_back(std::move(ctx), buf);
195
0
    }
196
197
0
    {
198
0
        const size_t memory_size_k = size_k_bytes();
199
0
        const size_t memory_size_v = size_v_bytes();
200
201
0
        LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u/%u seqs), K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
202
0
                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), kv_size, (int) layers.size(), n_seq_max, n_stream,
203
0
                ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
204
0
                ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
205
0
    }
206
207
0
    const char * LLAMA_KV_CACHE_DEBUG = getenv("LLAMA_KV_CACHE_DEBUG");
208
0
    debug = LLAMA_KV_CACHE_DEBUG ? atoi(LLAMA_KV_CACHE_DEBUG) : 0;
209
0
}
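Note: the constructor above keys its per-buffer-type contexts on a std::map ordered by a strcmp-based comparator (ggml_backend_buft_comparator), so iteration order depends on backend names rather than pointer values. Below is a minimal standalone sketch of that pattern, for illustration only; plain const char * keys stand in for ggml_backend_buffer_type_t and the values are dummies.

#include <cstdio>
#include <cstring>
#include <map>

// order map keys by name so the iteration order is well-defined,
// mirroring the ggml_backend_buft_comparator used above
struct name_comparator {
    bool operator()(const char * lhs, const char * rhs) const {
        return strcmp(lhs, rhs) < 0;
    }
};

int main() {
    std::map<const char *, int, name_comparator> ctx_map;
    ctx_map["CUDA0"] = 1;
    ctx_map["CPU"]   = 2;

    for (const auto & [name, id] : ctx_map) {
        // prints "CPU" first: iteration follows the names, not insertion or pointer order
        std::printf("%s -> %d\n", name, id);
    }
    return 0;
}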
210
211
0
void llama_kv_cache::clear(bool data) {
212
0
    for (uint32_t s = 0; s < n_stream; ++s) {
213
0
        v_cells[s].reset();
214
0
        v_heads[s] = 0;
215
0
    }
216
217
0
    if (data) {
218
0
        for (auto & [_, buf] : ctxs_bufs) {
219
0
            ggml_backend_buffer_clear(buf.get(), 0);
220
0
        }
221
0
    }
222
0
}
223
224
0
bool llama_kv_cache::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
225
0
    GGML_ASSERT(seq_id == -1 || (seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()));
226
227
0
    if (p0 < 0) {
228
0
        p0 = 0;
229
0
    }
230
231
0
    if (p1 < 0) {
232
0
        p1 = std::numeric_limits<llama_pos>::max();
233
0
    }
234
235
0
    if (seq_id >= 0) {
236
0
        auto & cells = v_cells[seq_to_stream[seq_id]];
237
0
        auto & head  = v_heads[seq_to_stream[seq_id]];
238
239
0
        uint32_t new_head = cells.size();
240
241
0
        for (uint32_t i = 0; i < cells.size(); ++i) {
242
0
            if (!cells.pos_in(i, p0, p1)) {
243
0
                continue;
244
0
            }
245
246
0
            if (cells.seq_has(i, seq_id) && cells.seq_rm(i, seq_id)) {
247
0
                if (new_head == cells.size()) {
248
0
                    new_head = i;
249
0
                }
250
0
            }
251
0
        }
252
253
        // If we freed up a slot, set head to it so searching can start there.
254
0
        if (new_head != cells.size() && new_head < head) {
255
0
            head = new_head;
256
0
        }
257
0
    } else {
258
        // match any sequence
259
0
        for (uint32_t s = 0; s < n_stream; ++s) {
260
0
            auto & cells = v_cells[s];
261
0
            auto & head  = v_heads[s];
262
263
0
            uint32_t new_head = cells.size();
264
265
0
            for (uint32_t i = 0; i < cells.size(); ++i) {
266
0
                if (!cells.pos_in(i, p0, p1)) {
267
0
                    continue;
268
0
                }
269
270
0
                cells.rm(i);
271
272
0
                if (new_head == cells.size()) {
273
0
                    new_head = i;
274
0
                }
275
0
            }
276
277
            // If we freed up a slot, set head to it so searching can start there.
278
0
            if (new_head != cells.size() && new_head < head) {
279
0
                head = new_head;
280
0
            }
281
0
        }
282
0
    }
283
284
0
    return true;
285
0
}
286
287
0
void llama_kv_cache::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
288
0
    GGML_ASSERT(seq_id_src >= 0 && (size_t) seq_id_src < seq_to_stream.size());
289
0
    GGML_ASSERT(seq_id_dst >= 0 && (size_t) seq_id_dst < seq_to_stream.size());
290
291
0
    const auto s0 = seq_to_stream[seq_id_src];
292
0
    const auto s1 = seq_to_stream[seq_id_dst];
293
294
0
    if (s0 == s1) {
295
        // since both sequences are in the same stream, no data copy is necessary
296
        // we just have to update the cells' metadata
297
298
0
        auto & cells = v_cells[s0];
299
300
0
        if (seq_id_src == seq_id_dst) {
301
0
            return;
302
0
        }
303
304
0
        if (p0 < 0) {
305
0
            p0 = 0;
306
0
        }
307
308
0
        if (p1 < 0) {
309
0
            p1 = std::numeric_limits<llama_pos>::max();
310
0
        }
311
312
0
        for (uint32_t i = 0; i < cells.size(); ++i) {
313
0
            if (!cells.pos_in(i, p0, p1)) {
314
0
                continue;
315
0
            }
316
317
0
            if (cells.seq_has(i, seq_id_src)) {
318
0
                cells.seq_add(i, seq_id_dst);
319
0
            }
320
0
        }
321
322
0
        return;
323
0
    }
324
325
    // cross-stream sequence copies require copying the actual buffer data
326
327
0
    bool is_full = true;
328
329
0
    if (p0 > 0 && p0 + 1 < (int) get_size()) {
330
0
        is_full = false;
331
0
    }
332
333
0
    if (p1 > 0 && p1 + 1 < (int) get_size()) {
334
0
        is_full = false;
335
0
    }
336
337
0
    GGML_ASSERT(is_full && "seq_cp() is only supported for full KV buffers");
338
339
    // enqueue the copy operation - the buffer copy will be performed during the next update
340
0
    sc_info.ssrc.push_back(s0);
341
0
    sc_info.sdst.push_back(s1);
342
343
0
    v_cells[s1].reset();
344
0
    for (uint32_t i = 0; i < v_cells[s0].size(); ++i) {
345
0
        if (v_cells[s0].seq_has(i, seq_id_src)) {
346
0
            llama_pos pos   = v_cells[s0].pos_get(i);
347
0
            llama_pos shift = v_cells[s0].get_shift(i);
348
349
0
            llama_kv_cell_ext ext = v_cells[s0].ext_get(i);
350
351
0
            if (shift != 0) {
352
0
                pos -= shift;
353
0
                assert(pos >= 0);
354
0
            }
355
356
0
            v_cells[s1].pos_set(i, pos);
357
0
            v_cells[s1].seq_add(i, seq_id_dst);
358
359
0
            if (shift != 0) {
360
0
                v_cells[s1].pos_add(i, shift);
361
0
            }
362
363
0
            v_cells[s1].ext_set(i, ext);
364
0
        }
365
0
    }
366
367
0
    v_heads[s1] = v_heads[s0];
368
369
    //for (uint32_t s = 0; s < n_stream; ++s) {
370
    //    LLAMA_LOG_WARN("%s: seq %d: min = %d, max = %d\n", __func__, s, v_cells[s].seq_pos_min(s), v_cells[s].seq_pos_max(s));
371
    //}
372
0
}
373
374
0
void llama_kv_cache::seq_keep(llama_seq_id seq_id) {
375
0
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
376
377
0
    auto & cells = v_cells[seq_to_stream[seq_id]];
378
0
    auto & head  = v_heads[seq_to_stream[seq_id]];
379
380
0
    uint32_t new_head = cells.size();
381
382
0
    for (uint32_t i = 0; i < cells.size(); ++i) {
383
0
        if (cells.seq_keep(i, seq_id)) {
384
0
            if (new_head == cells.size()) {
385
0
                new_head = i;
386
0
            }
387
0
        }
388
0
    }
389
390
    // If we freed up a slot, set head to it so searching can start there.
391
0
    if (new_head != cells.size() && new_head < head) {
392
0
        head = new_head;
393
0
    }
394
0
}
395
396
0
void llama_kv_cache::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
397
0
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
398
0
    GGML_ASSERT(hparams.n_pos_per_embd() == 1 && "seq_add() is only supported for n_pos_per_embd() == 1");
399
400
0
    auto & cells = v_cells[seq_to_stream[seq_id]];
401
0
    auto & head  = v_heads[seq_to_stream[seq_id]];
402
403
0
    if (shift == 0) {
404
0
        return;
405
0
    }
406
407
0
    uint32_t new_head = cells.size();
408
409
0
    if (p0 < 0) {
410
0
        p0 = 0;
411
0
    }
412
413
0
    if (p1 < 0) {
414
0
        p1 = std::numeric_limits<llama_pos>::max();
415
0
    }
416
417
    // If there is no range then return early to avoid looping over all cells.
418
0
    if (p0 == p1) {
419
0
        return;
420
0
    }
421
422
0
    for (uint32_t i = 0; i < cells.size(); ++i) {
423
0
        if (!cells.pos_in(i, p0, p1)) {
424
0
            continue;
425
0
        }
426
427
0
        if (cells.seq_has(i, seq_id)) {
428
0
            if (cells.pos_add(i, shift)) {
429
0
                if (new_head == cells.size()) {
430
0
                    new_head = i;
431
0
                }
432
0
            }
433
0
        }
434
0
    }
435
436
    // If we freed up a slot, set head to it so searching can start there.
437
    // Otherwise we just start the next search from the beginning.
438
0
    head = new_head != cells.size() ? new_head : 0;
439
0
}
440
441
0
void llama_kv_cache::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
442
0
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
443
0
    GGML_ASSERT(hparams.n_pos_per_embd() == 1 && "seq_div() is only supported for n_pos_per_embd() == 1");
444
445
0
    auto & cells = v_cells[seq_to_stream[seq_id]];
446
447
0
    if (d == 1) {
448
0
        return;
449
0
    }
450
451
0
    if (p0 < 0) {
452
0
        p0 = 0;
453
0
    }
454
455
0
    if (p1 < 0) {
456
0
        p1 = std::numeric_limits<llama_pos>::max();
457
0
    }
458
459
    // If there is no range then return early to avoid looping over the cache.
460
0
    if (p0 == p1) {
461
0
        return;
462
0
    }
463
464
0
    for (uint32_t i = 0; i < cells.size(); ++i) {
465
0
        if (!cells.pos_in(i, p0, p1)) {
466
0
            continue;
467
0
        }
468
469
0
        if (cells.seq_has(i, seq_id)) {
470
0
            cells.pos_div(i, d);
471
0
        }
472
0
    }
473
0
}
474
475
0
llama_pos llama_kv_cache::seq_pos_min(llama_seq_id seq_id) const {
476
0
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
477
478
0
    const auto & cells = v_cells[seq_to_stream[seq_id]];
479
480
0
    return cells.seq_pos_min(seq_id);
481
0
}
482
483
0
llama_pos llama_kv_cache::seq_pos_max(llama_seq_id seq_id) const {
484
0
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
485
486
0
    const auto & cells = v_cells[seq_to_stream[seq_id]];
487
488
0
    return cells.seq_pos_max(seq_id);
489
0
}
490
491
0
std::map<ggml_backend_buffer_type_t, size_t> llama_kv_cache::memory_breakdown() const {
492
0
    std::map<ggml_backend_buffer_type_t, size_t> ret;
493
0
    for (const auto & [ctx, buf] : ctxs_bufs) {
494
0
        ggml_backend_buffer_type_t buft = ggml_backend_buffer_get_type(buf.get());
495
496
0
        if (hparams.no_alloc) {
497
0
            GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) == nullptr);
498
0
            ret[buft] += ggml_backend_alloc_ctx_tensors_from_buft_size(ctx.get(), buft);
499
0
        } else {
500
            // GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) != nullptr); // multi_buffer does not have a defined base
501
0
            ret[buft] += ggml_backend_buffer_get_size(buf.get());
502
0
        }
503
0
    }
504
505
0
    return ret;
506
0
}
507
508
llama_memory_context_ptr llama_kv_cache::init_batch(
509
            llama_batch_allocr & balloc,
510
            uint32_t n_ubatch,
511
0
            bool embd_all) {
512
0
    GGML_UNUSED(embd_all);
513
514
0
    do {
515
0
        balloc.split_reset();
516
517
0
        std::vector<llama_ubatch> ubatches;
518
0
        while (true) {
519
0
            auto ubatch = n_stream == 1 ? balloc.split_simple(n_ubatch) : balloc.split_equal(n_ubatch, true);
520
521
0
            if (ubatch.n_tokens == 0) {
522
0
                break;
523
0
            }
524
525
0
            ubatches.push_back(std::move(ubatch)); // NOLINT
526
0
        }
527
528
0
        if (balloc.get_n_used() < balloc.get_n_tokens()) {
529
            // failed to find a suitable split
530
0
            break;
531
0
        }
532
533
0
        auto sinfos = prepare(ubatches);
534
0
        if (sinfos.empty()) {
535
0
            break;
536
0
        }
537
538
0
        return std::make_unique<llama_kv_cache_context>(
539
0
                this, std::move(sinfos), std::move(ubatches));
540
0
    } while (false);
541
542
0
    return std::make_unique<llama_kv_cache_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
543
0
}
544
545
0
llama_memory_context_ptr llama_kv_cache::init_full() {
546
0
    return std::make_unique<llama_kv_cache_context>(this);
547
0
}
548
549
0
llama_memory_context_ptr llama_kv_cache::init_update(llama_context * lctx, bool optimize) {
550
0
    GGML_UNUSED(optimize);
551
552
0
    bool do_shift = get_has_shift();
553
554
0
    return std::make_unique<llama_kv_cache_context>(this, lctx, do_shift, std::move(sc_info));
555
0
}
556
557
0
llama_kv_cache::slot_info_vec_t llama_kv_cache::prepare(const std::vector<llama_ubatch> & ubatches) {
558
0
    llama_kv_cache::slot_info_vec_t res;
559
560
0
    struct state_t {
561
0
        slot_info sinfo; // slot info for the ubatch
562
563
0
        std::vector<uint32_t> v_heads_old; // old positions of the heads, before placing the ubatch
564
565
0
        std::vector<llama_kv_cells> v_cells; // copy of the old cells, before placing the ubatch
566
0
    };
567
568
    // remember the old state of the cells so we can restore it in the end
569
0
    std::vector<state_t> states;
570
571
0
    bool success = true;
572
573
0
    for (const auto & ubatch : ubatches) {
574
        // only find a suitable slot for the ubatch. don't modify the cells yet
575
0
        const auto sinfo_new = find_slot(ubatch, false);
576
0
        if (sinfo_new.empty()) {
577
0
            success = false;
578
0
            break;
579
0
        }
580
581
        // remember the position that we found
582
0
        res.push_back(sinfo_new);
583
584
        // store the old state of the cells in the recovery stack
585
0
        {
586
0
            state_t state = { sinfo_new, v_heads, {} };
587
588
0
            for (uint32_t s = 0; s < sinfo_new.n_stream(); ++s) {
589
0
                auto & cells = v_cells[sinfo_new.strm[s]];
590
591
0
                state.v_cells.push_back(cells.cp(sinfo_new.idxs[s]));
592
0
            }
593
594
0
            states.push_back(std::move(state));
595
0
        }
596
597
        // now emplace the ubatch
598
0
        apply_ubatch(sinfo_new, ubatch);
599
0
    }
600
601
0
    GGML_ASSERT(!states.empty() || !success);
602
603
    // iterate backwards and restore the cells to their original state
604
0
    for (auto it = states.rbegin(); it != states.rend(); ++it) {
605
0
        const auto & sinfo = it->sinfo;
606
607
0
        for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
608
0
            auto & cells = v_cells[sinfo.strm[s]];
609
0
            auto & head  = v_heads[sinfo.strm[s]];
610
611
0
            cells.set(sinfo.idxs[s], it->v_cells[s]);
612
0
            head = it->v_heads_old[s];
613
0
        }
614
0
    }
615
616
0
    if (!success) {
617
0
        return {};
618
0
    }
619
620
0
    return res;
621
0
}
622
623
0
bool llama_kv_cache::update(llama_context * lctx, bool do_shift, const stream_copy_info & sc_info) {
624
0
    bool updated = false;
625
626
0
    auto * sched = lctx->get_sched();
627
628
0
    if (!sc_info.empty()) {
629
0
        assert(n_stream > 1 && "stream copy should never happen with a single stream");
630
631
0
        llama_synchronize(lctx);
632
633
0
        const size_t n_copy = sc_info.ssrc.size();
634
635
0
        for (size_t i = 0; i < n_copy; ++i) {
636
0
            const auto ssrc = sc_info.ssrc[i];
637
0
            const auto sdst = sc_info.sdst[i];
638
639
0
            assert(ssrc < n_stream);
640
0
            assert(sdst < n_stream);
641
642
0
            LLAMA_LOG_DEBUG("%s: copying KV buffer: stream %d to stream %d\n", __func__, ssrc, sdst);
643
644
0
            assert(ssrc != sdst);
645
646
0
            for (uint32_t il = 0; il < layers.size(); ++il) {
647
0
                const auto & layer = layers[il];
648
649
0
                ggml_backend_tensor_copy(layer.k_stream[ssrc], layer.k_stream[sdst]);
650
0
                ggml_backend_tensor_copy(layer.v_stream[ssrc], layer.v_stream[sdst]);
651
0
            }
652
0
        }
653
0
    }
654
655
0
    if (do_shift) {
656
0
        if (!get_can_shift()) {
657
0
            GGML_ABORT("The current KV cache / model configuration does not support K-shift");
658
0
        }
659
660
0
        LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);
661
662
        // apply K-shift if needed
663
0
        if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
664
0
            ggml_backend_sched_reset(sched);
665
666
0
            auto * res = lctx->get_gf_res_reserve();
667
668
0
            res->reset();
669
670
0
            auto * gf = build_graph_shift(res, lctx);
671
0
            if (!ggml_backend_sched_alloc_graph(sched, gf)) {
672
0
                LLAMA_LOG_ERROR("%s: failed to allocate compute graph for K-shift\n", __func__);
673
0
                return updated;
674
0
            }
675
676
0
            res->set_inputs(nullptr);
677
678
0
            if (lctx->graph_compute(gf, false) != GGML_STATUS_SUCCESS) {
679
0
                LLAMA_LOG_ERROR("%s: failed to compute K-shift\n", __func__);
680
0
                return updated;
681
0
            }
682
683
0
            updated = true;
684
0
        }
685
686
0
        for (uint32_t s = 0; s < n_stream; ++s) {
687
0
            auto & cells = v_cells[s];
688
689
0
            cells.reset_shift();
690
0
        }
691
0
    }
692
693
0
    return updated;
694
0
}
695
696
0
llama_kv_cache::slot_info llama_kv_cache::find_slot(const llama_ubatch & ubatch, bool cont) const {
697
698
0
    if (debug > 0) {
699
0
        for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
700
0
            const auto seq_id = ubatch.seq_id_unq[s];
701
0
            const auto stream_id = seq_to_stream[seq_id];
702
0
            const auto & cells = v_cells[stream_id];
703
0
            const uint32_t head_cur = v_heads[stream_id];
704
705
0
            LLAMA_LOG_DEBUG("%s: stream[%d], n = %5d, used = %5d, head = %5d, size = %5d, n_swa = %5d\n",
706
0
                    __func__, stream_id, cells.used_max_p1(), cells.get_used(), head_cur, get_size(), n_swa);
707
708
0
            if ((debug == 2 && n_swa > 0) || debug > 2) {
709
0
                std::string ss;
710
0
                for (uint32_t i = 0; i < cells.size(); ++i) {
711
0
                    if (cells.is_empty(i)) {
712
0
                        ss += '.';
713
0
                    } else {
714
0
                        assert(cells.seq_count(i) >= 1);
715
716
0
                        if (cells.seq_count(i) == 1) {
717
0
                            ss += std::to_string(cells.seq_get(i));
718
0
                        } else {
719
0
                            ss += 'M';
720
0
                        }
721
0
                    }
722
0
                    if (i%256 == 255) {
723
0
                        ss += " *";
724
0
                        ss += '\n';
725
0
                    }
726
0
                }
727
0
                LLAMA_LOG_DEBUG("\n%s\n", ss.c_str());
728
0
            }
729
730
0
            if ((debug == 2 && n_swa > 0) || debug > 2) {
731
0
                std::string ss;
732
0
                for (uint32_t i = 0; i < cells.size(); ++i) {
733
0
                    std::string cur;
734
0
                    if (cells.is_empty(i)) {
735
0
                        cur = '.';
736
0
                    } else {
737
0
                        cur = std::to_string(cells.pos_get(i));
738
0
                    }
739
0
                    const int n = cur.size();
740
0
                    for (int j = 0; j < 5 - n; ++j) {
741
0
                        cur += ' ';
742
0
                    }
743
0
                    ss += cur;
744
0
                    if (i%256 == 255) {
745
0
                        ss += " *";
746
0
                    }
747
0
                    if (i%64 == 63) {
748
0
                        ss += '\n';
749
0
                    }
750
0
                }
751
0
                LLAMA_LOG_DEBUG("\n%s\n", ss.c_str());
752
0
            }
753
754
0
            for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
755
0
                if (cells.seq_pos_min(s) < 0) {
756
0
                    continue;
757
0
                }
758
759
0
                LLAMA_LOG_DEBUG("%s: stream[%d] min[%d] = %5d, max[%d] = %5d\n", __func__, stream_id, s, cells.seq_pos_min(s), s, cells.seq_pos_max(s));
760
0
            }
761
0
        }
762
0
    }
763
764
0
    uint32_t n_tokens = ubatch.n_tokens;
765
0
    uint32_t n_seqs   = 1;
766
767
0
    if (n_stream > 1) {
768
0
        GGML_ASSERT(n_tokens % ubatch.n_seqs_unq == 0);
769
770
0
        n_seqs   = ubatch.n_seqs_unq;
771
0
        n_tokens = n_tokens / n_seqs;
772
0
    }
773
774
0
    slot_info res = {
775
0
        /*.s0   =*/ LLAMA_MAX_SEQ,
776
0
        /*.s1   =*/ 0,
777
0
        /*.strm =*/ { },
778
0
        /*.idxs =*/ { },
779
0
    };
780
781
0
    res.resize(n_seqs);
782
783
0
    for (uint32_t s = 0; s < n_seqs; ++s) {
784
0
        const auto seq_id = ubatch.seq_id_unq[s];
785
786
0
        if (n_stream > 1) {
787
0
            GGML_ASSERT(ubatch.n_seq_id[s*n_tokens]    == 1);
788
0
            GGML_ASSERT(ubatch.seq_id  [s*n_tokens][0] == seq_id);
789
0
        }
790
791
0
        res.s0 = std::min<uint32_t>(res.s0, seq_to_stream[seq_id]);
792
0
        res.s1 = std::max<uint32_t>(res.s1, seq_to_stream[seq_id]);
793
794
0
        res.strm[s] = seq_to_stream[seq_id];
795
0
        res.idxs[s].reserve(n_tokens);
796
797
0
        const auto & cells = v_cells[seq_to_stream[seq_id]];
798
799
0
        uint32_t head_cur = v_heads[seq_to_stream[seq_id]];
800
801
        // if we have enough unused cells before the current head ->
802
        //   better to start searching from the beginning of the cache, hoping to fill it
803
0
        if (head_cur > cells.get_used() + 2*n_tokens) {
804
0
            head_cur = 0;
805
0
        }
806
807
0
        if (n_tokens > cells.size()) {
808
0
            LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size());
809
0
            return { };
810
0
        }
811
812
0
        uint32_t n_tested = 0;
813
814
        // for contiguous slots, we test that all tokens in the ubatch fit, starting from the current head
816
        // for non-contiguous slots, we test the tokens one by one
816
0
        const uint32_t n_test = cont ? n_tokens : 1;
817
818
0
        while (true) {
819
0
            if (head_cur + n_test > cells.size()) {
820
0
                n_tested += cells.size() - head_cur;
821
0
                head_cur = 0;
822
0
                continue;
823
0
            }
824
825
0
            for (uint32_t i = 0; i < n_test; i++) {
826
0
                const auto idx = head_cur;
827
828
0
                head_cur++;
829
0
                n_tested++;
830
831
                //const llama_pos    pos    = ubatch.pos[i];
832
                //const llama_seq_id seq_id = ubatch.seq_id[i][0];
833
834
                // can we use this cell? either:
835
                //  - the cell is empty
836
                //  - the cell is occupied only by one sequence:
837
                //    - (disabled) mask causally, if the sequence is the same as the one we are inserting
838
                //    - mask SWA, using current max pos for that sequence in the cache
839
                //                always insert in the cell with minimum pos
840
0
                bool can_use = cells.is_empty(idx);
841
842
0
                if (!can_use && cells.seq_count(idx) == 1) {
843
0
                    const llama_pos pos_cell = cells.pos_get(idx);
844
845
                    // (disabled) causal mask
846
                    // note: it's better to purge any "future" tokens beforehand
847
                    //if (cells.seq_has(idx, seq_id)) {
848
                    //    can_use = pos_cell >= pos;
849
                    //}
850
851
0
                    if (!can_use) {
852
0
                        const llama_seq_id seq_id_cell = cells.seq_get(idx);
853
854
                        // SWA mask
855
0
                        if (is_masked_swa(pos_cell, cells.seq_pos_max(seq_id_cell) + 1)) {
856
0
                            can_use = true;
857
0
                        }
858
0
                    }
859
0
                }
860
861
0
                if (can_use) {
862
0
                    res.idxs[s].push_back(idx);
863
0
                } else {
864
0
                    if (cont) {
865
0
                        break;
866
0
                    }
867
0
                }
868
0
            }
869
870
0
            if (res.idxs[s].size() == n_tokens) {
871
0
                break;
872
0
            }
873
874
0
            if (cont) {
875
0
                res.idxs[s].clear();
876
0
            }
877
878
0
            if (n_tested >= cells.size()) {
879
                //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
880
0
                return { };
881
0
            }
882
0
        }
883
884
        // we didn't find a suitable slot - return empty result
885
0
        if (res.idxs[s].size() < n_tokens) {
886
0
            return { };
887
0
        }
888
0
    }
889
890
0
    assert(res.s1 >= res.s0);
891
892
0
    return res;
893
0
}
894
895
0
void llama_kv_cache::apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch) {
896
    // keep track of the max sequence position that we would overwrite with this ubatch
897
    // for a non-SWA cache, this will always be empty
898
0
    llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ];
899
0
    for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
900
0
        seq_pos_max_rm[s] = -1;
901
0
    }
902
903
0
    assert(ubatch.n_tokens == sinfo.n_stream()*sinfo.size());
904
905
0
    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
906
0
        for (uint32_t ii = 0; ii < sinfo.size(); ++ii) {
907
0
            const uint32_t i = s*sinfo.size() + ii;
908
909
0
            auto & cells = v_cells[sinfo.strm[s]];
910
911
0
            const auto idx = sinfo.idxs[s][ii];
912
913
0
            if (!cells.is_empty(idx)) {
914
0
                assert(cells.seq_count(idx) == 1);
915
916
0
                const llama_seq_id seq_id = cells.seq_get(idx);
917
0
                const llama_pos    pos    = cells.pos_get(idx);
918
919
0
                seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos);
920
921
0
                cells.rm(idx);
922
0
            }
923
924
0
            cells.pos_set(idx, ubatch.pos[i]);
925
926
0
            if (ubatch.is_pos_2d()) {
927
0
                llama_kv_cell_ext ext {
928
0
                    /*.x =*/ ubatch.pos[i + ubatch.n_tokens*2],
929
0
                    /*.y =*/ ubatch.pos[i + ubatch.n_tokens],
930
0
                };
931
0
                cells.ext_set(idx, ext);
932
0
            }
933
934
0
            for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) {
935
0
                cells.seq_add(idx, ubatch.seq_id[i][s]);
936
0
            }
937
0
        }
938
0
    }
939
940
    // note: we want to preserve the invariant that all positions between [pos_min, pos_max] for each sequence
941
    //       will be present in the cache. so we have to purge any position which is less than those we would overwrite
942
    //       ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092
943
0
    for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
944
0
        if (seq_pos_max_rm[s] == -1) {
945
0
            continue;
946
0
        }
947
948
0
        GGML_ASSERT(s < seq_to_stream.size());
949
950
0
        auto & cells = v_cells[seq_to_stream[s]];
951
952
0
        if (cells.seq_pos_min(s) <= seq_pos_max_rm[s]) {
953
0
            LLAMA_LOG_DEBUG("%s: purging positions [%d, %d] of sequence %d from KV cache\n",
954
0
                    __func__, cells.seq_pos_min(s), seq_pos_max_rm[s], s);
955
956
0
            seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1);
957
0
        }
958
0
    }
959
960
    // move the head at the end of the slot
961
0
    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
962
0
        auto & head = v_heads[sinfo.strm[s]];
963
964
0
        head = sinfo.idxs[s].back() + 1;
965
0
    }
966
0
}
967
968
0
bool llama_kv_cache::get_can_shift() const {
969
0
    return true;
970
0
}
971
972
0
uint32_t llama_kv_cache::get_size() const {
973
0
    const auto & cells = v_cells[seq_to_stream[0]];
974
975
0
    return cells.size();
976
0
}
977
978
0
uint32_t llama_kv_cache::get_n_stream() const {
979
0
    return n_stream;
980
0
}
981
982
0
bool llama_kv_cache::get_has_shift() const {
983
0
    bool result = false;
984
985
0
    for (uint32_t s = 0; s < n_stream; ++s) {
986
0
        result |= v_cells[s].get_has_shift();
987
0
    }
988
989
0
    return result;
990
0
}
991
992
0
uint32_t llama_kv_cache::get_n_kv(const slot_info & sinfo) const {
993
0
    uint32_t result = 0;
994
995
    // pad the n_kv value so that the graph remains constant across batches and can be reused
996
    // note: this also helps some backends with performance (f.ex https://github.com/ggml-org/llama.cpp/pull/16812#issuecomment-3455112220)
997
0
    const uint32_t n_pad_cur = std::max(n_pad, 256u);
998
999
0
    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
1000
0
        const auto & cells = v_cells[sinfo.strm[s]];
1001
1002
0
        result = std::max(std::min(cells.size(), std::max(n_pad_cur, GGML_PAD(cells.used_max_p1(), n_pad_cur))), result);
1003
0
    }
1004
1005
0
    return result;
1006
0
}
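A small worked example of the padding arithmetic in get_n_kv above. All numbers are hypothetical, pad_up restates the usual GGML_PAD rounding (round x up to the next multiple of n), and the per-stream max accumulation is omitted for brevity.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// same rounding as GGML_PAD: round x up to the next multiple of n
static uint32_t pad_up(uint32_t x, uint32_t n) { return (x + n - 1) / n * n; }

int main() {
    const uint32_t size        = 4096; // hypothetical cells.size()
    const uint32_t used_max_p1 = 300;  // hypothetical cells.used_max_p1()
    const uint32_t n_pad_cur   = 256;  // std::max(n_pad, 256u)

    const uint32_t n_kv = std::min(size, std::max(n_pad_cur, pad_up(used_max_p1, n_pad_cur)));

    // prints 512: 300 used cells are padded up, so the graph shape stays stable across small batches
    std::printf("n_kv = %u\n", n_kv);
    return 0;
}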
1007
1008
0
ggml_tensor * llama_kv_cache::get_k(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const {
1009
0
    const int32_t ikv = map_layer_ids.at(il);
1010
1011
0
    auto * k = layers[ikv].k;
1012
1013
0
    const uint64_t kv_size      = get_size();
1014
0
    const uint64_t n_embd_k_gqa = k->ne[0];
1015
1016
0
    assert(n_embd_k_gqa == hparams.n_embd_k_gqa(il));
1017
1018
0
    const uint32_t ns = sinfo.s1 - sinfo.s0 + 1;
1019
1020
0
    return ggml_view_4d(ctx, k,
1021
0
            hparams.n_embd_head_k, hparams.n_head_kv(il), n_kv, ns,
1022
0
            ggml_row_size(k->type, hparams.n_embd_head_k),
1023
0
            ggml_row_size(k->type, n_embd_k_gqa),
1024
0
            ggml_row_size(k->type, n_embd_k_gqa*kv_size),
1025
0
            ggml_row_size(k->type, n_embd_k_gqa*kv_size)*sinfo.s0);
1026
0
}
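A worked example of the stride arithmetic behind the ggml_view_4d call in get_k above, assuming an F16 cache where ggml_row_size(type, n) is simply 2*n bytes; all shapes are made up.

#include <cstdint>
#include <cstdio>

int main() {
    // hypothetical F16 K cache: 2 bytes/element, 64-dim heads, 8 KV heads, 1024 cells
    const uint64_t type_size     = 2;
    const uint64_t n_embd_head_k = 64;
    const uint64_t n_head_kv     = 8;
    const uint64_t kv_size       = 1024;
    const uint64_t n_embd_k_gqa  = n_embd_head_k*n_head_kv; // 512

    const uint64_t nb1 = type_size*n_embd_head_k;         // step between heads within a cell
    const uint64_t nb2 = type_size*n_embd_k_gqa;          // step between cells
    const uint64_t nb3 = type_size*n_embd_k_gqa*kv_size;  // step between streams
    const uint64_t s0  = 1;                               // first stream covered by the view

    // the view addresses [head, cell, stream] in place, starting nb3*s0 bytes into the tensor
    std::printf("nb1=%llu nb2=%llu nb3=%llu offset=%llu\n",
            (unsigned long long) nb1, (unsigned long long) nb2,
            (unsigned long long) nb3, (unsigned long long) (nb3*s0));
    return 0;
}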
1027
1028
0
ggml_tensor * llama_kv_cache::get_v(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const {
1029
0
    const int32_t ikv = map_layer_ids.at(il);
1030
1031
0
    auto * v = layers[ikv].v;
1032
1033
0
    const uint64_t kv_size      = get_size();
1034
0
    const uint64_t n_embd_v_gqa = v->ne[0];
1035
1036
    // [TAG_V_CACHE_VARIABLE]
1037
0
    assert(n_embd_v_gqa >= hparams.n_embd_v_gqa(il));
1038
1039
0
    const uint32_t ns = sinfo.s1 - sinfo.s0 + 1;
1040
1041
0
    if (!v_trans) {
1042
        // note: v->nb[1] <= v->nb[2]
1043
0
        return ggml_view_4d(ctx, v,
1044
0
                hparams.n_embd_head_v, hparams.n_head_kv(il), n_kv, ns,
1045
0
                ggml_row_size(v->type, hparams.n_embd_head_v),          // v->nb[1]
1046
0
                ggml_row_size(v->type, n_embd_v_gqa),                   // v->nb[2]
1047
0
                ggml_row_size(v->type, n_embd_v_gqa*kv_size),           // v->nb[3]
1048
0
                ggml_row_size(v->type, n_embd_v_gqa*kv_size)*sinfo.s0);
1049
0
    }
1050
1051
    // note: v->nb[1] > v->nb[2]
1052
0
    return ggml_view_4d(ctx, v,
1053
0
            n_kv, hparams.n_head_kv(il), hparams.n_embd_head_v, ns,
1054
0
            ggml_row_size(v->type, kv_size*hparams.n_embd_head_v),  // v->nb[1]
1055
0
            ggml_row_size(v->type, kv_size),                        // v->nb[2]
1056
0
            ggml_row_size(v->type, kv_size*n_embd_v_gqa),           // v->nb[3]
1057
0
            ggml_row_size(v->type, kv_size*n_embd_v_gqa)*sinfo.s0);
1058
0
}
1059
1060
0
ggml_tensor * llama_kv_cache::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const {
1061
0
    GGML_UNUSED(sinfo);
1062
1063
0
    const int32_t ikv = map_layer_ids.at(il);
1064
1065
0
    ggml_tensor * k = layers[ikv].k;
1066
1067
0
    const int64_t n_embd_head = k_cur->ne[0];
1068
0
    const int64_t n_head      = k_cur->ne[1];
1069
0
    const int64_t n_tokens    = k_cur->ne[2];
1070
1071
0
    const int64_t n_embd_gqa = n_embd_head*n_head;
1072
1073
    // we can merge dims 0 and 1
1074
    // TODO: add ggml helper function for this?
1075
0
    GGML_ASSERT(ggml_row_size(k_cur->type, n_embd_head) == k_cur->nb[1]);
1076
1077
0
    k_cur = ggml_view_2d(ctx, k_cur, n_embd_gqa, n_tokens, k_cur->nb[2], 0);
1078
1079
0
    const int64_t n_stream = k->ne[2];
1080
1081
0
    if (n_stream > 1) {
1082
0
        const int64_t kv_size = get_size();
1083
1084
0
        assert(n_embd_gqa == k->ne[0]);
1085
0
        assert(kv_size    == k->ne[1]);
1086
1087
        // merge the buffer across all streams because the idxs are global
1088
0
        k = ggml_reshape_2d(ctx, k, n_embd_gqa, kv_size*n_stream);
1089
0
    }
1090
1091
    // store the current K values into the cache
1092
0
    return ggml_set_rows(ctx, k, k_cur, k_idxs);
1093
0
}
1094
1095
0
ggml_tensor * llama_kv_cache::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const {
1096
0
    GGML_UNUSED(sinfo);
1097
1098
0
    const int32_t ikv = map_layer_ids.at(il);
1099
1100
0
    auto * v = layers[ikv].v;
1101
1102
0
    const int64_t n_embd_head = v_cur->ne[0];
1103
0
    const int64_t n_head      = v_cur->ne[1];
1104
0
    const int64_t n_tokens    = v_cur->ne[2];
1105
1106
0
    const int64_t n_embd_gqa = n_embd_head*n_head;
1107
1108
    // we can merge dims 0 and 1
1109
0
    GGML_ASSERT(ggml_row_size(v_cur->type, n_embd_head) == v_cur->nb[1]);
1110
1111
0
    const int64_t n_stream = v->ne[2];
1112
1113
    // take this branch when FA is enabled (the V cache is not transposed)
1114
0
    if (!v_trans) {
1115
0
        v_cur = ggml_view_2d(ctx, v_cur, n_embd_gqa, n_tokens, v_cur->nb[2], 0);
1116
1117
0
        if (n_stream > 1) {
1118
0
            const int64_t kv_size = get_size();
1119
1120
0
            assert(n_embd_gqa == v->ne[0]);
1121
0
            assert(kv_size    == v->ne[1]);
1122
1123
            // merge the buffer across all streams because the idxs are global
1124
0
            v = ggml_reshape_2d(ctx, v, n_embd_gqa, kv_size*n_stream);
1125
0
        }
1126
1127
0
        return ggml_set_rows(ctx, v, v_cur, v_idxs);
1128
0
    }
1129
1130
0
    if (ggml_row_size(v_cur->type, n_embd_gqa) == v_cur->nb[2]) {
1131
        // we can merge dims 0, 1 and 2
1132
0
        v_cur = ggml_reshape_2d(ctx, v_cur, n_embd_gqa, n_tokens);
1133
0
    } else {
1134
        // otherwise -> make a copy to get contiguous data
1135
0
        v_cur = ggml_cont_2d   (ctx, v_cur, n_embd_gqa, n_tokens);
1136
0
    }
1137
1138
    // [TAG_V_CACHE_VARIABLE]
1139
0
    if (n_embd_gqa < v->ne[0]) {
1140
0
        v_cur = ggml_pad(ctx, v_cur, v->ne[0] - n_embd_gqa, 0, 0, 0);
1141
0
    }
1142
1143
    // in this branch the v_idxs are constructed in such a way that each row is a single head element
1144
0
    ggml_tensor * v_view = ggml_reshape_2d(ctx, v, 1, ggml_nelements(v));
1145
1146
0
    v_cur = ggml_reshape_2d(ctx, v_cur, 1, ggml_nelements(v_cur));
1147
1148
0
    return ggml_set_rows(ctx, v_view, v_cur, v_idxs);
1149
0
}
1150
1151
0
ggml_tensor * llama_kv_cache::build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
1152
0
    const uint32_t n_tokens = ubatch.n_tokens;
1153
1154
0
    ggml_tensor * k_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens);
1155
1156
0
    ggml_set_input(k_idxs);
1157
1158
0
    return k_idxs;
1159
0
}
1160
1161
0
ggml_tensor * llama_kv_cache::build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
1162
0
    const uint32_t n_tokens = ubatch.n_tokens;
1163
1164
0
    ggml_tensor * v_idxs;
1165
1166
0
    if (!v_trans) {
1167
0
        v_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens);
1168
0
    } else {
1169
0
        v_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens*hparams.n_embd_v_gqa_max());
1170
0
    }
1171
1172
0
    ggml_set_input(v_idxs);
1173
1174
0
    return v_idxs;
1175
0
}
1176
1177
0
void llama_kv_cache::set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const {
1178
0
    const uint32_t n_tokens = ubatch->n_tokens;
1179
0
    GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream());
1180
1181
0
    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
1182
0
    int64_t * data = (int64_t *) dst->data;
1183
1184
0
    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
1185
0
        const int64_t offs = sinfo.strm[s]*get_size();
1186
1187
0
        for (uint32_t i = 0; i < sinfo.size(); ++i) {
1188
0
            data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i];
1189
0
        }
1190
0
    }
1191
0
}
1192
1193
0
void llama_kv_cache::set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const {
1194
0
    const uint32_t n_tokens = ubatch->n_tokens;
1195
0
    GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream());
1196
1197
0
    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
1198
0
    int64_t * data = (int64_t *) dst->data;
1199
1200
0
    if (!v_trans) {
1201
0
        for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
1202
0
            const int64_t offs = sinfo.strm[s]*get_size();
1203
1204
0
            for (uint32_t i = 0; i < sinfo.size(); ++i) {
1205
0
                data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i];
1206
0
            }
1207
0
        }
1208
0
    } else {
1209
        // note: the V cache is transposed when not using flash attention
1210
0
        const int64_t kv_size = get_size();
1211
1212
0
        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa_max();
1213
1214
0
        for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
1215
0
            const int64_t offs = sinfo.strm[s]*kv_size*n_embd_v_gqa;
1216
1217
0
            for (uint32_t i = 0; i < sinfo.size(); ++i) {
1218
0
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1219
0
                    data[s*sinfo.size()*n_embd_v_gqa + i*n_embd_v_gqa + j] = offs + j*kv_size + sinfo.idxs[s][i];
1220
0
                }
1221
0
            }
1222
0
        }
1223
0
    }
1224
0
}
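A worked example of the transposed-V index computation in set_input_v_idxs above, using made-up sizes; it only reproduces the offs + j*kv_size + idx arithmetic and nothing else.

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t kv_size      = 8; // made-up cache size
    const int64_t n_embd_v_gqa = 4; // made-up embedding rows per token
    const int64_t offs         = 0; // single stream -> no stream offset
    const int64_t cell         = 5; // cell index this token was assigned (sinfo.idxs[s][i])

    for (int64_t j = 0; j < n_embd_v_gqa; ++j) {
        // same formula as above for the transposed V cache
        std::printf("row %lld -> flat index %lld\n",
                (long long) j, (long long) (offs + j*kv_size + cell));
    }
    // prints 5, 13, 21, 29: the rows of one token end up kv_size elements apart
    return 0;
}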
1225
1226
0
void llama_kv_cache::set_input_k_shift(ggml_tensor * dst) const {
1227
0
    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
1228
1229
0
    int32_t * data = (int32_t *) dst->data;
1230
1231
0
    for (uint32_t s = 0; s < n_stream; ++s) {
1232
0
        const auto & cells = v_cells[s];
1233
1234
0
        for (uint32_t i = 0; i < cells.size(); ++i) {
1235
0
            data[s*cells.size() + i] = cells.is_empty(i) ? 0 : cells.get_shift(i);
1236
0
        }
1237
0
    }
1238
0
}
1239
1240
0
void llama_kv_cache::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
1241
0
    const uint32_t n_tokens = ubatch->n_tokens;
1242
1243
0
    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
1244
0
    float * data = (float *) dst->data;
1245
1246
0
    const int64_t n_kv     = dst->ne[0];
1247
0
    const int64_t n_stream = dst->ne[3]; // num streams in the current ubatch
1248
1249
0
    GGML_ASSERT(n_tokens%n_stream == 0);
1250
1251
    // n_tps == n_tokens_per_stream
1252
0
    const int64_t n_tps = n_tokens/n_stream;
1253
1254
0
    std::fill(data, data + ggml_nelements(dst), -INFINITY);
1255
1256
    // Use only the previous KV cells of the correct sequence for each token of the ubatch.
1257
    // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
1258
    // Example with a cache of 10 tokens, 2 tokens populated in cache and 3 tokens in batch:
1259
    //   Causal mask:
1260
    //      xxx-------
1261
    //      xxxx------
1262
    //      xxxxx-----
1263
    //   Non-causal mask:
1264
    //      xxxxx-----
1265
    //      xxxxx-----
1266
    //      xxxxx-----
1267
    // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615
1268
    // TODO: optimize this section
1269
0
    for (uint32_t h = 0; h < 1; ++h) {
1270
0
        for (uint32_t s = 0; s < n_stream; ++s) {
1271
0
            for (uint32_t ii = 0; ii < n_tps; ++ii) {
1272
0
                const uint32_t i = s*n_tps + ii;
1273
1274
0
                const llama_seq_id seq_id = ubatch->seq_id[i][0];
1275
1276
0
                const auto & cells = v_cells[seq_to_stream[seq_id]];
1277
1278
0
                const llama_pos p1 = ubatch->pos[i];
1279
1280
                // for M-RoPE
1281
0
                const bool is_2d = ubatch->is_pos_2d();
1282
0
                const llama_pos p1_x = is_2d ? ubatch->pos[i + ubatch->n_tokens*2] : 0;
1283
0
                const llama_pos p1_y = is_2d ? ubatch->pos[i + ubatch->n_tokens]   : 0;
1284
1285
0
                const uint64_t idst = n_kv*(h*n_stream*n_tps + s*n_tps + ii);
1286
1287
0
                for (uint32_t j = 0; j < n_kv; ++j) {
1288
0
                    if (cells.is_empty(j)) {
1289
0
                        continue;
1290
0
                    }
1291
1292
                    // mask the token if not the same sequence
1293
0
                    if (!cells.seq_has(j, seq_id)) {
1294
0
                        continue;
1295
0
                    }
1296
1297
0
                    const llama_pos p0 = cells.pos_get(j);
1298
1299
                    // mask future tokens
1300
0
                    if (causal_attn && p0 > p1) {
1301
0
                        continue;
1302
0
                    }
1303
1304
                    // M-RoPE causal mask
1305
0
                    if (causal_attn && is_2d && p0 == p1) {
1306
0
                        const auto & p0_ext = cells.ext_get(j);
1307
0
                        if (p0_ext.is_2d_gt(p1_x, p1_y)) {
1308
0
                            continue;
1309
0
                        }
1310
0
                    }
1311
1312
                    // apply SWA if any
1313
0
                    if (is_masked_swa(p0, p1)) {
1314
0
                        continue;
1315
0
                    }
1316
1317
0
                    data[idst + j] = hparams.use_alibi ? -std::abs(p0 - p1) : 0.0f;
1318
0
                }
1319
0
            }
1320
0
        }
1321
0
    }
1322
0
}
1323
1324
0
void llama_kv_cache::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const {
1325
0
    const int64_t n_tokens = ubatch->n_tokens;
1326
1327
0
    GGML_ASSERT(n_stream == 1 && "TODO: support multiple streams");
1328
0
    const auto & cells = v_cells[0];
1329
1330
0
    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
1331
0
    GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing
1332
1333
0
    int32_t * data = (int32_t *) dst->data;
1334
1335
0
    const int32_t n_kv = dst->ne[0];
1336
1337
0
    for (int h = 0; h < 1; ++h) {
1338
0
        for (int i = 0; i < n_tokens; ++i) {
1339
0
            for (int j = 0; j < n_kv; ++j) {
1340
                // the position when the cell is empty is irrelevant - it will be masked out later in the attention
1341
0
                const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j);
1342
1343
0
                data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false);
1344
0
            }
1345
0
        }
1346
0
    }
1347
0
}
1348
1349
0
size_t llama_kv_cache::total_size() const {
1350
0
    size_t size = 0;
1351
1352
0
    for (const auto & [_, buf] : ctxs_bufs) {
1353
0
        size += ggml_backend_buffer_get_size(buf.get());
1354
0
    }
1355
1356
0
    return size;
1357
0
}
1358
1359
0
size_t llama_kv_cache::size_k_bytes() const {
1360
0
    size_t size_k_bytes = 0;
1361
1362
0
    for (const auto & layer : layers) {
1363
0
        size_k_bytes += ggml_nbytes(layer.k);
1364
0
    }
1365
1366
0
    return size_k_bytes;
1367
0
}
1368
1369
0
size_t llama_kv_cache::size_v_bytes() const {
1370
0
    size_t size_v_bytes = 0;
1371
1372
0
    for (const auto & layer : layers) {
1373
0
        size_v_bytes += ggml_nbytes(layer.v);
1374
0
    }
1375
1376
0
    return size_v_bytes;
1377
0
}
1378
1379
ggml_tensor * llama_kv_cache::build_rope_shift(
1380
        const llama_cparams & cparams,
1381
               ggml_context * ctx,
1382
                ggml_tensor * cur,
1383
                ggml_tensor * shift,
1384
                ggml_tensor * factors,
1385
                      float   freq_base,
1386
0
                      float   freq_scale) const {
1387
0
    const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;
1388
1389
0
    const auto & yarn_ext_factor  = cparams.yarn_ext_factor;
1390
0
    const auto & yarn_beta_fast   = cparams.yarn_beta_fast;
1391
0
    const auto & yarn_beta_slow   = cparams.yarn_beta_slow;
1392
0
    const auto & yarn_attn_factor = cparams.yarn_attn_factor;
1393
1394
0
    const auto & n_rot     = hparams.n_rot;
1395
0
    const auto & rope_type = hparams.rope_type == LLAMA_ROPE_TYPE_MROPE || hparams.rope_type == LLAMA_ROPE_TYPE_IMROPE
1396
                                // @ngxson : this is a workaround
1397
                                // for M-RoPE, we want to rotate the whole vector when doing KV shift
1398
                                // a normal RoPE should work, we just need to use the correct ordering
1399
                                // ref: https://github.com/ggml-org/llama.cpp/pull/13870
1400
0
                                ? LLAMA_ROPE_TYPE_NEOX
1401
0
                                : hparams.rope_type;
1402
1403
0
    ggml_tensor * tmp;
1404
1405
0
    if (ggml_is_quantized(cur->type)) {
1406
        // dequantize to f32 -> RoPE -> quantize back
1407
0
        tmp = ggml_cast(ctx, cur, GGML_TYPE_F32);
1408
1409
0
        tmp = ggml_rope_ext(ctx, tmp,
1410
0
                shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
1411
0
                yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
1412
1413
0
        tmp = ggml_cpy(ctx, tmp, cur);
1414
0
    } else {
1415
        // we rotate only the first n_rot dimensions
1416
0
        tmp = ggml_rope_ext_inplace(ctx, cur,
1417
0
                shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
1418
0
                yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
1419
0
    }
1420
1421
0
    return tmp;
1422
0
}
1423
1424
class llm_graph_input_k_shift : public llm_graph_input_i {
1425
public:
1426
0
    llm_graph_input_k_shift(const llama_kv_cache * kv_self) : kv_self(kv_self) {}
1427
    virtual ~llm_graph_input_k_shift() = default;
1428
1429
    void set_input(const llama_ubatch * ubatch) override;
1430
1431
    ggml_tensor * k_shift; // I32 [kv_size*n_stream]
1432
1433
    const llama_kv_cache * kv_self;
1434
};
1435
1436
0
void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
1437
0
    GGML_UNUSED(ubatch);
1438
1439
0
    if (k_shift) {
1440
0
        kv_self->set_input_k_shift(k_shift);
1441
0
    }
1442
0
}
1443
1444
0
ggml_cgraph * llama_kv_cache::build_graph_shift(llm_graph_result * res, llama_context * lctx) const {
1445
0
    auto * ctx = res->get_ctx();
1446
0
    auto * gf  = res->get_gf();
1447
1448
0
    const auto & n_embd_head_k = hparams.n_embd_head_k;
1449
  //const auto & n_embd_head_v = hparams.n_embd_head_v;
1450
1451
0
    auto inp = std::make_unique<llm_graph_input_k_shift>(this);
1452
1453
0
    inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, (int64_t) get_size()*n_stream);
1454
0
    ggml_set_input(inp->k_shift);
1455
1456
0
    const auto & cparams = lctx->get_cparams();
1457
1458
0
    for (const auto & layer : layers) {
1459
0
        const uint32_t il = layer.il;
1460
1461
0
        const int64_t n_head_kv    = hparams.n_head_kv(il);
1462
0
        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
1463
1464
0
        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
1465
0
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);
1466
1467
0
        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
1468
1469
0
        ggml_tensor * k =
1470
0
            ggml_view_3d(ctx, layer.k,
1471
0
                n_embd_head_k, n_head_kv, get_size()*n_stream,
1472
0
                ggml_row_size(layer.k->type, n_embd_head_k),
1473
0
                ggml_row_size(layer.k->type, n_embd_k_gqa),
1474
0
                0);
1475
1476
0
        ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);
1477
1478
0
        ggml_build_forward_expand(gf, cur);
1479
0
    }
1480
1481
0
    res->add_input(std::move(inp));
1482
1483
0
    return gf;
1484
0
}
1485
1486
0
bool llama_kv_cache::is_masked_swa(llama_pos p0, llama_pos p1) const {
1487
0
    return llama_hparams::is_masked_swa(n_swa, swa_type, p0, p1);
1488
0
}
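For illustration, a minimal sketch of what the standard sliding-window case amounts to, under the assumption that a key at position p0 is masked for a query at position p1 once it falls outside a window of n_swa positions; the real llama_hparams::is_masked_swa also covers the other swa_type variants.

#include <cstdint>
#include <cstdio>

// sketch only: standard sliding-window check under the stated assumption
static bool is_masked_swa_standard(uint32_t n_swa, int32_t p0, int32_t p1) {
    if (n_swa == 0) {
        return false; // no window -> nothing is masked
    }
    return p1 - p0 >= (int32_t) n_swa;
}

int main() {
    // with n_swa = 4, a query at position 10 can still see keys at positions 7..10
    for (int32_t p0 = 5; p0 <= 10; ++p0) {
        std::printf("p0 = %d masked = %d\n", p0, is_masked_swa_standard(4, p0, 10));
    }
    return 0;
}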
1489
1490
0
void llama_kv_cache::state_write(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) const {
1491
0
    GGML_UNUSED(flags);
1492
1493
0
    io.write(&n_stream, sizeof(n_stream));
1494
1495
0
    for (uint32_t s = 0; s < n_stream; ++s) {
1496
0
        cell_ranges_t cr { s, {} };
1497
1498
0
        uint32_t cell_count = 0;
1499
1500
0
        const auto & cells = v_cells[s];
1501
1502
        // Count the number of cells with the specified seq_id
1503
        // Find all the ranges of cells with this seq id (or all, when -1)
1504
0
        uint32_t cell_range_begin = cells.size();
1505
1506
0
        for (uint32_t i = 0; i < cells.size(); ++i) {
1507
0
            if (!cells.is_empty(i) && (seq_id == -1 || cells.seq_has(i, seq_id))) {
1508
0
                ++cell_count;
1509
0
                if (cell_range_begin == cells.size()) {
1510
0
                    cell_range_begin = i;
1511
0
                }
1512
0
            } else {
1513
0
                if (cell_range_begin != cells.size()) {
1514
0
                    cr.data.emplace_back(cell_range_begin, i);
1515
0
                    cell_range_begin = cells.size();
1516
0
                }
1517
0
            }
1518
0
        }
1519
1520
0
        if (cell_range_begin != cells.size()) {
1521
0
            cr.data.emplace_back(cell_range_begin, cells.size());
1522
0
        }
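        // Worked example of the range scan above (hypothetical cell contents): with cells
        // [_, A, A, _, A, A, A, _] and seq_id == A, the loop emits the half-open ranges
        // [1,3) and [4,7) and cell_count becomes 5; the trailing check only fires when the
        // last cell itself belongs to the sequence.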
1523
1524
        // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
1525
0
        uint32_t cell_count_check = 0;
1526
0
        for (const auto & range : cr.data) {
1527
0
            cell_count_check += range.second - range.first;
1528
0
        }
1529
0
        GGML_ASSERT(cell_count == cell_count_check);
1530
1531
0
        io.write(&cell_count, sizeof(cell_count));
1532
1533
        // skip empty streams
1534
0
        if (cell_count == 0) {
1535
0
            continue;
1536
0
        }
1537
1538
0
        state_write_meta(io, cr, seq_id);
1539
0
        state_write_data(io, cr);
1540
0
    }
1541
0
}
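// Serialized layout produced above, as far as it can be read off this function and its
// helpers: n_stream, then per stream a cell_count followed (for non-empty streams) by the
// per-cell metadata written in state_write_meta() and the K/V payload written in
// state_write_data(); state_read() below consumes the fields in the same order.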
1542
1543
0
void llama_kv_cache::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
1544
0
    GGML_UNUSED(flags);
1545
1546
0
    GGML_ASSERT(seq_id == -1 || (seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()));
1547
1548
0
    uint32_t n_stream_cur;
1549
0
    io.read_to(&n_stream_cur, sizeof(n_stream_cur));
1550
0
    if (n_stream_cur != n_stream) {
1551
0
        throw std::runtime_error("n_stream mismatch");
1552
0
    }
1553
1554
0
    for (uint32_t s = 0; s < n_stream; ++s) {
1555
0
        uint32_t cell_count;
1556
0
        io.read_to(&cell_count, sizeof(cell_count));
1557
1558
0
        if (cell_count == 0) {
1559
0
            continue;
1560
0
        }
1561
1562
0
        const uint32_t strm = seq_id == -1 ? s : seq_to_stream[seq_id];
1563
1564
0
        slot_info sinfo;
1565
1566
0
        bool res = true;
1567
0
        res = res && state_read_meta(io, strm, cell_count, sinfo, seq_id);
1568
0
        res = res && state_read_data(io, strm, cell_count, sinfo);
1569
1570
0
        if (!res) {
1571
0
            if (seq_id == -1) {
1572
0
                clear(true);
1573
0
            } else {
1574
0
                seq_rm(seq_id, -1, -1);
1575
0
            }
1576
0
            throw std::runtime_error("failed to restore kv cache");
1577
0
        }
1578
0
    }
1579
0
}
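// Note on stream selection above: a single-sequence restore (seq_id != -1) routes every
// non-empty stream section to seq_to_stream[seq_id], while a whole-cache restore maps
// stream s of the file onto stream s of the cache; on failure the affected state is
// cleared (or the sequence removed) before the exception is thrown.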
1580
1581
0
void llama_kv_cache::state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id) const {
1582
0
    const auto & cells = v_cells[cr.strm];
1583
1584
0
    for (const auto & range : cr.data) {
1585
0
        for (uint32_t i = range.first; i < range.second; ++i) {
1586
0
            std::vector<llama_seq_id> seq_ids;
1587
1588
0
            for (llama_seq_id cur = 0; cur < (int) n_seq_max; ++cur) {
1589
0
                if (cur == seq_id || seq_id == -1) {
1590
0
                    if (cells.seq_has(i, cur)) {
1591
0
                        seq_ids.push_back(cur);
1592
0
                    }
1593
0
                }
1594
0
            }
1595
1596
0
            const llama_pos pos     = cells.pos_get(i);
1597
0
            const uint32_t n_seq_id = seq_ids.size();
1598
1599
0
            io.write(&pos,      sizeof(pos));
1600
0
            io.write(&n_seq_id, sizeof(n_seq_id));
1601
1602
            // TODO: we also need to save llama_kv_cell_ext once apply_ubatch() supports loading it
1603
            //       see: https://github.com/ggml-org/llama.cpp/pull/16825#issuecomment-3460868350
1604
1605
0
            for (const auto & seq_id : seq_ids) {
1606
0
                io.write(&seq_id, sizeof(seq_id));
1607
0
            }
1608
0
        }
1609
0
    }
1610
0
}
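// Per-cell metadata record written above, with hypothetical values: a cell at pos 42
// shared by sequences 0 and 2 is serialized as pos (llama_pos), n_seq_id = 2 (uint32_t)
// and then the two llama_seq_id values; llama_kv_cell_ext is not serialized yet, as the
// TODO above notes.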
1611
1612
0
void llama_kv_cache::state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const {
1613
0
    const auto & cells = v_cells[cr.strm];
1614
1615
0
    const uint32_t v_trans = this->v_trans ? 1 : 0;
1616
0
    const uint32_t n_layer = layers.size();
1617
1618
0
    io.write(&v_trans, sizeof(v_trans));
1619
0
    io.write(&n_layer, sizeof(n_layer));
1620
1621
0
    std::vector<uint8_t> tmp_buf;
1622
1623
    // Iterate and write all the keys first, each row is a cell
1624
    // Get whole range at a time
1625
0
    for (const auto & layer : layers) {
1626
0
        const uint32_t il = layer.il;
1627
1628
0
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
1629
1630
0
        auto * k = layer.k_stream[cr.strm];
1631
1632
        // Write key type
1633
0
        const int32_t k_type_i = (int32_t) k->type;
1634
0
        io.write(&k_type_i, sizeof(k_type_i));
1635
1636
        // Write row size of key
1637
0
        const uint64_t k_size_row = ggml_row_size(k->type, n_embd_k_gqa);
1638
0
        io.write(&k_size_row, sizeof(k_size_row));
1639
1640
        // Write each range of cells, k_size_row bytes per cell, directly from the key tensor
1641
0
        for (const auto & range : cr.data) {
1642
0
            const size_t range_size = range.second - range.first;
1643
0
            const size_t buf_size = range_size * k_size_row;
1644
0
            io.write_tensor(k, range.first * k_size_row, buf_size);
1645
0
        }
1646
0
    }
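    // Size of one serialized K row above, assuming (hypothetically) that layer.k is stored
    // as GGML_TYPE_F16 with n_embd_k_gqa = 1024: ggml_row_size() gives 2048 bytes per cell,
    // so a range of 16 cells is written as a single 32 KiB block starting at byte offset
    // range.first * 2048 within the stream's K tensor.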
1647
1648
0
    if (!v_trans) {
1649
0
        for (const auto & layer : layers) {
1650
0
            const uint32_t il = layer.il;
1651
1652
0
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
1653
1654
0
            auto * v = layer.v_stream[cr.strm];
1655
1656
            // Write value type
1657
0
            const int32_t v_type_i = (int32_t) v->type;
1658
0
            io.write(&v_type_i, sizeof(v_type_i));
1659
1660
            // Write row size of value
1661
0
            const uint64_t v_size_row = ggml_row_size(v->type, n_embd_v_gqa);
1662
0
            io.write(&v_size_row, sizeof(v_size_row));
1663
1664
            // Write each range of cells, v_size_row bytes per cell, directly from the value tensor
1665
0
            for (const auto & range : cr.data) {
1666
0
                const size_t range_size = range.second - range.first;
1667
0
                const size_t buf_size = range_size * v_size_row;
1668
0
                io.write_tensor(v, range.first * v_size_row, buf_size);
1669
0
            }
1670
0
        }
1671
0
    } else {
1672
        // When V is transposed, we also need the element size, and we write the cell ranges of each element row
1673
0
        const uint32_t kv_size = cells.size();
1674
1675
0
        for (const auto & layer : layers) {
1676
0
            const uint32_t il = layer.il;
1677
1678
0
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
1679
1680
0
            auto * v = layer.v_stream[cr.strm];
1681
1682
            // Write value type
1683
0
            const int32_t v_type_i = (int32_t) v->type;
1684
0
            io.write(&v_type_i, sizeof(v_type_i));
1685
1686
            // Write element size
1687
0
            const uint32_t v_size_el = ggml_type_size(v->type);
1688
0
            io.write(&v_size_el, sizeof(v_size_el));
1689
1690
            // Write GQA embedding size
1691
0
            io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
1692
1693
            // For each row, we get the element values of each cell
1694
0
            for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1695
                // Write each range of cells, v_size_el bytes per element, directly from the value tensor
1696
0
                for (const auto & range : cr.data) {
1697
0
                    const size_t range_size = range.second - range.first;
1698
0
                    const size_t src_offset = (range.first + j * kv_size) * v_size_el;
1699
0
                    const size_t buf_size = range_size * v_size_el;
1700
0
                    io.write_tensor(v, src_offset, buf_size);
1701
0
                }
1702
0
            }
1703
0
        }
1704
0
    }
1705
0
}
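// Transposed-V addressing used above, with hypothetical numbers: for kv_size = 8, element
// row j = 2 and the cell range [3,5), the source offset is (3 + 2*8) * v_size_el and two
// elements are written, i.e. the transposed V cache stores each embedding dimension
// contiguously across all cells rather than each cell contiguously across dimensions.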
1706
1707
0
bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, slot_info & sinfo, llama_seq_id dest_seq_id) {
1708
0
    auto & cells = v_cells[strm];
1709
0
    auto & head  = v_heads[strm];
1710
1711
0
    if (dest_seq_id != -1) {
1712
        // single sequence
1713
0
        seq_rm(dest_seq_id, -1, -1);
1714
1715
0
        llama_batch_allocr balloc(hparams.n_pos_per_embd());
1716
1717
0
        llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);
1718
1719
0
        ubatch.seq_id_unq[0] = dest_seq_id;
1720
1721
0
        for (uint32_t i = 0; i < cell_count; ++i) {
1722
0
            llama_pos pos;
1723
0
            uint32_t n_seq_id;
1724
1725
0
            io.read_to(&pos,      sizeof(pos));
1726
0
            io.read_to(&n_seq_id, sizeof(n_seq_id));
1727
1728
0
            if (n_seq_id != 1) {
1729
0
                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
1730
0
                return false;
1731
0
            }
1732
1733
            // read the sequence id, but directly discard it - we will use dest_seq_id instead
1734
0
            {
1735
0
                llama_seq_id seq_id;
1736
0
                io.read_to(&seq_id, sizeof(seq_id));
1737
0
            }
1738
1739
0
            ubatch.pos[i]      = pos;
1740
0
            ubatch.n_seq_id[i] = n_seq_id;
1741
0
            ubatch.seq_id[i]   = &dest_seq_id;
1742
0
        }
1743
1744
0
        sinfo = find_slot(ubatch, false);
1745
0
        if (sinfo.empty()) {
1746
0
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
1747
0
            return false;
1748
0
        }
1749
1750
        // TODO: we cannot restore llama_kv_cell_ext yet, as apply_ubatch() does not support it
1751
        //       see: https://github.com/ggml-org/llama.cpp/pull/16825#issuecomment-3460868350
1752
0
        apply_ubatch(sinfo, ubatch);
1753
1754
0
        LLAMA_LOG_DEBUG("%s: cell_count = %d, dest_seq_id = %d\n", __func__, cell_count, dest_seq_id);
1755
1756
        // DEBUG CHECK: verify that all cells were allocated and have correct seq_id and pos values
1757
0
        GGML_ASSERT(sinfo.n_stream() == 1);
1758
0
        GGML_ASSERT(sinfo.idxs[0].size() == cell_count);
1759
0
        for (uint32_t i = 0; i < cell_count; ++i) {
1760
0
            const uint32_t idx = sinfo.idxs[0][i];
1761
0
            GGML_ASSERT(cells.pos_get(idx) == ubatch.pos[i]);
1762
0
            GGML_ASSERT(cells.seq_has(idx, dest_seq_id));
1763
0
        }
1764
0
    } else {
1765
        // whole KV cache restore
1766
1767
0
        if (cell_count > cells.size()) {
1768
0
            LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
1769
0
            return false;
1770
0
        }
1771
1772
0
        clear(true);
1773
1774
0
        for (uint32_t i = 0; i < cell_count; ++i) {
1775
0
            llama_pos pos;
1776
0
            uint32_t  n_seq_id;
1777
1778
0
            io.read_to(&pos,      sizeof(pos));
1779
0
            io.read_to(&n_seq_id, sizeof(n_seq_id));
1780
1781
0
            cells.pos_set(i, pos);
1782
1783
0
            for (uint32_t j = 0; j < n_seq_id; ++j) {
1784
0
                llama_seq_id seq_id;
1785
0
                io.read_to(&seq_id, sizeof(seq_id));
1786
1787
0
                if (seq_id < 0 || (uint32_t) seq_id >= n_seq_max) {
1788
0
                    LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, n_seq_max);
1789
0
                    return false;
1790
0
                }
1791
1792
0
                cells.seq_add(i, seq_id);
1793
0
            }
1794
0
        }
1795
1796
        // Create contiguous slot_info for whole cache restore
1797
0
        sinfo.s0 = strm;
1798
0
        sinfo.s1 = strm;
1799
0
        sinfo.resize(1);
1800
0
        sinfo.strm[0] = strm;
1801
0
        sinfo.idxs[0].resize(cell_count);
1802
0
        for (uint32_t i = 0; i < cell_count; ++i) {
1803
0
            sinfo.idxs[0][i] = i;
1804
0
        }
1805
1806
0
        head = 0;
1807
0
    }
1808
1809
0
    return true;
1810
0
}
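// The two branches above produce different slot layouts: the single-sequence path lets
// find_slot() pick the cells, so sinfo.idxs may be non-contiguous, while the whole-cache
// path builds an identity slot_info over cells [0, cell_count); state_read_data() below
// checks sinfo.is_contiguous() to choose between the single-copy and scatter paths.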
1811
1812
0
bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, const slot_info & sinfo) {
1813
0
    auto & cells = v_cells[strm];
1814
1815
0
    uint32_t v_trans;
1816
0
    uint32_t n_layer;
1817
1818
0
    io.read_to(&v_trans, sizeof(v_trans));
1819
0
    io.read_to(&n_layer, sizeof(n_layer));
1820
1821
0
    if (n_layer != layers.size()) {
1822
0
        LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, (uint32_t) layers.size());
1823
0
        return false;
1824
0
    }
1825
1826
0
    if (cell_count > cells.size()) {
1827
0
        LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, cells.size());
1828
0
        return false;
1829
0
    }
1830
1831
0
    if (this->v_trans != (bool) v_trans) {
1832
0
        LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
1833
0
        return false;
1834
0
    }
1835
1836
    // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
1837
0
    for (const auto & layer : layers) {
1838
0
        const uint32_t il = layer.il;
1839
1840
0
        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
1841
1842
0
        auto * k = layer.k_stream[strm];
1843
1844
        // Read type of key
1845
0
        int32_t k_type_i_ref;
1846
0
        io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
1847
0
        const int32_t k_type_i = (int32_t) k->type;
1848
0
        if (k_type_i != k_type_i_ref) {
1849
0
            LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
1850
0
            return false;
1851
0
        }
1852
1853
        // Read row size of key
1854
0
        uint64_t k_size_row_ref;
1855
0
        io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
1856
0
        const size_t k_size_row = ggml_row_size(k->type, n_embd_k_gqa);
1857
0
        if (k_size_row != k_size_row_ref) {
1858
0
            LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
1859
0
            return false;
1860
0
        }
1861
1862
0
        if (cell_count) {
1863
0
            if (sinfo.is_contiguous()) {
1864
                // Fast path: contiguous cells, single memcpy
1865
0
                ggml_backend_tensor_set(k, io.read(cell_count * k_size_row), sinfo.head() * k_size_row, cell_count * k_size_row);
1866
0
            } else {
1867
                // Slow path: scatter to non-contiguous positions
1868
0
                const void * src = io.read(cell_count * k_size_row);
1869
0
                for (uint32_t i = 0; i < cell_count; ++i) {
1870
0
                    const size_t dst_offset = sinfo.idxs[0][i] * k_size_row;
1871
0
                    ggml_backend_tensor_set(k, (const char*)src + i * k_size_row, dst_offset, k_size_row);
1872
0
                }
1873
0
            }
1874
0
        }
1875
0
    }
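    // Scatter example for the slow path above (hypothetical indices): with
    // sinfo.idxs[0] = {7, 2, 5}, row 0 of the serialized K block lands in cell 7, row 1 in
    // cell 2 and row 2 in cell 5, each copied with its own ggml_backend_tensor_set() call
    // of k_size_row bytes.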
1876
1877
0
    if (!this->v_trans) {
1878
0
        for (const auto & layer : layers) {
1879
0
            const uint32_t il = layer.il;
1880
1881
0
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
1882
1883
0
            auto * v = layer.v_stream[strm];
1884
1885
            // Read type of value
1886
0
            int32_t v_type_i_ref;
1887
0
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
1888
0
            const int32_t v_type_i = (int32_t) v->type;
1889
0
            if (v_type_i != v_type_i_ref) {
1890
0
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
1891
0
                return false;
1892
0
            }
1893
1894
            // Read row size of value
1895
0
            uint64_t v_size_row_ref;
1896
0
            io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
1897
0
            const size_t v_size_row = ggml_row_size(v->type, n_embd_v_gqa);
1898
0
            if (v_size_row != v_size_row_ref) {
1899
0
                LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
1900
0
                return false;
1901
0
            }
1902
1903
0
            if (cell_count) {
1904
0
                if (sinfo.is_contiguous()) {
1905
                    // Fast path: contiguous cells, single memcpy
1906
0
                    ggml_backend_tensor_set(v, io.read(cell_count * v_size_row), sinfo.head() * v_size_row, cell_count * v_size_row);
1907
0
                } else {
1908
                    // Slow path: scatter to non-contiguous positions
1909
0
                    const void * src = io.read(cell_count * v_size_row);
1910
0
                    for (uint32_t i = 0; i < cell_count; ++i) {
1911
0
                        const size_t dst_offset = sinfo.idxs[0][i] * v_size_row;
1912
0
                        ggml_backend_tensor_set(v, (const char*)src + i * v_size_row, dst_offset, v_size_row);
1913
0
                    }
1914
0
                }
1915
0
            }
1916
0
        }
1917
0
    } else {
1918
        // For each layer, read the values for each cell (transposed)
1919
0
        for (const auto & layer : layers) {
1920
0
            const uint32_t il = layer.il;
1921
1922
0
            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
1923
1924
0
            auto * v = layer.v_stream[strm];
1925
1926
            // Read type of value
1927
0
            int32_t v_type_i_ref;
1928
0
            io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
1929
0
            const int32_t v_type_i = (int32_t) v->type;
1930
0
            if (v_type_i != v_type_i_ref) {
1931
0
                LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
1932
0
                return false;
1933
0
            }
1934
1935
            // Read element size of value
1936
0
            uint32_t v_size_el_ref;
1937
0
            io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
1938
0
            const size_t v_size_el = ggml_type_size(v->type);
1939
0
            if (v_size_el != v_size_el_ref) {
1940
0
                LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
1941
0
                return false;
1942
0
            }
1943
1944
            // Read GQA embedding size
1945
0
            uint32_t n_embd_v_gqa_ref;
1946
0
            io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
1947
0
            if (n_embd_v_gqa != n_embd_v_gqa_ref) {
1948
0
                LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
1949
0
                return false;
1950
0
            }
1951
1952
0
            if (cell_count) {
1953
0
                if (sinfo.is_contiguous()) {
1954
                    // Fast path: contiguous cells
1955
0
                    const uint32_t h = sinfo.head();
1956
0
                    for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1957
0
                        const size_t dst_offset = (h + j * cells.size()) * v_size_el;
1958
0
                        ggml_backend_tensor_set(v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
1959
0
                    }
1960
0
                } else {
1961
                    // Slow path: scatter to non-contiguous positions
1962
0
                    for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
1963
0
                        const void * src = io.read(cell_count * v_size_el);
1964
0
                        for (uint32_t i = 0; i < cell_count; ++i) {
1965
0
                            const size_t dst_offset = (sinfo.idxs[0][i] + j * cells.size()) * v_size_el;
1966
0
                            ggml_backend_tensor_set(v, (const char*)src + i * v_size_el, dst_offset, v_size_el);
1967
0
                        }
1968
0
                    }
1969
0
                }
1970
0
            }
1971
0
        }
1972
0
    }
1973
1974
0
    return true;
1975
0
}
1976
1977
//
1978
// llama_kv_cache_context
1979
//
1980
1981
0
llama_kv_cache_context::llama_kv_cache_context(llama_memory_status status) : status(status) {}
1982
1983
llama_kv_cache_context::llama_kv_cache_context(
1984
0
        llama_kv_cache * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv) {
1985
0
    n_kv = kv->get_size();
1986
1987
0
    const uint32_t n_stream = kv->get_n_stream();
1988
1989
    // create a dummy slot info - the actual data is irrelevant. we just need to build the graph
1990
0
    sinfos.resize(1);
1991
0
    sinfos[0].s0 = 0;
1992
0
    sinfos[0].s1 = n_stream - 1;
1993
0
    sinfos[0].idxs.resize(n_stream);
1994
0
    for (uint32_t s = 0; s < n_stream; ++s) {
1995
0
        sinfos[0].strm.push_back(s);
1996
0
        sinfos[0].idxs[s].resize(1, 0);
1997
0
    }
1998
0
}
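// The dummy slot_info built here spans all streams but references only cell 0 in each of
// them; judging by the comment above, the point is to be able to build a graph (e.g. for
// reservation) without committing to any real cell assignment.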
1999
2000
llama_kv_cache_context::llama_kv_cache_context(
2001
        llama_kv_cache * kv,
2002
        llama_context * lctx,
2003
        bool do_shift,
2004
0
        stream_copy_info sc_info) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), lctx(lctx), do_shift(do_shift), sc_info(std::move(sc_info)) {
2005
0
    if (!do_shift && this->sc_info.empty()) {
2006
0
        status = LLAMA_MEMORY_STATUS_NO_UPDATE;
2007
0
    }
2008
0
}
2009
2010
llama_kv_cache_context::llama_kv_cache_context(
2011
        llama_kv_cache * kv,
2012
        llama_kv_cache::slot_info_vec_t sinfos,
2013
0
        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sinfos(std::move(sinfos)), ubatches(std::move(ubatches)) {
2014
0
}
2015
2016
0
llama_kv_cache_context::~llama_kv_cache_context() = default;
2017
2018
0
bool llama_kv_cache_context::next() {
2019
0
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
2020
2021
0
    if (++i_cur >= ubatches.size()) {
2022
0
        return false;
2023
0
    }
2024
2025
0
    return true;
2026
0
}
2027
2028
0
bool llama_kv_cache_context::apply() {
2029
0
    assert(!llama_memory_status_is_fail(status));
2030
2031
    // no ubatches -> this is a KV cache update
2032
0
    if (ubatches.empty()) {
2033
0
        kv->update(lctx, do_shift, sc_info);
2034
2035
0
        return true;
2036
0
    }
2037
2038
0
    kv->apply_ubatch(sinfos[i_cur], ubatches[i_cur]);
2039
0
    n_kv = kv->get_n_kv(sinfos[i_cur]);
2040
2041
0
    return true;
2042
0
}
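// In other words: a context created with ubatches (the decode path) applies one ubatch per
// apply() call and refreshes n_kv from the matching slot_info, while a context created
// without ubatches only triggers kv->update() for pending shifts and stream copies.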
2043
2044
0
llama_memory_status llama_kv_cache_context::get_status() const {
2045
0
    return status;
2046
0
}
2047
2048
0
const llama_ubatch & llama_kv_cache_context::get_ubatch() const {
2049
0
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
2050
2051
0
    return ubatches[i_cur];
2052
0
}
2053
2054
0
uint32_t llama_kv_cache_context::get_n_kv() const {
2055
0
    return n_kv;
2056
0
}
2057
2058
0
ggml_tensor * llama_kv_cache_context::get_k(ggml_context * ctx, int32_t il) const {
2059
0
    return kv->get_k(ctx, il, n_kv, sinfos[i_cur]);
2060
0
}
2061
2062
0
ggml_tensor * llama_kv_cache_context::get_v(ggml_context * ctx, int32_t il) const {
2063
0
    return kv->get_v(ctx, il, n_kv, sinfos[i_cur]);
2064
0
}
2065
2066
0
ggml_tensor * llama_kv_cache_context::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const {
2067
0
    return kv->cpy_k(ctx, k_cur, k_idxs, il, sinfos[i_cur]);
2068
0
}
2069
2070
0
ggml_tensor * llama_kv_cache_context::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const {
2071
0
    return kv->cpy_v(ctx, v_cur, v_idxs, il, sinfos[i_cur]);
2072
0
}
2073
2074
0
ggml_tensor * llama_kv_cache_context::build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
2075
0
    return kv->build_input_k_idxs(ctx, ubatch);
2076
0
}
2077
2078
0
ggml_tensor * llama_kv_cache_context::build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
2079
0
    return kv->build_input_v_idxs(ctx, ubatch);
2080
0
}
2081
2082
0
void llama_kv_cache_context::set_input_k_shift(ggml_tensor * dst) const {
2083
0
    kv->set_input_k_shift(dst);
2084
0
}
2085
2086
0
void llama_kv_cache_context::set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const {
2087
0
    kv->set_input_k_idxs(dst, ubatch, sinfos[i_cur]);
2088
0
}
2089
2090
0
void llama_kv_cache_context::set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const {
2091
0
    kv->set_input_v_idxs(dst, ubatch, sinfos[i_cur]);
2092
0
}
2093
2094
0
void llama_kv_cache_context::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
2095
0
    kv->set_input_kq_mask(dst, ubatch, causal_attn);
2096
0
}
2097
2098
0
void llama_kv_cache_context::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const {
2099
0
    kv->set_input_pos_bucket(dst, ubatch);
2100
0
}