/src/llama.cpp/src/llama-memory-recurrent.cpp
Line | Count | Source |
1 | | #include "llama-memory-recurrent.h" |
2 | | |
3 | | #include "llama-impl.h" |
4 | | #include "llama-io.h" |
5 | | #include "llama-batch.h" |
6 | | #include "llama-model.h" |
7 | | |
8 | | #include <algorithm> |
9 | | #include <cassert> |
10 | | #include <cstring> |
11 | | #include <limits> |
12 | | #include <map> |
13 | | #include <stdexcept> |
14 | | |
15 | | // |
16 | | // llama_memory_recurrent |
17 | | // |
18 | | |
19 | | llama_memory_recurrent::llama_memory_recurrent( |
20 | | const llama_model & model, |
21 | | ggml_type type_r, |
22 | | ggml_type type_s, |
23 | | bool offload, |
24 | | uint32_t mem_size, |
25 | | uint32_t n_seq_max, |
26 | 0 | const layer_filter_cb & filter) : hparams(model.hparams), n_seq_max(n_seq_max) { |
27 | 0 | const int32_t n_layer = hparams.n_layer; |
28 | |
29 | 0 | head = 0; |
30 | 0 | size = mem_size; |
31 | 0 | used = 0; |
32 | |
33 | 0 | cells.clear(); |
34 | 0 | cells.resize(mem_size); |
35 | | |
36 | | // define a comparator for the buft -> ctx map to ensure that the order is well-defined: |
37 | 0 | struct ggml_backend_buft_comparator { |
38 | 0 | bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const { |
39 | 0 | return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0; |
40 | 0 | } |
41 | 0 | }; |
42 | 0 | std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map; |
43 | | |
44 | | // create a context for each buffer type |
45 | 0 | auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { |
46 | 0 | auto it = ctx_map.find(buft); |
47 | 0 | if (it == ctx_map.end()) { |
48 | 0 | ggml_init_params params = { |
49 | 0 | /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()), |
50 | 0 | /*.mem_buffer =*/ NULL, |
51 | 0 | /*.no_alloc =*/ true, |
52 | 0 | }; |
53 | |
54 | 0 | ggml_context * ctx = ggml_init(params); |
55 | 0 | if (!ctx) { |
56 | 0 | return nullptr; |
57 | 0 | } |
58 | | |
59 | 0 | ctx_map.emplace(buft, ctx); |
60 | |
61 | 0 | return ctx; |
62 | 0 | } |
63 | | |
64 | 0 | return it->second.get(); |
65 | 0 | }; |
66 | |
67 | 0 | r_l.resize(n_layer); |
68 | 0 | s_l.resize(n_layer); |
69 | |
70 | 0 | for (int i = 0; i < n_layer; i++) { |
71 | 0 | if (filter && !filter(i)) { |
72 | 0 | LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, i); |
73 | 0 | continue; |
74 | 0 | } |
75 | | |
76 | 0 | const char * dev_name = "CPU"; |
77 | |
78 | 0 | ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type(); |
79 | |
80 | 0 | if (offload) { |
81 | 0 | auto * dev = model.dev_layer(i); |
82 | 0 | buft = ggml_backend_dev_buffer_type(dev); |
83 | |
84 | 0 | dev_name = ggml_backend_dev_name(dev); |
85 | 0 | } |
86 | |
87 | 0 | LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, i, dev_name); |
88 | |
89 | 0 | ggml_context * ctx = ctx_for_buft(buft); |
90 | 0 | if (!ctx) { |
91 | 0 | throw std::runtime_error("failed to create ggml context for rs cache"); |
92 | 0 | } |
93 | | |
94 | 0 | ggml_tensor * r = ggml_new_tensor_1d(ctx, type_r, hparams.n_embd_r()*mem_size); |
95 | 0 | ggml_tensor * s = ggml_new_tensor_1d(ctx, type_s, hparams.n_embd_s()*mem_size); |
96 | 0 | ggml_format_name(r, "cache_r_l%d", i); |
97 | 0 | ggml_format_name(s, "cache_s_l%d", i); |
98 | 0 | r_l[i] = r; |
99 | 0 | s_l[i] = s; |
100 | 0 | } |
101 | | |
102 | | // allocate tensors and initialize the buffers to avoid NaNs in the padding |
103 | 0 | for (auto & [buft, ctx] : ctx_map) { |
104 | 0 | ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx.get(), buft); |
105 | 0 | if (!buf) { |
106 | 0 | throw std::runtime_error("failed to allocate buffer for rs cache"); |
107 | 0 | } |
108 | 0 | ggml_backend_buffer_clear(buf, 0); |
109 | 0 | LLAMA_LOG_INFO("%s: %10s RS buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); |
110 | 0 | ctxs_bufs.emplace_back(std::move(ctx), buf); |
111 | 0 | } |
112 | | |
113 | 0 | { |
114 | 0 | const size_t memory_size_r = size_r_bytes(); |
115 | 0 | const size_t memory_size_s = size_s_bytes(); |
116 | |
117 | 0 | LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u seqs), R (%s): %7.2f MiB, S (%s): %7.2f MiB\n", __func__, |
118 | 0 | (float)(memory_size_r + memory_size_s) / (1024.0f * 1024.0f), mem_size, n_layer, n_seq_max, |
119 | 0 | ggml_type_name(type_r), (float)memory_size_r / (1024.0f * 1024.0f), |
120 | 0 | ggml_type_name(type_s), (float)memory_size_s / (1024.0f * 1024.0f)); |
121 | 0 | } |
122 | 0 | } |
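
The constructor reserves exactly one R row and one S row per cell in every non-filtered layer, so the footprint reported by size_r_bytes()/size_s_bytes() grows linearly with mem_size and n_layer. A standalone back-of-the-envelope sketch of that arithmetic, assuming F32 states and hypothetical stand-in values for hparams.n_embd_r()/n_embd_s() (not taken from any particular model):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // hypothetical values; the real ones come from llama_hparams and n_seq_max
        const uint32_t n_layer  = 32;
        const uint32_t mem_size = 8;     // one cell per tracked sequence
        const uint32_t n_embd_r = 1024;  // stand-in for hparams.n_embd_r()
        const uint32_t n_embd_s = 4096;  // stand-in for hparams.n_embd_s()

        // with type_r == type_s == GGML_TYPE_F32, each element is sizeof(float) bytes
        const double bytes_r = 1.0 * n_layer * mem_size * n_embd_r * sizeof(float);
        const double bytes_s = 1.0 * n_layer * mem_size * n_embd_s * sizeof(float);

        printf("R: %.2f MiB, S: %.2f MiB\n", bytes_r/1024.0/1024.0, bytes_s/1024.0/1024.0);
        return 0;
    }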
123 | | |
124 | 0 | void llama_memory_recurrent::clear(bool data) { |
125 | 0 | for (int32_t i = 0; i < (int32_t) size; ++i) { |
126 | 0 | cells[i].pos = -1; |
127 | 0 | cells[i].seq_id.clear(); |
128 | 0 | cells[i].src = -1; |
129 | 0 | cells[i].tail = -1; |
130 | 0 | } |
131 | |
132 | 0 | head = 0; |
133 | 0 | used = 0; |
134 | |
135 | 0 | if (data) { |
136 | 0 | for (auto & [_, buf] : ctxs_bufs) { |
137 | 0 | ggml_backend_buffer_clear(buf.get(), 0); |
138 | 0 | } |
139 | 0 | } |
140 | 0 | } |
141 | | |
142 | 0 | bool llama_memory_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { |
143 | | //printf("[DEBUG] calling llama_memory_recurrent::seq_rm` with `seq_id=%d, p0=%d, p1=%d`\n", seq_id, p0, p1); |
144 | 0 | uint32_t new_head = size; |
145 | |
146 | 0 | if (p0 < 0) { |
147 | 0 | p0 = 0; |
148 | 0 | } |
149 | |
150 | 0 | if (p1 < 0) { |
151 | 0 | p1 = std::numeric_limits<llama_pos>::max(); |
152 | 0 | } |
153 | | |
154 | | // models like Mamba or RWKV can't have a state partially erased at the end |
155 | | // of the sequence because their state isn't preserved for previous tokens |
156 | 0 | if (seq_id >= (int64_t) size) { |
157 | | // could be fatal |
158 | 0 | return false; |
159 | 0 | } |
160 | 0 | if (0 <= seq_id) { |
161 | 0 | int32_t & tail_id = cells[seq_id].tail; |
162 | 0 | if (tail_id >= 0) { |
163 | 0 | const auto & cell = cells[tail_id]; |
164 | | // partial intersection is invalid if it includes the final pos |
165 | 0 | if (0 < p0 && p0 <= cell.pos && p1 > cell.pos) { |
166 | | //printf("[DEBUG] inside `llama_memory_recurrent::seq_rm`: partial intersection is invalid, so returning false\n"); |
167 | 0 | return false; |
168 | 0 | } |
169 | | // invalidate tails which will be cleared |
170 | 0 | if (p0 <= cell.pos && cell.pos < p1) { |
171 | 0 | tail_id = -1; |
172 | 0 | } |
173 | 0 | } |
174 | 0 | } else { |
175 | | // if seq_id is negative, then the range should include everything or nothing |
176 | 0 | if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) { |
177 | | //printf("[DEBUG] inside `llama_memory_recurrent::seq_rm`: `seq_id` is negative, so returning false\n"); |
178 | 0 | return false; |
179 | 0 | } |
180 | 0 | } |
181 | | |
182 | 0 | for (uint32_t i = 0; i < size; ++i) { |
183 | 0 | if (cells[i].pos >= p0 && cells[i].pos < p1) { |
184 | 0 | if (seq_id < 0) { |
185 | 0 | cells[i].seq_id.clear(); |
186 | 0 | } else if (cells[i].has_seq_id(seq_id)) { |
187 | 0 | cells[i].seq_id.erase(seq_id); |
188 | 0 | } else { |
189 | 0 | continue; |
190 | 0 | } |
191 | 0 | if (cells[i].is_empty()) { |
192 | | // keep count of the number of used cells |
193 | 0 | if (cells[i].pos >= 0) { |
194 | 0 | used--; |
195 | 0 | } |
196 | 0 | cells[i].pos = -1; |
197 | 0 | cells[i].src = -1; |
198 | 0 | if (new_head == size) { |
199 | 0 | new_head = i; |
200 | 0 | } |
201 | 0 | } |
202 | 0 | } |
203 | 0 | } |
204 | | |
205 | | // If we freed up a slot, set head to it so searching can start there. |
206 | 0 | if (new_head != size && new_head < head) { |
207 | 0 | head = new_head; |
208 | 0 | } |
209 | |
210 | 0 | return true; |
211 | 0 | } |
212 | | |
213 | 0 | void llama_memory_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { |
214 | 0 | if (seq_id_src == seq_id_dst) { |
215 | 0 | return; |
216 | 0 | } |
217 | | |
218 | 0 | if (p0 < 0) { |
219 | 0 | p0 = 0; |
220 | 0 | } |
221 | |
222 | 0 | if (p1 < 0) { |
223 | 0 | p1 = std::numeric_limits<llama_pos>::max(); |
224 | 0 | } |
225 | |
226 | 0 | if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) { |
227 | 0 | auto & tail_src = cells[seq_id_src]; |
228 | 0 | auto & tail_dst = cells[seq_id_dst]; |
229 | 0 | if (tail_dst.tail >= 0) { |
230 | | // clear destination seq_id if it wasn't empty |
231 | 0 | auto & cell_dst = cells[tail_dst.tail]; |
232 | |
233 | 0 | cell_dst.seq_id.erase(seq_id_dst); |
234 | 0 | tail_dst.tail = -1; |
235 | 0 | if (cell_dst.seq_id.empty()) { |
236 | 0 | cell_dst.pos = -1; |
237 | 0 | cell_dst.src = -1; |
238 | 0 | used -= 1; |
239 | 0 | } |
240 | 0 | } |
241 | 0 | if (tail_src.tail >= 0) { |
242 | 0 | auto & cell_src = cells[tail_src.tail]; |
243 | |
244 | 0 | cell_src.seq_id.insert(seq_id_dst); |
245 | 0 | tail_dst.tail = tail_src.tail; |
246 | 0 | } |
247 | 0 | } |
248 | 0 | } |
249 | | |
250 | 0 | void llama_memory_recurrent::seq_keep(llama_seq_id seq_id) { |
251 | 0 | uint32_t new_head = size; |
252 | |
253 | 0 | for (uint32_t i = 0; i < size; ++i) { |
254 | 0 | if ((llama_seq_id) i != seq_id) { |
255 | 0 | cells[i].tail = -1; |
256 | 0 | } |
257 | |
258 | 0 | if (!cells[i].has_seq_id(seq_id)) { |
259 | 0 | if (cells[i].pos >= 0) { |
260 | 0 | used--; |
261 | 0 | } |
262 | |
263 | 0 | cells[i].pos = -1; |
264 | 0 | cells[i].src = -1; |
265 | 0 | cells[i].seq_id.clear(); |
266 | |
267 | 0 | if (new_head == size){ |
268 | 0 | new_head = i; |
269 | 0 | } |
270 | 0 | } else { |
271 | 0 | cells[i].seq_id.clear(); |
272 | 0 | cells[i].seq_id.insert(seq_id); |
273 | 0 | } |
274 | 0 | } |
275 | | |
276 | | // If we freed up a slot, set head to it so searching can start there. |
277 | 0 | if (new_head != size && new_head < head) { |
278 | 0 | head = new_head; |
279 | 0 | } |
280 | 0 | } |
281 | | |
282 | 0 | void llama_memory_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { |
283 | 0 | if (shift == 0) { |
284 | 0 | return; |
285 | 0 | } |
286 | | |
287 | 0 | if (p0 < 0) { |
288 | 0 | p0 = 0; |
289 | 0 | } |
290 | |
291 | 0 | if (p1 < 0) { |
292 | 0 | p1 = std::numeric_limits<llama_pos>::max(); |
293 | 0 | } |
294 | | |
295 | | // If there is no range then return early to avoid looping over the cache. |
296 | 0 | if (p0 == p1) { |
297 | 0 | return; |
298 | 0 | } |
299 | | |
300 | | // for Mamba-like or RWKV models, only the pos needs to be shifted |
301 | 0 | if (0 <= seq_id && seq_id < (int64_t) size) { |
302 | 0 | const int32_t tail_id = cells[seq_id].tail; |
303 | 0 | if (tail_id >= 0) { |
304 | 0 | auto & cell = cells[tail_id]; |
305 | 0 | if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { |
306 | 0 | cell.pos += shift; |
307 | 0 | } |
308 | 0 | } |
309 | 0 | } |
310 | 0 | } |
311 | | |
312 | 0 | void llama_memory_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { |
313 | 0 | if (d == 1) { |
314 | 0 | return; |
315 | 0 | } |
316 | | |
317 | 0 | if (p0 < 0) { |
318 | 0 | p0 = 0; |
319 | 0 | } |
320 | |
321 | 0 | if (p1 < 0) { |
322 | 0 | p1 = std::numeric_limits<llama_pos>::max(); |
323 | 0 | } |
324 | | |
325 | | // If there is no range then return early to avoid looping over the cache. |
326 | 0 | if (p0 == p1) { |
327 | 0 | return; |
328 | 0 | } |
329 | | |
330 | | // for Mamba-like or RWKV models, only the pos needs to be changed |
331 | 0 | if (0 <= seq_id && seq_id < (int64_t) size) { |
332 | 0 | const int32_t tail_id = cells[seq_id].tail; |
333 | 0 | if (tail_id >= 0) { |
334 | 0 | auto & cell = cells[tail_id]; |
335 | 0 | if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { |
336 | 0 | cell.pos /= d; |
337 | 0 | } |
338 | 0 | } |
339 | 0 | } |
340 | 0 | } |
341 | | |
342 | 0 | llama_pos llama_memory_recurrent::seq_pos_min(llama_seq_id seq_id) const { |
343 | 0 | llama_pos result = std::numeric_limits<llama_pos>::max(); |
344 | |
345 | 0 | for (uint32_t i = 0; i < size; ++i) { |
346 | 0 | if (cells[i].has_seq_id(seq_id)) { |
347 | 0 | result = std::min(result, cells[i].pos); |
348 | 0 | } |
349 | 0 | } |
350 | |
351 | 0 | if (result == std::numeric_limits<llama_pos>::max()) { |
352 | 0 | result = -1; |
353 | 0 | } |
354 | |
355 | 0 | return result; |
356 | 0 | } |
357 | | |
358 | 0 | llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const { |
359 | 0 | llama_pos result = -1; |
360 | |
361 | 0 | for (uint32_t i = 0; i < size; ++i) { |
362 | 0 | if (cells[i].has_seq_id(seq_id)) { |
363 | 0 | result = std::max(result, cells[i].pos); |
364 | 0 | } |
365 | 0 | } |
366 | |
367 | 0 | return result; |
368 | 0 | } |
369 | | |
370 | 0 | std::map<ggml_backend_buffer_type_t, size_t> llama_memory_recurrent::memory_breakdown() const { |
371 | 0 | std::map<ggml_backend_buffer_type_t, size_t> ret; |
372 | 0 | for (const auto & [_, buf] : ctxs_bufs) { |
373 | 0 | ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get()); |
374 | 0 | } |
375 | 0 | return ret; |
376 | 0 | } |
377 | | |
378 | 0 | llama_memory_context_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { |
379 | 0 | do { |
380 | 0 | balloc.split_reset(); |
381 | |
382 | 0 | std::vector<llama_ubatch> ubatches; |
383 | 0 | while (true) { |
384 | 0 | llama_ubatch ubatch; |
385 | |
386 | 0 | if (embd_all) { |
387 | | // if all tokens are output, split by sequence |
388 | 0 | ubatch = balloc.split_seq(n_ubatch); |
389 | 0 | } else { |
390 | | // TODO: non-sequential equal split can be done if using unified KV cache |
391 | | // for simplicity, we always use sequential equal split for now |
392 | 0 | ubatch = balloc.split_equal(n_ubatch, true); |
393 | 0 | } |
394 | |
395 | 0 | if (ubatch.n_tokens == 0) { |
396 | 0 | break; |
397 | 0 | } |
398 | | |
399 | 0 | ubatches.push_back(std::move(ubatch)); // NOLINT |
400 | 0 | } |
401 | |
402 | 0 | if (balloc.get_n_used() < balloc.get_n_tokens()) { |
403 | | // failed to find a suitable split |
404 | 0 | break; |
405 | 0 | } |
406 | | |
407 | 0 | if (!prepare(ubatches)) { |
408 | 0 | break; |
409 | 0 | } |
410 | | |
411 | 0 | return std::make_unique<llama_memory_recurrent_context>(this, std::move(ubatches)); |
412 | 0 | } while (false); |
413 | | |
414 | 0 | return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE); |
415 | 0 | } |
416 | | |
417 | 0 | llama_memory_context_ptr llama_memory_recurrent::init_full() { |
418 | 0 | return std::make_unique<llama_memory_recurrent_context>(this); |
419 | 0 | } |
420 | | |
421 | 0 | llama_memory_context_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) { |
422 | 0 | GGML_UNUSED(lctx); |
423 | 0 | GGML_UNUSED(optimize); |
424 | |
425 | 0 | return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_NO_UPDATE); |
426 | 0 | } |
427 | | |
428 | 0 | bool llama_memory_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) { |
429 | | // simply remember the full state because it is very small for this type of cache |
430 | | // TODO: optimize |
431 | 0 | auto org_cells = cells; |
432 | 0 | auto org_used = used; |
433 | 0 | auto org_head = head; |
434 | |
435 | 0 | bool success = true; |
436 | |
437 | 0 | for (const auto & ubatch : ubatches) { |
438 | 0 | if (!find_slot(ubatch)) { |
439 | 0 | success = false; |
440 | 0 | break; |
441 | 0 | } |
442 | 0 | } |
443 | | |
444 | | // restore the original state |
445 | 0 | cells = std::move(org_cells); |
446 | 0 | used = org_used; |
447 | 0 | head = org_head; |
448 | |
449 | 0 | return success; |
450 | 0 | } |
451 | | |
452 | 0 | bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) { |
453 | 0 | const uint32_t n_seq_tokens = ubatch.n_seq_tokens; |
454 | 0 | const uint32_t n_seqs = ubatch.n_seqs; |
455 | | |
456 | | // if we have enough unused cells before the current head -> |
457 | | // better to start searching from the beginning of the cache, hoping to fill it |
458 | 0 | if (head > used + 2*n_seqs) { |
459 | 0 | head = 0; |
460 | 0 | } |
461 | | |
462 | | // For recurrent state architectures (like Mamba or RWKV), |
463 | | // each cache cell can store the state for a whole sequence. |
464 | | // A slot should always be contiguous. |
465 | | |
466 | | // can only process batches with an equal number of new tokens in each sequence |
467 | 0 | GGML_ASSERT(ubatch.equal_seqs()); |
468 | |
469 | 0 | int32_t min = size - 1; |
470 | 0 | int32_t max = 0; |
471 | | |
472 | | // everything should fit if all seq_ids are smaller than the max |
473 | 0 | for (uint32_t s = 0; s < n_seqs; ++s) { |
474 | 0 | const uint32_t i = s*n_seq_tokens; // first token of sequence set s |
475 | 0 | const uint32_t n_seq_id = ubatch.n_seq_id[i]; |
476 | |
477 | 0 | for (uint32_t j = 0; j < n_seq_id; ++j) { |
478 | 0 | const llama_seq_id seq_id = ubatch.seq_id[i][j]; |
479 | |
480 | 0 | if (seq_id < 0 || (uint32_t) seq_id >= size) { |
481 | | // too big seq_id |
482 | | // TODO: would it be possible to resize the cache instead? |
483 | 0 | LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max); |
484 | 0 | return false; |
485 | 0 | } |
486 | 0 | if (j > 0) { |
487 | 0 | auto & seq = cells[seq_id]; |
488 | 0 | if (seq.tail >= 0) { |
489 | 0 | auto & cell = cells[seq.tail]; |
490 | | // clear cells from seq_ids that become shared |
491 | | // (should not normally happen, but let's handle it anyway) |
492 | 0 | cell.seq_id.erase(seq_id); |
493 | 0 | seq.tail = -1; |
494 | 0 | if (cell.seq_id.empty()) { |
495 | 0 | cell.pos = -1; |
496 | 0 | cell.src = -1; |
497 | 0 | used -= 1; |
498 | 0 | } |
499 | 0 | } |
500 | 0 | } |
501 | 0 | } |
502 | 0 | } |
503 | | |
504 | | #ifndef NDEBUG |
505 | | { |
506 | | std::vector<int32_t> tails_verif; |
507 | | tails_verif.assign(size, -1); |
508 | | for (uint32_t i = 0; i < size; ++i) { |
509 | | auto & cell = cells[i]; |
510 | | for (llama_seq_id seq_id : cell.seq_id) { |
511 | | if (tails_verif[seq_id] != -1) { |
512 | | LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); |
513 | | } |
514 | | tails_verif[seq_id] = i; |
515 | | } |
516 | | } |
517 | | for (uint32_t i = 0; i < size; ++i) { |
518 | | if (tails_verif[i] != cells[i].tail) { |
519 | | LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]); |
520 | | } |
521 | | } |
522 | | } |
523 | | #endif |
524 | | |
525 | | // find next empty cell |
526 | 0 | uint32_t next_empty_cell = head; |
527 | |
528 | 0 | for (uint32_t i = 0; i < size; ++i) { |
529 | 0 | if (next_empty_cell >= size) { next_empty_cell -= size; } |
530 | 0 | auto & cell = cells[next_empty_cell]; |
531 | 0 | if (cell.is_empty()) { break; } |
532 | 0 | next_empty_cell += 1; |
533 | 0 | } |
534 | | |
535 | | // find usable cell range |
536 | 0 | for (uint32_t s = 0; s < n_seqs; ++s) { |
537 | 0 | const uint32_t i = s*n_seq_tokens; |
538 | 0 | const llama_seq_id seq_id = ubatch.seq_id[i][0]; |
539 | 0 | auto & seq_meta = cells[seq_id]; |
540 | 0 | bool has_cell = false; |
541 | 0 | if (seq_meta.tail >= 0) { |
542 | 0 | auto & cell = cells[seq_meta.tail]; |
543 | 0 | GGML_ASSERT(cell.has_seq_id(seq_id)); |
544 | | // does this seq_id "own" the cell? |
545 | 0 | if (cell.seq_id.size() == 1) { has_cell = true; } |
546 | 0 | } |
547 | 0 | if (!has_cell) { |
548 | 0 | auto & empty_cell = cells[next_empty_cell]; |
549 | 0 | GGML_ASSERT(empty_cell.is_empty()); |
550 | | // copy old tail into the empty cell |
551 | 0 | if (seq_meta.tail >= 0) { |
552 | 0 | auto & orig_cell = cells[seq_meta.tail]; |
553 | 0 | empty_cell.pos = orig_cell.pos; |
554 | 0 | empty_cell.src = orig_cell.src; |
555 | 0 | orig_cell.seq_id.erase(seq_id); |
556 | 0 | empty_cell.seq_id.insert(seq_id); // will be overwritten |
557 | 0 | GGML_ASSERT(!orig_cell.is_empty()); // has at least one remaining seq_id |
558 | 0 | } |
559 | 0 | seq_meta.tail = next_empty_cell; |
560 | | // find next empty cell |
561 | 0 | if (s + 1 < n_seqs) { |
562 | 0 | for (uint32_t j = 0; j < size; ++j) { |
563 | 0 | next_empty_cell += 1; |
564 | 0 | if (next_empty_cell >= size) { next_empty_cell -= size; } |
565 | 0 | auto & cell = cells[next_empty_cell]; |
566 | 0 | if (cell.is_empty()) { break; } |
567 | 0 | } |
568 | 0 | } |
569 | 0 | } |
570 | 0 | if (min > seq_meta.tail) { min = seq_meta.tail; } |
571 | 0 | if (max < seq_meta.tail) { max = seq_meta.tail; } |
572 | 0 | } |
573 | | |
574 | | // gather and re-order |
575 | 0 | for (uint32_t s = 0; s < n_seqs; ++s) { |
576 | 0 | const uint32_t i = s*n_seq_tokens; |
577 | 0 | const int32_t dst_id = s + min; |
578 | 0 | const int32_t src_id = cells[ubatch.seq_id[i][0]].tail; |
579 | 0 | if (dst_id != src_id) { |
580 | 0 | auto & dst_cell = cells[dst_id]; |
581 | 0 | auto & src_cell = cells[src_id]; |
582 | |
583 | 0 | std::swap(dst_cell.pos, src_cell.pos); |
584 | 0 | std::swap(dst_cell.src, src_cell.src); |
585 | 0 | std::swap(dst_cell.seq_id, src_cell.seq_id); |
586 | | |
587 | | // swap tails |
588 | 0 | for (uint32_t j = 0; j < size; ++j) { |
589 | 0 | int32_t & tail = cells[j].tail; |
590 | 0 | if (tail == src_id) { |
591 | 0 | tail = dst_id; |
592 | 0 | } else if (tail == dst_id) { |
593 | 0 | tail = src_id; |
594 | 0 | } |
595 | 0 | } |
596 | 0 | } |
597 | 0 | } |
598 | | |
599 | | // update the pos of the used seqs |
600 | 0 | for (uint32_t s = 0; s < n_seqs; ++s) { |
601 | 0 | const uint32_t i = s*n_seq_tokens; |
602 | 0 | const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1]; |
603 | 0 | const int32_t cell_id = s + min; |
604 | 0 | auto & cell = cells[cell_id]; |
605 | |
606 | 0 | if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { |
607 | | // What should happen when the pos backtracks or skips a value? |
608 | | // Clearing the state mid-batch would require special-casing which isn't done. |
609 | 0 | LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n", |
610 | 0 | __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens); |
611 | 0 | } |
612 | 0 | cell.pos = last_pos; |
613 | 0 | cell.seq_id.clear(); |
614 | 0 | for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) { |
615 | 0 | const llama_seq_id seq_id = ubatch.seq_id[i][j]; |
616 | 0 | cell.seq_id.insert(seq_id); |
617 | 0 | cells[seq_id].tail = cell_id; |
618 | 0 | } |
619 | 0 | } |
620 | | |
621 | | // Find first cell without src refs, to use as the zero-ed state |
622 | 0 | { |
623 | | // TODO: bake-in src refcounts in the cell metadata |
624 | 0 | std::vector<int32_t> refcounts(size, 0); |
625 | 0 | for (size_t i = 0; i < size; ++i) { |
626 | 0 | const int32_t src = cells[i].src; |
627 | 0 | if (src >= 0) { |
628 | 0 | refcounts[src] += 1; |
629 | 0 | } |
630 | 0 | } |
631 | |
632 | 0 | rs_z = -1; |
633 | 0 | for (int i = min; i <= max; ++i) { |
634 | 0 | if (refcounts[i] == 0) { |
635 | 0 | rs_z = i; |
636 | 0 | break; |
637 | 0 | } |
638 | 0 | } |
639 | |
640 | 0 | for (int i = min; i <= max; ++i) { |
641 | 0 | if (cells[i].src < 0) { |
642 | 0 | GGML_ASSERT(rs_z >= 0); |
643 | 0 | cells[i].src0 = rs_z; |
644 | 0 | } else { |
645 | | // Stage the source ids for all used cells to allow correct seq_* behavior |
646 | | // and still make these values available when setting the inputs |
647 | 0 | cells[i].src0 = cells[i].src; |
648 | 0 | } |
649 | 0 | cells[i].src = i; // avoid moving or clearing twice |
650 | 0 | } |
651 | 0 | } |
652 | | |
653 | | // allow getting the range of used cells, from head to head + n |
654 | 0 | head = min; |
655 | 0 | n = max - min + 1; |
656 | 0 | used = std::count_if(cells.begin(), cells.end(), |
657 | 0 | [](const mem_cell & cell){ return !cell.is_empty(); }); |
658 | | |
659 | | // sanity check |
660 | 0 | return n >= n_seqs; |
661 | 0 | } |
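
After find_slot() succeeds, the current batch occupies the contiguous cell window [head, head + n), and rs_z names a cell whose state nothing references, which the graph can treat as a zeroed source. A hedged sketch of how a consumer could gather the per-cell copy sources through the context accessors defined at the end of this file; the helper function itself is hypothetical and not part of llama.cpp:

    #include <cstdint>
    #include <vector>

    #include "llama-memory-recurrent.h"

    // For cell i of the current window, s_copy(i) returns cells[head + i].src0:
    // either the cell whose state should be copied in, or rs_z for a zeroed state.
    static std::vector<int32_t> collect_copy_sources(const llama_memory_recurrent_context & mctx) {
        std::vector<int32_t> ids(mctx.get_n_rs());
        for (uint32_t i = 0; i < ids.size(); ++i) {
            ids[i] = mctx.s_copy((int) i);
        }
        return ids;
    }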
662 | | |
663 | 0 | bool llama_memory_recurrent::get_can_shift() const { |
664 | | // shifting the pos is trivial for recurrent models |
665 | 0 | return true; |
666 | 0 | } |
667 | | |
668 | 0 | size_t llama_memory_recurrent::total_size() const { |
669 | 0 | size_t size = 0; |
670 | 0 | for (const auto & [_, buf] : ctxs_bufs) { |
671 | 0 | size += ggml_backend_buffer_get_size(buf.get()); |
672 | 0 | } |
673 | |
674 | 0 | return size; |
675 | 0 | } |
676 | | |
677 | 0 | size_t llama_memory_recurrent::size_r_bytes() const { |
678 | 0 | size_t size_r_bytes = 0; |
679 | |
680 | 0 | for (const auto & r : r_l) { |
681 | 0 | if (r != nullptr) { |
682 | 0 | size_r_bytes += ggml_nbytes(r); |
683 | 0 | } |
684 | 0 | } |
685 | |
686 | 0 | return size_r_bytes; |
687 | 0 | } |
688 | | |
689 | 0 | size_t llama_memory_recurrent::size_s_bytes() const { |
690 | 0 | size_t size_s_bytes = 0; |
691 | |
692 | 0 | for (const auto & s : s_l) { |
693 | 0 | if (s != nullptr) { |
694 | 0 | size_s_bytes += ggml_nbytes(s); |
695 | 0 | } |
696 | 0 | } |
697 | |
698 | 0 | return size_s_bytes; |
699 | 0 | } |
700 | | |
701 | 0 | void llama_memory_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) const { |
702 | 0 | GGML_UNUSED(flags); |
703 | |
704 | 0 | std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive |
705 | 0 | uint32_t cell_count = 0; |
706 | | |
707 | | // Count the number of cells with the specified seq_id |
708 | | // Find all the ranges of cells with this seq id (or all, when -1) |
709 | 0 | uint32_t cell_range_begin = size; |
710 | 0 | for (uint32_t i = 0; i < size; ++i) { |
711 | 0 | const auto & cell = cells[i]; |
712 | 0 | if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) { |
713 | 0 | ++cell_count; |
714 | 0 | if (cell_range_begin == size) { |
715 | 0 | cell_range_begin = i; |
716 | 0 | } |
717 | 0 | } else { |
718 | 0 | if (cell_range_begin != size) { |
719 | 0 | cell_ranges.emplace_back(cell_range_begin, i); |
720 | 0 | cell_range_begin = size; |
721 | 0 | } |
722 | 0 | } |
723 | 0 | } |
724 | 0 | if (cell_range_begin != size) { |
725 | 0 | cell_ranges.emplace_back(cell_range_begin, size); |
726 | 0 | } |
727 | | |
728 | | // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count |
729 | 0 | uint32_t cell_count_check = 0; |
730 | 0 | for (const auto & range : cell_ranges) { |
731 | 0 | cell_count_check += range.second - range.first; |
732 | 0 | } |
733 | 0 | GGML_ASSERT(cell_count == cell_count_check); |
734 | |
735 | 0 | io.write(&cell_count, sizeof(cell_count)); |
736 | |
737 | 0 | state_write_meta(io, cell_ranges, seq_id); |
738 | 0 | state_write_data(io, cell_ranges); |
739 | 0 | } |
740 | | |
741 | 0 | void llama_memory_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) { |
742 | 0 | GGML_UNUSED(flags); |
743 | |
744 | 0 | uint32_t cell_count; |
745 | 0 | io.read_to(&cell_count, sizeof(cell_count)); |
746 | |
747 | 0 | bool res = true; |
748 | |
749 | 0 | res = res && state_read_meta(io, cell_count, seq_id); |
750 | 0 | res = res && state_read_data(io, cell_count); |
751 | |
752 | 0 | if (!res) { |
753 | 0 | if (seq_id == -1) { |
754 | 0 | clear(true); |
755 | 0 | } else { |
756 | 0 | seq_rm(seq_id, -1, -1); |
757 | 0 | } |
758 | 0 | throw std::runtime_error("failed to restore kv cache"); |
759 | 0 | } |
760 | 0 | } |
761 | | |
762 | 0 | void llama_memory_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const { |
763 | 0 | for (const auto & range : cell_ranges) { |
764 | 0 | for (uint32_t i = range.first; i < range.second; ++i) { |
765 | 0 | const auto & cell = cells[i]; |
766 | 0 | const llama_pos pos = cell.pos; |
767 | 0 | const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0; |
768 | |
769 | 0 | io.write(&pos, sizeof(pos)); |
770 | 0 | io.write(&n_seq_id, sizeof(n_seq_id)); |
771 | |
772 | 0 | if (n_seq_id) { |
773 | 0 | for (auto seq_id : cell.seq_id) { |
774 | 0 | io.write(&seq_id, sizeof(seq_id)); |
775 | 0 | } |
776 | 0 | } |
777 | 0 | } |
778 | 0 | } |
779 | 0 | } |
780 | | |
781 | 0 | void llama_memory_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const { |
782 | 0 | const uint32_t s_trans = 0; |
783 | 0 | const uint32_t n_layer = hparams.n_layer; |
784 | |
785 | 0 | io.write(&s_trans, sizeof(s_trans)); |
786 | 0 | io.write(&n_layer, sizeof(n_layer)); |
787 | |
788 | 0 | std::vector<uint8_t> tmp_buf; |
789 | | |
790 | | // Iterate and write all the keys first, each row is a cell |
791 | | // Get whole range at a time |
792 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
793 | | // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null) |
794 | 0 | if (r_l[il] == nullptr) continue; |
795 | | |
796 | | // Write key type |
797 | 0 | const int32_t r_type_i = (int32_t)r_l[il]->type; |
798 | 0 | io.write(&r_type_i, sizeof(r_type_i)); |
799 | | |
800 | | // Write row size of key |
801 | 0 | const uint64_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r()); |
802 | 0 | io.write(&r_size_row, sizeof(r_size_row)); |
803 | | |
804 | | // Write out each range of cells, one row of r_size_row bytes per cell |
805 | 0 | for (const auto & range : cell_ranges) { |
806 | 0 | const size_t range_size = range.second - range.first; |
807 | 0 | const size_t buf_size = range_size * r_size_row; |
808 | 0 | io.write_tensor(r_l[il], range.first * r_size_row, buf_size); |
809 | 0 | } |
810 | 0 | } |
811 | |
812 | 0 | if (!s_trans) { |
813 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
814 | | // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null) |
815 | 0 | if (s_l[il] == nullptr) continue; |
816 | | |
817 | | // Write value type |
818 | 0 | const int32_t s_type_i = (int32_t)s_l[il]->type; |
819 | 0 | io.write(&s_type_i, sizeof(s_type_i)); |
820 | | |
821 | | // Write row size of value |
822 | 0 | const uint64_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s()); |
823 | 0 | io.write(&s_size_row, sizeof(s_size_row)); |
824 | | |
825 | | // Write out each range of cells, one row of s_size_row bytes per cell |
826 | 0 | for (const auto & range : cell_ranges) { |
827 | 0 | const size_t range_size = range.second - range.first; |
828 | 0 | const size_t buf_size = range_size * s_size_row; |
829 | 0 | io.write_tensor(s_l[il], range.first * s_size_row, buf_size); |
830 | 0 | } |
831 | 0 | } |
832 | 0 | } else { |
833 | | // When the S state is transposed, we also need the element size and get the element ranges from each row |
834 | 0 | const uint32_t mem_size = size; |
835 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
836 | | // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null) |
837 | 0 | if (s_l[il] == nullptr) continue; |
838 | | |
839 | 0 | const uint32_t n_embd_s = hparams.n_embd_s(); |
840 | | |
841 | | // Write value type |
842 | 0 | const int32_t s_type_i = (int32_t)s_l[il]->type; |
843 | 0 | io.write(&s_type_i, sizeof(s_type_i)); |
844 | | |
845 | | // Write element size |
846 | 0 | const uint32_t s_size_el = ggml_type_size(s_l[il]->type); |
847 | 0 | io.write(&s_size_el, sizeof(s_size_el)); |
848 | | |
849 | | // Write state embedding size |
850 | 0 | io.write(&n_embd_s, sizeof(n_embd_s)); |
851 | | |
852 | | // For each row, we get the element values of each cell |
853 | 0 | for (uint32_t j = 0; j < n_embd_s; ++j) { |
854 | | // Write out each range of cells, s_size_el bytes per cell |
855 | 0 | for (const auto & range : cell_ranges) { |
856 | 0 | const size_t range_size = range.second - range.first; |
857 | 0 | const size_t src_offset = (range.first + j * mem_size) * s_size_el; |
858 | 0 | const size_t buf_size = range_size * s_size_el; |
859 | 0 | io.write_tensor(s_l[il], src_offset, buf_size); |
860 | 0 | } |
861 | 0 | } |
862 | 0 | } |
863 | 0 | } |
864 | 0 | } |
865 | | |
866 | 0 | bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) { |
867 | 0 | if (dest_seq_id != -1) { |
868 | | // single sequence |
869 | 0 | seq_rm(dest_seq_id, -1, -1); |
870 | |
871 | 0 | if (cell_count == 0) { |
872 | 0 | return true; |
873 | 0 | } |
874 | | |
875 | 0 | llama_batch_allocr balloc(hparams.n_pos_per_embd()); |
876 | |
877 | 0 | llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1); |
878 | |
879 | 0 | for (uint32_t i = 0; i < cell_count; ++i) { |
880 | 0 | llama_pos pos; |
881 | 0 | uint32_t n_seq_id; |
882 | |
883 | 0 | io.read_to(&pos, sizeof(pos)); |
884 | 0 | io.read_to(&n_seq_id, sizeof(n_seq_id)); |
885 | |
886 | 0 | if (n_seq_id != 0) { |
887 | 0 | LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); |
888 | 0 | return false; |
889 | 0 | } |
890 | | |
891 | 0 | ubatch.pos[i] = pos; |
892 | 0 | } |
893 | 0 | ubatch.n_seq_id[0] = 1; |
894 | 0 | ubatch.seq_id[0] = &dest_seq_id; |
895 | |
896 | 0 | if (!find_slot(ubatch)) { |
897 | 0 | LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); |
898 | 0 | return false; |
899 | 0 | } |
900 | | |
901 | | // DEBUG CHECK: head should be our first cell, head + cell_count - 1 should be our last cell (verify seq_id and pos values) |
902 | | // Assume that this is one contiguous block of cells |
903 | 0 | GGML_ASSERT(head + cell_count <= size); |
904 | 0 | GGML_ASSERT(cells[head].pos == ubatch.pos[0]); |
905 | 0 | GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]); |
906 | 0 | GGML_ASSERT(cells[head].has_seq_id(dest_seq_id)); |
907 | 0 | GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id)); |
908 | 0 | } else { |
909 | | // whole recurrent state cache restore |
910 | |
911 | 0 | if (cell_count > size) { |
912 | 0 | LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); |
913 | 0 | return false; |
914 | 0 | } |
915 | | |
916 | 0 | clear(true); |
917 | |
918 | 0 | for (uint32_t i = 0; i < cell_count; ++i) { |
919 | 0 | auto & cell = cells[i]; |
920 | |
921 | 0 | llama_pos pos; |
922 | 0 | uint32_t n_seq_id; |
923 | |
924 | 0 | io.read_to(&pos, sizeof(pos)); |
925 | 0 | io.read_to(&n_seq_id, sizeof(n_seq_id)); |
926 | |
927 | 0 | cell.pos = pos; |
928 | |
929 | 0 | for (uint32_t j = 0; j < n_seq_id; ++j) { |
930 | 0 | llama_seq_id seq_id; |
931 | 0 | io.read_to(&seq_id, sizeof(seq_id)); |
932 | | |
933 | | // TODO: llama_memory_recurrent should have a notion of max sequences |
934 | | //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { |
935 | 0 | if (seq_id < 0) { |
936 | | //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); |
937 | 0 | LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id); |
938 | 0 | return false; |
939 | 0 | } |
940 | | |
941 | 0 | cell.seq_id.insert(seq_id); |
942 | |
943 | 0 | int32_t & tail = cells[seq_id].tail; |
944 | 0 | if (tail != -1) { |
945 | 0 | LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail); |
946 | 0 | return false; |
947 | 0 | } |
948 | 0 | tail = i; |
949 | 0 | } |
950 | 0 | } |
951 | | |
952 | 0 | head = 0; |
953 | 0 | used = cell_count; |
954 | 0 | } |
955 | | |
956 | 0 | for (uint32_t i = 0; i < cell_count; ++i) { |
957 | 0 | uint32_t cell_id = head + i; |
958 | | // make sure the recurrent states will keep their restored state |
959 | 0 | cells[cell_id].src = cell_id; |
960 | 0 | } |
961 | |
962 | 0 | return true; |
963 | 0 | } |
964 | | |
965 | 0 | bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) { |
966 | 0 | uint32_t s_trans; |
967 | 0 | uint32_t n_layer; |
968 | 0 | io.read_to(&s_trans, sizeof(s_trans)); |
969 | 0 | io.read_to(&n_layer, sizeof(n_layer)); |
970 | |
971 | 0 | if (n_layer != hparams.n_layer) { |
972 | 0 | LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); |
973 | 0 | return false; |
974 | 0 | } |
975 | 0 | if (cell_count > size) { |
976 | 0 | LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size); |
977 | 0 | return false; |
978 | 0 | } |
979 | 0 | if (false != (bool) s_trans) { |
980 | 0 | LLAMA_LOG_ERROR("%s: incompatible s transposition\n", __func__); |
981 | 0 | return false; |
982 | 0 | } |
983 | | |
984 | | // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block |
985 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
986 | | // skip null layers |
987 | 0 | if (r_l[il] == nullptr) continue; |
988 | | |
989 | | // Read type of key |
990 | 0 | int32_t r_type_i_ref; |
991 | 0 | io.read_to(&r_type_i_ref, sizeof(r_type_i_ref)); |
992 | 0 | const int32_t r_type_i = (int32_t) r_l[il]->type; |
993 | 0 | if (r_type_i != r_type_i_ref) { |
994 | 0 | LLAMA_LOG_ERROR("%s: mismatched r type (%d != %d, layer %d)\n", __func__, r_type_i, r_type_i_ref, il); |
995 | 0 | return false; |
996 | 0 | } |
997 | | |
998 | | // Read row size of key |
999 | 0 | uint64_t r_size_row_ref; |
1000 | 0 | io.read_to(&r_size_row_ref, sizeof(r_size_row_ref)); |
1001 | 0 | const size_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r()); |
1002 | 0 | if (r_size_row != r_size_row_ref) { |
1003 | 0 | LLAMA_LOG_ERROR("%s: mismatched r row size (%zu != %zu, layer %d)\n", __func__, r_size_row, (size_t) r_size_row_ref, il); |
1004 | 0 | return false; |
1005 | 0 | } |
1006 | | |
1007 | 0 | if (cell_count) { |
1008 | | // Read and set the keys for the whole cell range |
1009 | 0 | ggml_backend_tensor_set(r_l[il], io.read(cell_count * r_size_row), head * r_size_row, cell_count * r_size_row); |
1010 | 0 | } |
1011 | 0 | } |
1012 | | |
1013 | 0 | if (!s_trans) { |
1014 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
1015 | | // skip null layers |
1016 | 0 | if (s_l[il] == nullptr) continue; |
1017 | | |
1018 | | // Read type of value |
1019 | 0 | int32_t s_type_i_ref; |
1020 | 0 | io.read_to(&s_type_i_ref, sizeof(s_type_i_ref)); |
1021 | 0 | const int32_t s_type_i = (int32_t)s_l[il]->type; |
1022 | |
1023 | 0 | if (s_type_i != s_type_i_ref) { |
1024 | 0 | LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il); |
1025 | 0 | return false; |
1026 | 0 | } |
1027 | | |
1028 | | // Read row size of value |
1029 | 0 | uint64_t s_size_row_ref; |
1030 | 0 | io.read_to(&s_size_row_ref, sizeof(s_size_row_ref)); |
1031 | 0 | const size_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s()); |
1032 | 0 | if (s_size_row != s_size_row_ref) { |
1033 | 0 | LLAMA_LOG_ERROR("%s: mismatched s row size (%zu != %zu, layer %d)\n", __func__, s_size_row, (size_t) s_size_row_ref, il); |
1034 | 0 | return false; |
1035 | 0 | } |
1036 | | |
1037 | 0 | if (cell_count) { |
1038 | | // Read and set the values for the whole cell range |
1039 | 0 | ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_row), head * s_size_row, cell_count * s_size_row); |
1040 | 0 | } |
1041 | 0 | } |
1042 | 0 | } else { |
1043 | | // For each layer, read the values for each cell (transposed) |
1044 | 0 | for (uint32_t il = 0; il < n_layer; ++il) { |
1045 | | // skip null layers |
1046 | 0 | if (s_l[il] == nullptr) continue; |
1047 | | |
1048 | 0 | const uint32_t n_embd_s = hparams.n_embd_s(); |
1049 | | |
1050 | | // Read type of value |
1051 | 0 | int32_t s_type_i_ref; |
1052 | 0 | io.read_to(&s_type_i_ref, sizeof(s_type_i_ref)); |
1053 | 0 | const int32_t s_type_i = (int32_t)s_l[il]->type; |
1054 | 0 | if (s_type_i != s_type_i_ref) { |
1055 | 0 | LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il); |
1056 | 0 | return false; |
1057 | 0 | } |
1058 | | |
1059 | | // Read element size of value |
1060 | 0 | uint32_t s_size_el_ref; |
1061 | 0 | io.read_to(&s_size_el_ref, sizeof(s_size_el_ref)); |
1062 | 0 | const size_t s_size_el = ggml_type_size(s_l[il]->type); |
1063 | 0 | if (s_size_el != s_size_el_ref) { |
1064 | 0 | LLAMA_LOG_ERROR("%s: mismatched s element size (%zu != %zu, layer %d)\n", __func__, s_size_el, (size_t) s_size_el_ref, il); |
1065 | 0 | return false; |
1066 | 0 | } |
1067 | | |
1068 | | // Read state embedding size |
1069 | 0 | uint32_t n_embd_s_ref; |
1070 | 0 | io.read_to(&n_embd_s_ref, sizeof(n_embd_s_ref)); |
1071 | 0 | if (n_embd_s != n_embd_s_ref) { |
1072 | 0 | LLAMA_LOG_ERROR("%s: mismatched s embedding size (%u != %u, layer %d)\n", __func__, n_embd_s, n_embd_s_ref, il); |
1073 | 0 | return false; |
1074 | 0 | } |
1075 | | |
1076 | 0 | if (cell_count) { |
1077 | | // For each row in the transposed matrix, read the values for the whole cell range |
1078 | 0 | for (uint32_t j = 0; j < n_embd_s; ++j) { |
1079 | 0 | const size_t dst_offset = (head + j * size) * s_size_el; |
1080 | 0 | ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_el), dst_offset, cell_count * s_size_el); |
1081 | 0 | } |
1082 | 0 | } |
1083 | 0 | } |
1084 | 0 | } |
1085 | | |
1086 | 0 | return true; |
1087 | 0 | } |
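
Taken together, state_write(), state_write_meta() and state_write_data() above emit the layout below, and state_read_meta()/state_read_data() consume it in the same order. This is only a summary of the reads and writes in this file, not a formal format specification:

    // uint32_t      cell_count
    // cell_count times (one metadata entry per cell):
    //     llama_pos    pos
    //     uint32_t     n_seq_id              (written as 0 when saving a single sequence)
    //     llama_seq_id seq_id[n_seq_id]
    // uint32_t      s_trans                  (always 0 here: S states are stored row-wise)
    // uint32_t      n_layer
    // for each layer with a non-null R tensor:
    //     int32_t r_type;  uint64_t r_size_row;  cell_count rows of R data
    // for each layer with a non-null S tensor:
    //     int32_t s_type;  uint64_t s_size_row;  cell_count rows of S data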
1088 | | |
1089 | | // |
1090 | | // llama_memory_recurrent_context |
1091 | | // |
1092 | | |
1093 | 0 | llama_memory_recurrent_context::llama_memory_recurrent_context(llama_memory_status status) : status(status) {} |
1094 | | |
1095 | | llama_memory_recurrent_context::llama_memory_recurrent_context( |
1096 | 0 | llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) { |
1097 | 0 | } |
1098 | | |
1099 | | llama_memory_recurrent_context::llama_memory_recurrent_context( |
1100 | | llama_memory_recurrent * mem, |
1101 | 0 | std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {} |
1102 | | |
1103 | 0 | llama_memory_recurrent_context::~llama_memory_recurrent_context() = default; |
1104 | | |
1105 | 0 | bool llama_memory_recurrent_context::next() { |
1106 | 0 | assert(status == LLAMA_MEMORY_STATUS_SUCCESS); |
1107 | |
1108 | 0 | if (++i_next >= ubatches.size()) { |
1109 | 0 | return false; |
1110 | 0 | } |
1111 | | |
1112 | 0 | return true; |
1113 | 0 | } |
1114 | | |
1115 | 0 | bool llama_memory_recurrent_context::apply() { |
1116 | 0 | assert(!llama_memory_status_is_fail(status)); |
1117 | | |
1118 | | // no ubatches -> this is an update |
1119 | 0 | if (ubatches.empty()) { |
1120 | | // recurrent cache never performs updates |
1121 | 0 | assert(status == LLAMA_MEMORY_STATUS_NO_UPDATE); |
1122 | |
1123 | 0 | return true; |
1124 | 0 | } |
1125 | | |
1126 | 0 | mem->find_slot(ubatches[i_next]); |
1127 | |
1128 | 0 | return true; |
1129 | 0 | } |
1130 | | |
1131 | 0 | llama_memory_status llama_memory_recurrent_context::get_status() const { |
1132 | 0 | return status; |
1133 | 0 | } |
1134 | | |
1135 | 0 | const llama_ubatch & llama_memory_recurrent_context::get_ubatch() const { |
1136 | 0 | assert(status == LLAMA_MEMORY_STATUS_SUCCESS); |
1137 | |
1138 | 0 | return ubatches[i_next]; |
1139 | 0 | } |
1140 | | |
1141 | 0 | uint32_t llama_memory_recurrent_context::get_n_rs() const { |
1142 | 0 | return is_full ? mem->size : mem->n; |
1143 | 0 | } |
1144 | | |
1145 | 0 | uint32_t llama_memory_recurrent_context::get_head() const { |
1146 | 0 | return is_full ? 0 : mem->head; |
1147 | 0 | } |
1148 | | |
1149 | 0 | int32_t llama_memory_recurrent_context::get_rs_z() const { |
1150 | 0 | return is_full ? 0 : mem->rs_z; |
1151 | 0 | } |
1152 | | |
1153 | 0 | uint32_t llama_memory_recurrent_context::get_size() const { |
1154 | 0 | return mem->size; |
1155 | 0 | } |
1156 | | |
1157 | 0 | ggml_tensor * llama_memory_recurrent_context::get_r_l(int32_t il) const { |
1158 | 0 | return mem->r_l[il]; |
1159 | 0 | } |
1160 | | |
1161 | 0 | ggml_tensor * llama_memory_recurrent_context::get_s_l(int32_t il) const { |
1162 | 0 | return mem->s_l[il]; |
1163 | 0 | } |
1164 | | |
1165 | 0 | int32_t llama_memory_recurrent_context::s_copy(int i) const { |
1166 | 0 | return mem->cells[i + mem->head].src0; |
1167 | 0 | } |
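
For context, init_batch() returns one of these memory contexts and the caller is expected to step through its ubatches, applying each one before building the compute graph for it. A minimal sketch of such a driver loop, using only the methods defined in this file; the wrapper function and its error handling are hypothetical, not the actual llama_context code:

    #include <cstdint>

    #include "llama-batch.h"
    #include "llama-memory-recurrent.h"

    static void run_batch(llama_memory_recurrent & mem, llama_batch_allocr & balloc, uint32_t n_ubatch) {
        auto mctx = mem.init_batch(balloc, n_ubatch, /*embd_all=*/false);
        if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
            return; // e.g. LLAMA_MEMORY_STATUS_FAILED_PREPARE: no usable split was found
        }
        do {
            mctx->apply();                               // find_slot() for the current ubatch
            const llama_ubatch & ubatch = mctx->get_ubatch();
            (void) ubatch;                               // ... build and run the graph here ...
        } while (mctx->next());                          // advance to the next ubatch
    }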