/src/llama.cpp/ggml/src/ggml-alloc.c
Line | Count | Source |
1 | | #include "ggml-alloc.h" |
2 | | #include "ggml-backend-impl.h" |
3 | | #include "ggml.h" |
4 | | #include "ggml-impl.h" |
5 | | #include <assert.h> |
6 | | #include <limits.h> |
7 | | #include <stdarg.h> |
8 | | #include <stdio.h> |
9 | | #include <stdlib.h> |
10 | | #include <string.h> |
11 | | |
12 | 0 | #define MAX(a, b) ((a) > (b) ? (a) : (b)) |
13 | | #define MAX_FREE_BLOCKS 256 |
14 | | |
15 | | //#define GGML_ALLOCATOR_DEBUG |
16 | | |
17 | | //#define AT_PRINTF(...) GGML_LOG_DEBUG(__VA_ARGS__) |
18 | | #define AT_PRINTF(...) |
19 | | |
20 | | |
21 | 0 | static bool ggml_is_view(const struct ggml_tensor * t) { |
22 | 0 | return t->view_src != NULL; |
23 | 0 | } |
24 | | |
25 | | // ops that return true for this function must not use restrict pointers for their backend implementations |
26 | 0 | bool ggml_op_can_inplace(enum ggml_op op) { |
27 | 0 | switch (op) { |
28 | 0 | case GGML_OP_SCALE: |
29 | 0 | case GGML_OP_DIAG_MASK_ZERO: |
30 | 0 | case GGML_OP_DIAG_MASK_INF: |
31 | 0 | case GGML_OP_ADD: |
32 | 0 | case GGML_OP_ADD_ID: |
33 | 0 | case GGML_OP_ADD1: |
34 | 0 | case GGML_OP_SUB: |
35 | 0 | case GGML_OP_MUL: |
36 | 0 | case GGML_OP_DIV: |
37 | 0 | case GGML_OP_SQR: |
38 | 0 | case GGML_OP_SQRT: |
39 | 0 | case GGML_OP_LOG: |
40 | 0 | case GGML_OP_UNARY: |
41 | 0 | case GGML_OP_ROPE: |
42 | 0 | case GGML_OP_ROPE_BACK: |
43 | 0 | case GGML_OP_SILU_BACK: |
44 | 0 | case GGML_OP_RMS_NORM: |
45 | 0 | case GGML_OP_RMS_NORM_BACK: |
46 | 0 | case GGML_OP_SOFT_MAX: |
47 | 0 | case GGML_OP_SOFT_MAX_BACK: |
48 | 0 | return true; |
49 | | |
50 | 0 | default: |
51 | 0 | return false; |
52 | 0 | } |
53 | 0 | } |
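// Note on the "restrict" requirement above: when an op runs in place, dst->data aliases
// src->data, so its backend kernels must be written without restrict-qualified pointers.
// A minimal sketch of an aliasing-safe GGML_OP_SCALE kernel (illustrative only, not the
// actual backend code):
//
//   static void scale_f32(const float * x, float * y, int64_t n, float s) {
//       for (int64_t i = 0; i < n; ++i) {
//           y[i] = x[i] * s;   // well-defined even when y == x
//       }
//   }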
54 | | |
55 | 0 | static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) { |
56 | 0 | assert(alignment && !(alignment & (alignment - 1))); // power of 2 |
57 | 0 | size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment; |
58 | 0 | return offset + align; |
59 | 0 | } |
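// Worked example (illustrative values): buffer = (void *)0x1000, offset = 37, alignment = 32.
// ((uintptr_t)buffer + offset) % alignment == 5, so align == (32 - 5) % 32 == 27 and the
// function returns 37 + 27 == 64, the first offset >= 37 whose absolute address is 32-byte
// aligned.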
60 | | |
61 | | // tallocr |
62 | | |
63 | 0 | struct ggml_tallocr ggml_tallocr_new(ggml_backend_buffer_t buffer) { |
64 | 0 | void * base = ggml_backend_buffer_get_base(buffer); |
65 | 0 | size_t align = ggml_backend_buffer_get_alignment(buffer); |
66 | |
67 | 0 | assert(align && !(align & (align - 1))); // power of 2 |
68 | |
69 | 0 | struct ggml_tallocr talloc = (struct ggml_tallocr) { |
70 | 0 | /*.buffer = */ buffer, |
71 | 0 | /*.base = */ base, |
72 | 0 | /*.alignment = */ align, |
73 | 0 | /*.offset = */ aligned_offset(base, 0, align), |
74 | 0 | }; |
75 | 0 | return talloc; |
76 | 0 | } |
77 | | |
78 | 0 | enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_tensor * tensor) { |
79 | 0 | size_t size = ggml_backend_buffer_get_alloc_size(talloc->buffer, tensor); |
80 | 0 | size = GGML_PAD(size, talloc->alignment); |
81 | |
82 | 0 | if (talloc->offset + size > ggml_backend_buffer_get_size(talloc->buffer)) { |
83 | 0 | GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %s (needed %zu, available %zu)\n", |
84 | 0 | __func__, tensor->name, size, ggml_backend_buffer_get_size(talloc->buffer) - talloc->offset); |
85 | 0 | GGML_ABORT("not enough space in the buffer"); |
86 | 0 | } |
87 | |
88 | 0 | void * addr = (char *)ggml_backend_buffer_get_base(talloc->buffer) + talloc->offset; |
89 | 0 | talloc->offset += size; |
90 | |
91 | 0 | assert(((uintptr_t)addr % talloc->alignment) == 0); |
92 | |
93 | 0 | return ggml_backend_tensor_alloc(talloc->buffer, tensor, addr); |
94 | 0 | } |
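// Typical use of the linear allocator (a minimal sketch; assumes `buft` is a valid
// ggml_backend_buffer_type_t and `a`, `b` are tensors created in a ggml context with
// no_alloc enabled):
//
//   ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 16 * 1024 * 1024);
//   struct ggml_tallocr talloc = ggml_tallocr_new(buf);
//   ggml_tallocr_alloc(&talloc, a);   // each call advances talloc.offset by the padded size
//   ggml_tallocr_alloc(&talloc, b);
//   // ... use the tensors, then release their storage:
//   ggml_backend_buffer_free(buf);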
95 | | |
96 | | // dynamic tensor allocator |
97 | | |
98 | 0 | #define GGML_VBUFFER_MAX_CHUNKS 16 |
99 | | |
100 | | // relative memory address within an allocation that can be split into multiple buffers (chunks) |
101 | | struct buffer_address { |
102 | | int chunk; // index of a backend buffer |
103 | | size_t offset; // local memory offset within the buffer |
104 | | }; |
105 | | |
106 | | static const struct buffer_address GGML_BUFFER_ADDRESS_INVALID = { -1, SIZE_MAX }; |
107 | | |
108 | 0 | static bool ggml_buffer_address_less(struct buffer_address a, struct buffer_address b) { |
109 | 0 | return a.chunk != b.chunk ? a.chunk < b.chunk : a.offset < b.offset; |
110 | 0 | } |
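// i.e. addresses are ordered lexicographically by (chunk, offset), for example:
//   {chunk=0, offset=4096} < {chunk=1, offset=0} < {chunk=1, offset=64}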
111 | | |
112 | | struct free_block { |
113 | | size_t offset; |
114 | | size_t size; |
115 | | }; |
116 | | |
117 | | struct tallocr_chunk { |
118 | | struct free_block free_blocks[MAX_FREE_BLOCKS]; |
119 | | int n_free_blocks; |
120 | | size_t max_size; |
121 | | }; |
122 | | |
123 | | struct ggml_dyn_tallocr { |
124 | | size_t alignment; |
125 | | size_t max_chunk_size; |
126 | | struct tallocr_chunk * chunks[GGML_VBUFFER_MAX_CHUNKS]; |
127 | | int n_chunks; |
128 | | |
129 | | #ifdef GGML_ALLOCATOR_DEBUG |
130 | | struct { |
131 | | const struct ggml_tensor * tensor; |
132 | | struct buffer_address addr; |
133 | | } allocated_tensors[1024]; |
134 | | #endif |
135 | | }; |
136 | | |
137 | 0 | static void ggml_dyn_tallocr_insert_block(struct tallocr_chunk * chunk, size_t offset, size_t size) { |
138 | 0 | GGML_ASSERT(chunk->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks"); |
139 | | // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster) |
140 | 0 | int insert_pos = 0; |
141 | 0 | while (insert_pos < chunk->n_free_blocks && chunk->free_blocks[insert_pos].offset < offset) { |
142 | 0 | insert_pos++; |
143 | 0 | } |
144 | | // shift all blocks from insert_pos onward to make room for the new block |
145 | 0 | for (int i = chunk->n_free_blocks; i > insert_pos; i--) { |
146 | 0 | chunk->free_blocks[i] = chunk->free_blocks[i-1]; |
147 | 0 | } |
148 | | // insert the new block |
149 | 0 | chunk->free_blocks[insert_pos].offset = offset; |
150 | 0 | chunk->free_blocks[insert_pos].size = size; |
151 | 0 | chunk->n_free_blocks++; |
152 | 0 | } |
153 | | |
154 | 0 | static void ggml_dyn_tallocr_remove_block(struct tallocr_chunk * chunk, int idx) { |
155 | | // shift all elements after idx by 1 to the left, overwriting the element at idx |
156 | 0 | for (int i = idx; i < chunk->n_free_blocks; i++) { |
157 | 0 | chunk->free_blocks[i] = chunk->free_blocks[i+1]; |
158 | 0 | } |
159 | 0 | chunk->n_free_blocks--; |
160 | 0 | } |
161 | | |
162 | 0 | static int ggml_dyn_tallocr_new_chunk(struct ggml_dyn_tallocr * alloc, size_t min_size) { |
163 | 0 | if (alloc->n_chunks >= GGML_VBUFFER_MAX_CHUNKS) { |
164 | 0 | return -1; |
165 | 0 | } |
166 | 0 | struct tallocr_chunk * chunk = calloc(1, sizeof(struct tallocr_chunk)); |
167 | 0 | chunk->n_free_blocks = 1; |
168 | 0 | chunk->free_blocks[0].offset = 0; |
169 | | // available space in a chunk is limited to max_chunk_size, but can be higher if: |
170 | | // 1. a single tensor exceeds the maximum, and cannot fit any other way |
171 | | // 2. we are running out of chunks |
172 | | // backends will either manage to allocate the larger size, or report an error. |
173 | 0 | chunk->free_blocks[0].size = MAX(min_size, alloc->max_chunk_size); |
174 | 0 | if (alloc->n_chunks == GGML_VBUFFER_MAX_CHUNKS - 1) { |
175 | 0 | chunk->free_blocks[0].size = SIZE_MAX/2; |
176 | 0 | } |
177 | 0 | alloc->chunks[alloc->n_chunks] = chunk; |
178 | 0 | alloc->n_chunks++; |
179 | 0 | return alloc->n_chunks - 1; |
180 | 0 | } |
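// Sizing example (illustrative numbers): with max_chunk_size = 256 MiB, a new chunk created
// for a 300 MiB tensor gets a 300 MiB free block (case 1 above); the final chunk (index
// GGML_VBUFFER_MAX_CHUNKS - 1) instead gets a SIZE_MAX/2 free block so that allocation
// requests never fail at this level (case 2).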
181 | | |
182 | | #ifdef GGML_ALLOCATOR_DEBUG |
183 | | static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) { |
184 | | for (int i = 0; i < 1024; i++) { |
185 | | if (alloc->allocated_tensors[i].tensor == NULL) { |
186 | | alloc->allocated_tensors[i].tensor = tensor; |
187 | | alloc->allocated_tensors[i].addr = addr; |
188 | | return; |
189 | | } |
190 | | } |
191 | | GGML_ABORT("out of allocated_tensors"); |
192 | | } |
193 | | static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) { |
194 | | for (int i = 0; i < 1024; i++) { |
195 | | if (alloc->allocated_tensors[i].addr.chunk == addr.chunk && alloc->allocated_tensors[i].addr.offset == addr.offset) { |
196 | | alloc->allocated_tensors[i].tensor = NULL; |
197 | | return; |
198 | | } |
199 | | } |
200 | | GGML_ABORT("tried to free tensor %s not found\n", tensor->name); |
201 | | } |
202 | | #endif |
203 | | |
204 | 0 | static struct buffer_address ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) { |
205 | 0 | size = aligned_offset(NULL, size, alloc->alignment); |
206 | |
207 | 0 | AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size); |
208 | |
209 | 0 | int best_fit_chunk = -1; |
210 | 0 | int best_fit_block = -1; |
211 | 0 | size_t max_avail = 0; |
212 | | |
213 | | // find the best fitting free block besides the last block, within any chunk |
214 | 0 | for (int c = 0; c < alloc->n_chunks; ++c) { |
215 | 0 | struct tallocr_chunk * chunk = alloc->chunks[c]; |
216 | 0 | size_t best_fit_size = SIZE_MAX; |
217 | 0 | for (int i = 0; i < chunk->n_free_blocks - 1; i++) { |
218 | 0 | struct free_block * block = &chunk->free_blocks[i]; |
219 | 0 | max_avail = MAX(max_avail, block->size); |
220 | 0 | if (block->size >= size && block->size <= best_fit_size) { |
221 | 0 | best_fit_chunk = c; |
222 | 0 | best_fit_block = i; |
223 | 0 | best_fit_size = block->size; |
224 | 0 | } |
225 | 0 | } |
226 | 0 | } |
227 | |
228 | 0 | if (best_fit_block == -1) { |
229 | | // no suitable block found, try the last block (this may grow a chunk's size) |
230 | 0 | int64_t best_reuse = INT64_MIN; |
231 | 0 | for (int c = 0; c < alloc->n_chunks; ++c) { |
232 | 0 | struct tallocr_chunk * chunk = alloc->chunks[c]; |
233 | 0 | if (chunk->n_free_blocks > 0) { |
234 | 0 | struct free_block * block = &chunk->free_blocks[chunk->n_free_blocks - 1]; |
235 | 0 | max_avail = MAX(max_avail, block->size); |
236 | 0 | int64_t reuse_factor = chunk->max_size - block->offset - size; |
237 | | // reuse_factor < 0 : amount of extra memory that needs to be allocated |
238 | | // reuse_factor = 0 : allocated free space exactly matches tensor size |
239 | | // reuse_factor > 0 : superfluous memory that will remain unused |
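// worked example (illustrative numbers): with chunk->max_size = 1024 and the last free block
// at offset 896, a 256-byte request gives reuse_factor = 1024 - 896 - 256 = -128 (the chunk
// would have to grow by 128 bytes), while a 64-byte request gives 1024 - 896 - 64 = 64
// (64 bytes of already-reserved space would remain unused)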
240 | 0 | bool better_reuse = best_reuse < 0 && reuse_factor > best_reuse; |
241 | 0 | bool better_fit = reuse_factor >= 0 && reuse_factor < best_reuse; |
242 | 0 | if (block->size >= size && (better_reuse || better_fit)) { |
243 | 0 | best_fit_chunk = c; |
244 | 0 | best_fit_block = chunk->n_free_blocks - 1; |
245 | 0 | best_reuse = reuse_factor; |
246 | 0 | } |
247 | 0 | } |
248 | 0 | } |
249 | 0 | } |
250 | |
251 | 0 | if (best_fit_block == -1) { |
252 | | // none of the existing chunks have enough space left |
253 | 0 | best_fit_chunk = ggml_dyn_tallocr_new_chunk(alloc, size); |
254 | 0 | best_fit_block = 0; |
255 | 0 | } |
256 | 0 | if (best_fit_chunk == -1) { |
257 | | // since the last chunk always has virtually endless memory, this should never happen |
258 | 0 | GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n", |
259 | 0 | __func__, size, max_avail); |
260 | 0 | GGML_ABORT("graph allocation: failed to reserve memory"); |
261 | 0 | } |
262 | |
263 | 0 | struct tallocr_chunk * chunk = alloc->chunks[best_fit_chunk]; |
264 | 0 | struct free_block * block = &chunk->free_blocks[best_fit_block]; |
265 | 0 | struct buffer_address addr = {.chunk = best_fit_chunk, .offset = block->offset }; |
266 | 0 | block->offset += size; |
267 | 0 | block->size -= size; |
268 | 0 | if (block->size == 0) { |
269 | | // remove block if empty |
270 | 0 | ggml_dyn_tallocr_remove_block(chunk, best_fit_block); |
271 | 0 | } |
272 | |
273 | 0 | AT_PRINTF("block %d, offset %zu, chunk %d\n", best_fit_block, addr.offset, addr.chunk); |
274 | |
275 | | #ifdef GGML_ALLOCATOR_DEBUG |
276 | | add_allocated_tensor(alloc, addr, tensor); |
277 | | size_t cur_max = addr.offset + size; |
278 | | if (cur_max > chunk->max_size) { |
279 | | // sort allocated_tensors by chunk/offset |
280 | | for (int i = 0; i < 1024; i++) { |
281 | | for (int j = i + 1; j < 1024; j++) { |
282 | | if (ggml_buffer_address_less(alloc->allocated_tensors[j].addr, alloc->allocated_tensors[i].addr)) { |
283 | | const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor; |
284 | | struct buffer_address tmp_addr = alloc->allocated_tensors[i].addr; |
285 | | alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor; |
286 | | alloc->allocated_tensors[i].addr = alloc->allocated_tensors[j].addr; |
287 | | alloc->allocated_tensors[j].tensor = tmp_tensor; |
288 | | alloc->allocated_tensors[j].addr = tmp_addr; |
289 | | } |
290 | | } |
291 | | } |
292 | | GGML_LOG_DEBUG("max_size[%d] = %.2f MB: tensors: ", addr.chunk, cur_max / 1024.0 / 1024.0); |
293 | | for (int i = 0; i < 1024; i++) { |
294 | | if (alloc->allocated_tensors[i].tensor) { |
295 | | GGML_LOG_DEBUG("%s [%d: %zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name, |
296 | | alloc->allocated_tensors[i].addr.chunk, |
297 | | alloc->allocated_tensors[i].addr.offset, |
298 | | alloc->allocated_tensors[i].addr.offset + ggml_nbytes(alloc->allocated_tensors[i].tensor), |
299 | | ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0); |
300 | | } |
301 | | } |
302 | | GGML_LOG_DEBUG("\n"); |
303 | | } |
304 | | #endif |
305 | |
306 | 0 | chunk->max_size = MAX(chunk->max_size, addr.offset + size); |
307 | |
308 | 0 | return addr; |
309 | | |
310 | 0 | GGML_UNUSED(tensor); |
311 | 0 | } |
312 | | |
313 | | // this is a very naive implementation, but for our case the number of free blocks should be very small |
314 | 0 | static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, size_t size, const struct ggml_tensor * tensor) { |
315 | 0 | size = aligned_offset(NULL, size, alloc->alignment); |
316 | |
317 | 0 | AT_PRINTF("%s: freeing %s at {chunk=%d, offset=%zu} (%zu bytes) - n_free_blocks = %d\n", |
318 | 0 | __func__, tensor->name, addr.chunk, addr.offset, size, alloc->chunks[addr.chunk]->n_free_blocks); |
319 | |
320 | | #ifdef GGML_ALLOCATOR_DEBUG |
321 | | remove_allocated_tensor(alloc, addr, tensor); |
322 | | #endif |
323 | |
324 | 0 | struct tallocr_chunk * chunk = alloc->chunks[addr.chunk]; |
325 | | |
326 | | // see if we can merge with an existing block |
327 | 0 | for (int i = 0; i < chunk->n_free_blocks; i++) { |
328 | 0 | struct free_block * block = &chunk->free_blocks[i]; |
329 | | // check if ptr is at the end of the block |
330 | 0 | if (block->offset + block->size == addr.offset) { |
331 | 0 | block->size += size; |
332 | | // check if we can merge with the next block |
333 | 0 | if (i < chunk->n_free_blocks - 1) { |
334 | 0 | struct free_block * next = &chunk->free_blocks[i+1]; |
335 | 0 | if (block->offset + block->size == next->offset) { |
336 | 0 | block->size += next->size; |
337 | 0 | ggml_dyn_tallocr_remove_block(chunk, i+1); |
338 | 0 | } |
339 | 0 | } |
340 | 0 | return; |
341 | 0 | } |
342 | | // check if ptr is at the beginning of the block |
343 | 0 | if (addr.offset + size == block->offset) { |
344 | 0 | block->offset = addr.offset; |
345 | 0 | block->size += size; |
346 | | // check if we can merge with the previous block |
347 | 0 | if (i > 0) { |
348 | 0 | struct free_block * prev = &chunk->free_blocks[i-1]; |
349 | 0 | if (prev->offset + prev->size == block->offset) { |
350 | 0 | prev->size += block->size; |
351 | 0 | ggml_dyn_tallocr_remove_block(chunk, i); |
352 | 0 | } |
353 | 0 | } |
354 | 0 | return; |
355 | 0 | } |
356 | 0 | } |
357 | | // otherwise, add a new block |
358 | 0 | ggml_dyn_tallocr_insert_block(chunk, addr.offset, size); |
359 | |
360 | 0 | GGML_UNUSED(tensor); |
361 | 0 | } |
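// Coalescing example (illustrative offsets/sizes): with free blocks {offset=0, size=128} and
// {offset=256, size=128}, freeing a 128-byte region at offset 128 first extends the first
// block to {0, 256} and then merges it with its neighbor, leaving a single block {0, 384}.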
362 | | |
363 | 0 | static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) { |
364 | 0 | for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; i++) { |
365 | 0 | free(alloc->chunks[i]); |
366 | 0 | alloc->chunks[i] = NULL; |
367 | 0 | } |
368 | 0 | alloc->n_chunks = 0; |
369 | |
370 | | #ifdef GGML_ALLOCATOR_DEBUG |
371 | | for (int i = 0; i < 1024; i++) { |
372 | | alloc->allocated_tensors[i].tensor = NULL; |
373 | | } |
374 | | #endif |
375 | 0 | } |
376 | | |
377 | 0 | static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment, size_t max_buffer_size) { |
378 | 0 | struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr)); |
379 | |
380 | 0 | *alloc = (struct ggml_dyn_tallocr) { |
381 | 0 | /*.alignment = */ alignment, |
382 | 0 | /*.max_chunk_size = */ MIN(max_buffer_size, SIZE_MAX/2), // clamp to avoid overflows |
383 | 0 | /*.chunks = */ {NULL}, |
384 | 0 | /*.n_chunks = */ 0, |
385 | | #ifdef GGML_ALLOCATOR_DEBUG |
386 | | /*.allocated_tensors = */ {{0}}, |
387 | | #endif |
388 | 0 | }; |
389 | |
390 | 0 | ggml_dyn_tallocr_reset(alloc); |
391 | |
392 | 0 | return alloc; |
393 | 0 | } |
394 | | |
395 | 0 | static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) { |
396 | 0 | for (int i = 0; i < alloc->n_chunks; ++i) { |
397 | 0 | free(alloc->chunks[i]); |
398 | 0 | } |
399 | 0 | free(alloc); |
400 | 0 | } |
401 | | |
402 | 0 | static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc, int chunk) { |
403 | 0 | return chunk < alloc->n_chunks ? alloc->chunks[chunk]->max_size : 0; |
404 | 0 | } |
405 | | |
406 | | |
407 | | // virtual buffer with contiguous memory range, split into multiple backend buffers (chunks) |
408 | | |
409 | | struct vbuffer { |
410 | | ggml_backend_buffer_t chunks[GGML_VBUFFER_MAX_CHUNKS]; |
411 | | }; |
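// A vbuffer presents up to GGML_VBUFFER_MAX_CHUNKS backend buffers as one logical allocation:
// a buffer_address {chunk=c, offset=o} refers to byte o of buf->chunks[c], e.g. {1, 256} is
// byte 256 of the second backend buffer.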
412 | | |
413 | 0 | static void ggml_vbuffer_free(struct vbuffer * buf) { |
414 | 0 | if (buf == NULL) { |
415 | 0 | return; |
416 | 0 | } |
417 | 0 | for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; ++i) { |
418 | 0 | ggml_backend_buffer_free(buf->chunks[i]); |
419 | 0 | } |
420 | 0 | free(buf); |
421 | 0 | } |
422 | | |
423 | 0 | static size_t ggml_vbuffer_chunk_size(struct vbuffer * buf, int chunk) { |
424 | 0 | return buf->chunks[chunk] ? ggml_backend_buffer_get_size(buf->chunks[chunk]) : 0; |
425 | 0 | } |
426 | | |
427 | 0 | static size_t ggml_vbuffer_size(struct vbuffer * buf) { |
428 | 0 | size_t size = 0; |
429 | 0 | for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) { |
430 | 0 | size += ggml_backend_buffer_get_size(buf->chunks[i]); |
431 | 0 | } |
432 | 0 | return size; |
433 | 0 | } |
434 | | |
435 | 0 | static struct vbuffer * ggml_vbuffer_alloc(ggml_backend_buffer_type_t buft, const struct ggml_dyn_tallocr * talloc, enum ggml_backend_buffer_usage usage) { |
436 | 0 | struct vbuffer * buf = (struct vbuffer *)calloc(1, sizeof(struct vbuffer)); |
437 | 0 | if (buf == NULL) { |
438 | 0 | return NULL; |
439 | 0 | } |
440 | | |
441 | 0 | for (int n = 0; n < talloc->n_chunks; n++) { |
442 | 0 | size_t chunk_size = talloc->chunks[n]->max_size; |
443 | 0 | buf->chunks[n] = ggml_backend_buft_alloc_buffer(buft, chunk_size); |
444 | 0 | if (buf->chunks[n] == NULL) { |
445 | 0 | ggml_vbuffer_free(buf); |
446 | 0 | return NULL; |
447 | 0 | } |
448 | 0 | ggml_backend_buffer_set_usage(buf->chunks[n], usage); |
449 | 0 | } |
450 | 0 | return buf; |
451 | 0 | } |
452 | | |
453 | 0 | static void ggml_vbuffer_tensor_alloc(struct vbuffer * buf, struct ggml_tensor * tensor, struct buffer_address buf_addr) { |
454 | 0 | void * base = ggml_backend_buffer_get_base(buf->chunks[buf_addr.chunk]); |
455 | 0 | void * addr = (char *)base + buf_addr.offset; |
456 | 0 | ggml_backend_tensor_alloc(buf->chunks[buf_addr.chunk], tensor, addr); |
457 | 0 | } |
458 | | |
459 | 0 | static void ggml_vbuffer_reset(struct vbuffer * buf) { |
460 | 0 | for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) { |
461 | 0 | ggml_backend_buffer_reset(buf->chunks[i]); |
462 | 0 | } |
463 | 0 | } |
464 | | |
465 | | |
466 | | ///////////////////////////////////// |
467 | | |
468 | | // graph allocator |
469 | | |
470 | | struct hash_node { |
471 | | int n_children; |
472 | | int n_views; |
473 | | int buffer_id; |
474 | | struct buffer_address addr; |
475 | | bool allocated; |
476 | | }; |
477 | | |
478 | | struct tensor_alloc { |
479 | | int buffer_id; |
480 | | struct buffer_address addr; |
481 | | size_t size_max; // 0 = pre-allocated, unused, or view |
482 | | }; |
483 | | |
484 | | struct leaf_alloc { |
485 | | struct tensor_alloc leaf; |
486 | | }; |
487 | | |
488 | | struct node_alloc { |
489 | | struct tensor_alloc dst; |
490 | | struct tensor_alloc src[GGML_MAX_SRC]; |
491 | | }; |
492 | | |
493 | | struct ggml_gallocr { |
494 | | ggml_backend_buffer_type_t * bufts; // [n_buffers] |
495 | | struct vbuffer ** buffers; // [n_buffers] |
496 | | struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers] |
497 | | int n_buffers; |
498 | | |
499 | | struct ggml_hash_set hash_set; |
500 | | struct hash_node * hash_values; // [hash_set.size] |
501 | | |
502 | | struct node_alloc * node_allocs; // [n_nodes] |
503 | | int n_nodes; |
504 | | |
505 | | struct leaf_alloc * leaf_allocs; // [n_leafs] |
506 | | int n_leafs; |
507 | | }; |
508 | | |
509 | 0 | ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs) { |
510 | 0 | ggml_gallocr_t galloc = (ggml_gallocr_t)calloc(1, sizeof(struct ggml_gallocr)); |
511 | 0 | GGML_ASSERT(galloc != NULL); |
512 | |
513 | 0 | galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t)); |
514 | 0 | GGML_ASSERT(galloc->bufts != NULL); |
515 | |
516 | 0 | galloc->buffers = calloc(n_bufs, sizeof(struct vbuffer *)); |
517 | 0 | GGML_ASSERT(galloc->buffers != NULL); |
518 | |
519 | 0 | galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *)); |
520 | 0 | GGML_ASSERT(galloc->buf_tallocs != NULL); |
521 | |
522 | 0 | for (int i = 0; i < n_bufs; i++) { |
523 | 0 | galloc->bufts[i] = bufts[i]; |
524 | 0 | galloc->buffers[i] = NULL; |
525 | | |
526 | | // check if the same buffer type is used multiple times and reuse the same allocator |
527 | 0 | for (int j = 0; j < i; j++) { |
528 | 0 | if (bufts[i] == bufts[j]) { |
529 | 0 | galloc->buf_tallocs[i] = galloc->buf_tallocs[j]; |
530 | 0 | break; |
531 | 0 | } |
532 | 0 | } |
533 | |
534 | 0 | if (galloc->buf_tallocs[i] == NULL) { |
535 | 0 | size_t alignment = ggml_backend_buft_get_alignment(bufts[i]); |
536 | 0 | size_t max_size = ggml_backend_buft_get_max_size(bufts[i]); |
537 | 0 | galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment, max_size); |
538 | 0 | } |
539 | 0 | } |
540 | 0 | galloc->n_buffers = n_bufs; |
541 | |
542 | 0 | return galloc; |
543 | 0 | } |
544 | | |
545 | 0 | ggml_gallocr_t ggml_gallocr_new(ggml_backend_buffer_type_t buft) { |
546 | 0 | return ggml_gallocr_new_n(&buft, 1); |
547 | 0 | } |
548 | | |
549 | 0 | void ggml_gallocr_free(ggml_gallocr_t galloc) { |
550 | 0 | if (galloc == NULL) { |
551 | 0 | return; |
552 | 0 | } |
553 | | |
554 | 0 | for (int i = 0; i < galloc->n_buffers; i++) { |
555 | 0 | if (galloc->buffers != NULL) { |
556 | | // skip if already freed |
557 | 0 | bool freed = false; |
558 | 0 | for (int j = 0; j < i; j++) { |
559 | 0 | if (galloc->buffers[j] == galloc->buffers[i]) { |
560 | 0 | freed = true; |
561 | 0 | break; |
562 | 0 | } |
563 | 0 | } |
564 | 0 | if (!freed) { |
565 | 0 | ggml_vbuffer_free(galloc->buffers[i]); |
566 | 0 | } |
567 | 0 | } |
568 | 0 | if (galloc->buf_tallocs != NULL) { |
569 | | // skip if already freed |
570 | 0 | bool freed = false; |
571 | 0 | for (int j = 0; j < i; j++) { |
572 | 0 | if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) { |
573 | 0 | freed = true; |
574 | 0 | break; |
575 | 0 | } |
576 | 0 | } |
577 | 0 | if (!freed) { |
578 | 0 | ggml_dyn_tallocr_free(galloc->buf_tallocs[i]); |
579 | 0 | } |
580 | 0 | } |
581 | 0 | } |
582 | |
583 | 0 | ggml_hash_set_free(&galloc->hash_set); |
584 | 0 | free(galloc->hash_values); |
585 | 0 | free(galloc->bufts); |
586 | 0 | free(galloc->buffers); |
587 | 0 | free(galloc->buf_tallocs); |
588 | 0 | free(galloc->node_allocs); |
589 | 0 | free(galloc->leaf_allocs); |
590 | 0 | free(galloc); |
591 | 0 | } |
592 | | |
593 | | typedef struct ggml_gallocr * ggml_gallocr_t; |
594 | | |
595 | 0 | static struct hash_node * ggml_gallocr_hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) { |
596 | 0 | size_t i = ggml_hash_find_or_insert(&galloc->hash_set, t); |
597 | 0 | return &galloc->hash_values[i]; |
598 | 0 | } |
599 | | |
600 | 0 | static bool ggml_gallocr_is_own(ggml_gallocr_t galloc, struct ggml_tensor * t) { |
601 | 0 | return ggml_gallocr_hash_get(galloc, t)->allocated; |
602 | 0 | } |
603 | | |
604 | 0 | static bool ggml_gallocr_is_allocated(ggml_gallocr_t galloc, struct ggml_tensor * t) { |
605 | 0 | return t->data != NULL || ggml_gallocr_hash_get(galloc, t)->allocated; |
606 | 0 | } |
607 | | |
608 | | // free the extra space at the end if the new tensor is smaller |
609 | 0 | static void ggml_gallocr_free_extra_space(ggml_gallocr_t galloc, struct ggml_tensor * node, struct ggml_tensor * parent) { |
610 | 0 | struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); |
611 | 0 | struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); |
612 | |
613 | 0 | size_t parent_size = ggml_backend_buft_get_alloc_size(galloc->bufts[p_hn->buffer_id], parent); |
614 | 0 | size_t node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node); |
615 | |
616 | 0 | GGML_ASSERT(parent_size >= node_size); |
617 | |
618 | 0 | if (parent_size > node_size) { |
619 | 0 | struct ggml_dyn_tallocr * p_alloc = galloc->buf_tallocs[p_hn->buffer_id]; |
620 | 0 | struct buffer_address p_addr = p_hn->addr; |
621 | 0 | p_addr.offset += node_size; |
622 | 0 | size_t extra_size = parent_size - node_size; |
623 | 0 | AT_PRINTF("freeing extra %zu bytes from parent %s for %s\n", extra_size, parent->name, node->name); |
624 | 0 | ggml_dyn_tallocr_free_tensor(p_alloc, p_addr, extra_size, parent); |
625 | 0 | } |
626 | 0 | } |
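// Example (illustrative sizes): if the parent was allocated 1024 bytes but the node reusing
// its buffer only needs 640, the trailing 384 bytes starting at the parent's offset + 640 are
// returned to the free list.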
627 | | |
628 | 0 | static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) { |
629 | 0 | GGML_ASSERT(buffer_id >= 0); |
630 | 0 | struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); |
631 | |
632 | 0 | if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) { |
633 | 0 | hn->allocated = true; |
634 | 0 | assert(hn->addr.offset == 0); |
635 | | |
636 | | // try to reuse a parent's buffer (inplace) |
637 | 0 | if (ggml_op_can_inplace(node->op)) { |
638 | 0 | for (int i = 0; i < GGML_MAX_SRC; i++) { |
639 | 0 | struct ggml_tensor * parent = node->src[i]; |
640 | 0 | if (parent == NULL) { |
641 | 0 | continue; |
642 | 0 | } |
643 | | |
644 | | // if the node's data is external, then we cannot re-use it |
645 | 0 | if (!ggml_gallocr_is_own(galloc, parent)) { |
646 | 0 | AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data); |
647 | 0 | continue; |
648 | 0 | } |
649 | | |
650 | | // outputs cannot be reused |
651 | 0 | if (parent->flags & GGML_TENSOR_FLAG_OUTPUT || (parent->view_src != NULL && parent->view_src->flags & GGML_TENSOR_FLAG_OUTPUT)) { |
652 | 0 | AT_PRINTF("not reusing parent %s for %s as it is an output\n", parent->name, node->name); |
653 | 0 | continue; |
654 | 0 | } |
655 | | |
656 | 0 | if (!ggml_are_same_layout(node, parent)) { |
657 | 0 | AT_PRINTF("not reusing parent %s for %s as layouts are different\n", parent->name, node->name); |
658 | 0 | continue; |
659 | 0 | } |
660 | | |
661 | 0 | struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); |
662 | 0 | if (p_hn->n_children == 1 && p_hn->n_views == 0) { |
663 | 0 | if (ggml_is_view(parent)) { |
664 | 0 | struct ggml_tensor * view_src = parent->view_src; |
665 | 0 | struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); |
666 | 0 | if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) { |
667 | 0 | AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name); |
668 | 0 | assert(view_src_hn->addr.chunk == p_hn->addr.chunk && view_src_hn->addr.offset == p_hn->addr.offset); |
669 | 0 | hn->buffer_id = p_hn->buffer_id; |
670 | 0 | hn->addr = p_hn->addr; |
671 | 0 | p_hn->allocated = false; // avoid freeing the parent |
672 | 0 | view_src_hn->allocated = false; |
673 | 0 | ggml_gallocr_free_extra_space(galloc, node, view_src); |
674 | 0 | return; |
675 | 0 | } |
676 | 0 | } else { |
677 | 0 | AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name); |
678 | 0 | hn->buffer_id = p_hn->buffer_id; |
679 | 0 | hn->addr = p_hn->addr; |
680 | 0 | p_hn->allocated = false; // avoid freeing the parent |
681 | 0 | ggml_gallocr_free_extra_space(galloc, node, parent); |
682 | 0 | return; |
683 | 0 | } |
684 | 0 | } |
685 | 0 | } |
686 | 0 | } |
687 | | // allocate tensor from the buffer |
688 | 0 | struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; |
689 | 0 | ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; |
690 | 0 | size_t size = ggml_backend_buft_get_alloc_size(buft, node); |
691 | 0 | hn->buffer_id = buffer_id; |
692 | 0 | hn->addr = ggml_dyn_tallocr_alloc(alloc, size, node); |
693 | 0 | } |
694 | 0 | } |
695 | | |
696 | 0 | static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) { |
697 | | // graph outputs are never freed |
698 | 0 | if (node->flags & GGML_TENSOR_FLAG_OUTPUT) { |
699 | 0 | AT_PRINTF("not freeing output %s\n", node->name); |
700 | 0 | return; |
701 | 0 | } |
702 | | |
703 | 0 | struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); |
704 | 0 | int buffer_id = hn->buffer_id; |
705 | 0 | struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id]; |
706 | 0 | ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id]; |
707 | 0 | size_t size = ggml_backend_buft_get_alloc_size(buft, node); |
708 | 0 | ggml_dyn_tallocr_free_tensor(alloc, hn->addr, size, node); |
709 | 0 | hn->allocated = false; |
710 | 0 | } |
711 | | |
712 | 0 | static int get_node_buffer_id(const int * node_buffer_ids, int i) { |
713 | 0 | return node_buffer_ids ? node_buffer_ids[i] : 0; |
714 | 0 | } |
715 | | |
716 | 0 | static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { |
717 | | // clear hash tables |
718 | 0 | ggml_hash_set_reset(&galloc->hash_set); |
719 | 0 | memset(galloc->hash_values, 0, sizeof(struct hash_node) * galloc->hash_set.size); |
720 | | |
721 | | // allocate leafs |
722 | | // these may be tensors that the application is not using in the graph, but may still want to allocate for other purposes |
723 | 0 | for (int i = 0; i < graph->n_leafs; i++) { |
724 | 0 | struct ggml_tensor * leaf = graph->leafs[i]; |
725 | 0 | ggml_gallocr_allocate_node(galloc, leaf, get_node_buffer_id(leaf_buffer_ids, i)); |
726 | 0 | } |
727 | | |
728 | | // count number of children and views |
729 | | // allocate other graph inputs and leafs first to avoid overwriting them |
730 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
731 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
732 | | |
733 | | // TODO: better way to add external dependencies |
734 | | // GGML_OP_NONE does not appear normally in the graph nodes, but is used by ggml-backend to add dependencies to |
735 | | // control when some tensors are allocated and freed. in this case, the dependencies are in `src`, but the node |
736 | | // itself is never used and should not be considered a dependency |
737 | 0 | if (ggml_is_view(node) && node->op != GGML_OP_NONE) { |
738 | 0 | struct ggml_tensor * view_src = node->view_src; |
739 | 0 | ggml_gallocr_hash_get(galloc, view_src)->n_views += 1; |
740 | 0 | } |
741 | |
742 | 0 | if (node->flags & GGML_TENSOR_FLAG_INPUT) { |
743 | 0 | ggml_gallocr_allocate_node(galloc, graph->nodes[i], get_node_buffer_id(node_buffer_ids, i)); |
744 | 0 | } |
745 | |
746 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
747 | 0 | struct ggml_tensor * src = node->src[j]; |
748 | 0 | if (src == NULL) { |
749 | 0 | continue; |
750 | 0 | } |
751 | | |
752 | 0 | ggml_gallocr_hash_get(galloc, src)->n_children += 1; |
753 | | |
754 | | // allocate explicit inputs |
755 | 0 | if (src->flags & GGML_TENSOR_FLAG_INPUT) { |
756 | 0 | ggml_gallocr_allocate_node(galloc, src, get_node_buffer_id(node_buffer_ids, i)); |
757 | 0 | } |
758 | 0 | } |
759 | 0 | } |
760 | | |
761 | | // allocate tensors |
762 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
763 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
764 | 0 | int buffer_id = get_node_buffer_id(node_buffer_ids, i); |
765 | | |
766 | | // allocate parents (only leafs need to be allocated at this point) |
767 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
768 | 0 | struct ggml_tensor * parent = node->src[j]; |
769 | 0 | if (parent == NULL) { |
770 | 0 | continue; |
771 | 0 | } |
772 | 0 | ggml_gallocr_allocate_node(galloc, parent, buffer_id); |
773 | 0 | } |
774 | | |
775 | | // allocate node |
776 | 0 | ggml_gallocr_allocate_node(galloc, node, buffer_id); |
777 | |
778 | 0 | AT_PRINTF("exec: %s (%s) <= ", ggml_op_desc(node), node->name); |
779 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
780 | 0 | struct ggml_tensor * parent = node->src[j]; |
781 | 0 | if (parent == NULL) { |
782 | 0 | continue; |
783 | 0 | } |
784 | 0 | AT_PRINTF("%s", parent->name); |
785 | 0 | if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) { |
786 | 0 | AT_PRINTF(", "); |
787 | 0 | } |
788 | 0 | } |
789 | 0 | AT_PRINTF("\n"); |
790 | | |
791 | | // update parents |
792 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
793 | 0 | struct ggml_tensor * parent = node->src[j]; |
794 | 0 | if (parent == NULL) { |
795 | 0 | continue; |
796 | 0 | } |
797 | 0 | struct hash_node * p_hn = ggml_gallocr_hash_get(galloc, parent); |
798 | 0 | p_hn->n_children -= 1; |
799 | |
800 | 0 | AT_PRINTF("parent %s: %d children, %d views, allocated: %d\n", |
801 | 0 | parent->name, p_hn->n_children, p_hn->n_views, p_hn->allocated); |
802 | |
803 | 0 | if (p_hn->n_children == 0 && p_hn->n_views == 0) { |
804 | 0 | if (ggml_is_view(parent)) { |
805 | 0 | struct ggml_tensor * view_src = parent->view_src; |
806 | 0 | struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src); |
807 | 0 | view_src_hn->n_views -= 1; |
808 | 0 | AT_PRINTF("view_src %s: %d children, %d views\n", |
809 | 0 | view_src->name, view_src_hn->n_children, view_src_hn->n_views); |
810 | 0 | if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) { |
811 | 0 | ggml_gallocr_free_node(galloc, view_src); |
812 | 0 | } |
813 | 0 | } |
814 | 0 | else if (p_hn->allocated) { |
815 | 0 | ggml_gallocr_free_node(galloc, parent); |
816 | 0 | } |
817 | 0 | } |
818 | 0 | AT_PRINTF("\n"); |
819 | 0 | } |
820 | 0 | } |
821 | 0 | } |
822 | | |
823 | 0 | bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, const int * node_buffer_ids, const int * leaf_buffer_ids) { |
824 | 0 | size_t min_hash_size = graph->n_nodes + graph->n_leafs; |
825 | | // add 25% margin to avoid hash collisions |
826 | 0 | min_hash_size += min_hash_size / 4; |
827 | | |
828 | | // initialize hash table |
829 | 0 | if (galloc->hash_set.size < min_hash_size) { |
830 | 0 | ggml_hash_set_free(&galloc->hash_set); |
831 | 0 | galloc->hash_set = ggml_hash_set_new(min_hash_size); |
832 | 0 | GGML_ASSERT(galloc->hash_set.keys != NULL); |
833 | |
834 | 0 | free(galloc->hash_values); |
835 | 0 | galloc->hash_values = malloc(sizeof(struct hash_node) * galloc->hash_set.size); |
836 | 0 | GGML_ASSERT(galloc->hash_values != NULL); |
837 | 0 | } |
838 | | |
839 | | // reset allocators |
840 | 0 | for (int i = 0; i < galloc->n_buffers; i++) { |
841 | 0 | ggml_dyn_tallocr_reset(galloc->buf_tallocs[i]); |
842 | 0 | } |
843 | | |
844 | | // allocate in hash table |
845 | 0 | ggml_gallocr_alloc_graph_impl(galloc, graph, node_buffer_ids, leaf_buffer_ids); |
846 | | |
847 | | // set the node_allocs from the hash table |
848 | 0 | if (galloc->n_nodes < graph->n_nodes) { |
849 | 0 | free(galloc->node_allocs); |
850 | 0 | galloc->node_allocs = calloc(graph->n_nodes, sizeof(struct node_alloc)); |
851 | 0 | GGML_ASSERT(galloc->node_allocs != NULL); |
852 | 0 | } |
853 | 0 | galloc->n_nodes = graph->n_nodes; |
854 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
855 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
856 | 0 | struct node_alloc * node_alloc = &galloc->node_allocs[i]; |
857 | 0 | if (node->view_src || node->data) { |
858 | 0 | node_alloc->dst.buffer_id = -1; |
859 | 0 | node_alloc->dst.addr = GGML_BUFFER_ADDRESS_INVALID; |
860 | 0 | node_alloc->dst.size_max = 0; |
861 | 0 | } else { |
862 | 0 | struct hash_node * hn = ggml_gallocr_hash_get(galloc, node); |
863 | 0 | node_alloc->dst.buffer_id = hn->buffer_id; |
864 | 0 | node_alloc->dst.addr = hn->addr; |
865 | 0 | node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node); |
866 | 0 | } |
867 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
868 | 0 | struct ggml_tensor * src = node->src[j]; |
869 | 0 | if (!src || src->view_src || src->data) { |
870 | 0 | node_alloc->src[j].buffer_id = -1; |
871 | 0 | node_alloc->src[j].addr = GGML_BUFFER_ADDRESS_INVALID; |
872 | 0 | node_alloc->src[j].size_max = 0; |
873 | 0 | } else { |
874 | 0 | struct hash_node * hn = ggml_gallocr_hash_get(galloc, src); |
875 | 0 | node_alloc->src[j].buffer_id = hn->buffer_id; |
876 | 0 | node_alloc->src[j].addr = hn->addr; |
877 | 0 | node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src); |
878 | 0 | } |
879 | 0 | } |
880 | 0 | } |
881 | 0 | if (galloc->n_leafs < graph->n_leafs) { |
882 | 0 | free(galloc->leaf_allocs); |
883 | 0 | galloc->leaf_allocs = calloc(graph->n_leafs, sizeof(galloc->leaf_allocs[0])); |
884 | 0 | GGML_ASSERT(galloc->leaf_allocs != NULL); |
885 | 0 | } |
886 | 0 | galloc->n_leafs = graph->n_leafs; |
887 | 0 | for (int i = 0; i < graph->n_leafs; i++) { |
888 | 0 | struct ggml_tensor * leaf = graph->leafs[i]; |
889 | 0 | struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf); |
890 | 0 | if (leaf->view_src || leaf->data) { |
891 | 0 | galloc->leaf_allocs[i].leaf.buffer_id = -1; |
892 | 0 | galloc->leaf_allocs[i].leaf.addr = GGML_BUFFER_ADDRESS_INVALID; |
893 | 0 | galloc->leaf_allocs[i].leaf.size_max = 0; |
894 | 0 | } else { |
895 | 0 | galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id; |
896 | 0 | galloc->leaf_allocs[i].leaf.addr = hn->addr; |
897 | 0 | galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf); |
898 | 0 | } |
899 | 0 | } |
900 | | |
901 | | // reallocate buffers if needed |
902 | 0 | for (int i = 0; i < galloc->n_buffers; i++) { |
903 | | // if the buffer type is used multiple times, we reuse the same buffer |
904 | 0 | for (int j = 0; j < i; j++) { |
905 | 0 | if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) { |
906 | 0 | galloc->buffers[i] = galloc->buffers[j]; |
907 | 0 | break; |
908 | 0 | } |
909 | 0 | } |
910 | | |
911 | | // even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views |
912 | 0 | bool realloc = galloc->buffers[i] == NULL; |
913 | 0 | size_t new_size = 0; |
914 | 0 | for (int c = 0; c < galloc->buf_tallocs[i]->n_chunks; c++) { |
915 | 0 | size_t cur_chunk_size = galloc->buffers[i] ? ggml_vbuffer_chunk_size(galloc->buffers[i], c) : 0; |
916 | 0 | size_t new_chunk_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i], c); |
917 | 0 | new_size += new_chunk_size; |
918 | 0 | if (new_chunk_size > cur_chunk_size) { |
919 | 0 | realloc = true; |
920 | 0 | } |
921 | 0 | } |
922 | 0 | if (realloc) { |
923 | | #ifndef NDEBUG |
924 | | size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0; |
925 | | GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); |
926 | | #endif |
927 | |
928 | 0 | ggml_vbuffer_free(galloc->buffers[i]); |
929 | 0 | galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE); |
930 | 0 | if (galloc->buffers[i] == NULL) { |
931 | 0 | GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size); |
932 | 0 | return false; |
933 | 0 | } |
934 | 0 | } |
935 | 0 | } |
936 | | |
937 | 0 | return true; |
938 | 0 | } |
939 | | |
940 | 0 | bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) { |
941 | 0 | return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL); |
942 | 0 | } |
943 | | |
944 | 0 | static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) { |
945 | 0 | int buffer_id = tensor_alloc->buffer_id; |
946 | 0 | assert(tensor->data || tensor->view_src || ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max); |
947 | |
948 | 0 | if (tensor->view_src != NULL) { |
949 | 0 | if (tensor->buffer == NULL) { |
950 | 0 | assert(tensor_alloc->addr.offset == SIZE_MAX); |
951 | 0 | if (tensor->view_src->buffer == NULL) { |
952 | | // this tensor was allocated without ggml-backend |
953 | 0 | return; |
954 | 0 | } |
955 | 0 | ggml_backend_view_init(tensor); |
956 | 0 | } |
957 | 0 | } else { |
958 | 0 | if (tensor->data == NULL) { |
959 | 0 | assert(tensor_alloc->addr.offset != SIZE_MAX); |
960 | 0 | assert(ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max); |
961 | 0 | ggml_vbuffer_tensor_alloc(galloc->buffers[buffer_id], tensor, tensor_alloc->addr); |
962 | 0 | } else { |
963 | 0 | if (tensor->buffer == NULL) { |
964 | | // this tensor was allocated without ggml-backend |
965 | 0 | return; |
966 | 0 | } |
967 | 0 | } |
968 | 0 | } |
969 | 0 | } |
970 | | |
971 | 0 | static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) { |
972 | 0 | size_t node_size = 0; |
973 | 0 | if (!node->data && !node->view_src) { |
974 | | // If we previously had data but don't now then reallocate |
975 | 0 | if (talloc->buffer_id < 0) { |
976 | 0 | return false; |
977 | 0 | } |
978 | 0 | node_size = ggml_backend_buft_get_alloc_size(galloc->bufts[talloc->buffer_id], node); |
979 | 0 | } |
980 | 0 | return talloc->size_max >= node_size; |
981 | 0 | } |
982 | | |
983 | 0 | static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { |
984 | 0 | if (galloc->n_nodes != graph->n_nodes) { |
985 | | #ifndef NDEBUG |
986 | | GGML_LOG_DEBUG("%s: graph has different number of nodes\n", __func__); |
987 | | #endif |
988 | 0 | return true; |
989 | 0 | } |
990 | | |
991 | 0 | if (galloc->n_leafs != graph->n_leafs) { |
992 | | #ifndef NDEBUG |
993 | | GGML_LOG_DEBUG("%s: graph has different number of leafs\n", __func__); |
994 | | #endif |
995 | 0 | return true; |
996 | 0 | } |
997 | | |
998 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
999 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1000 | 0 | struct node_alloc * node_alloc = &galloc->node_allocs[i]; |
1001 | |
1002 | 0 | if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) { |
1003 | | #ifndef NDEBUG |
1004 | | GGML_LOG_DEBUG("%s: node %s is not valid\n", __func__, node->name); |
1005 | | #endif |
1006 | 0 | return true; |
1007 | 0 | } |
1008 | | |
1009 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1010 | 0 | struct ggml_tensor * src = node->src[j]; |
1011 | 0 | if (src == NULL) { |
1012 | 0 | continue; |
1013 | 0 | } |
1014 | 0 | if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) { |
1015 | | #ifndef NDEBUG |
1016 | | GGML_LOG_DEBUG("%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name); |
1017 | | #endif |
1018 | 0 | return true; |
1019 | 0 | } |
1020 | 0 | } |
1021 | 0 | } |
1022 | | |
1023 | 0 | return false; |
1024 | 0 | } |
1025 | | |
1026 | 0 | bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph) { |
1027 | 0 | if (ggml_gallocr_needs_realloc(galloc, graph)) { |
1028 | 0 | if (galloc->n_buffers == 1) { |
1029 | | #ifndef NDEBUG |
1030 | | GGML_LOG_DEBUG("%s: reallocating buffers automatically\n", __func__); |
1031 | | #endif |
1032 | 0 | if (!ggml_gallocr_reserve(galloc, graph)) { |
1033 | 0 | return false; |
1034 | 0 | } |
1035 | 0 | } else { |
1036 | | #ifndef NDEBUG |
1037 | | GGML_LOG_DEBUG("%s: cannot reallocate multi buffer graph automatically, call reserve\n", __func__); |
1038 | | #endif |
1039 | 0 | return false; |
1040 | 0 | } |
1041 | 0 | } |
1042 | | |
1043 | | // reset buffers |
1044 | 0 | for (int i = 0; i < galloc->n_buffers; i++) { |
1045 | 0 | if (galloc->buffers[i] != NULL) { |
1046 | 0 | ggml_vbuffer_reset(galloc->buffers[i]); |
1047 | 0 | } |
1048 | 0 | } |
1049 | | |
1050 | | // allocate the graph tensors from the previous assignments |
1051 | | // leafs |
1052 | 0 | for (int i = 0; i < graph->n_leafs; i++) { |
1053 | 0 | struct ggml_tensor * leaf = graph->leafs[i]; |
1054 | 0 | struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i]; |
1055 | 0 | ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf); |
1056 | 0 | } |
1057 | | // nodes |
1058 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1059 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1060 | 0 | struct node_alloc * node_alloc = &galloc->node_allocs[i]; |
1061 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1062 | 0 | struct ggml_tensor * src = node->src[j]; |
1063 | 0 | if (src == NULL) { |
1064 | 0 | continue; |
1065 | 0 | } |
1066 | 0 | ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]); |
1067 | 0 | } |
1068 | 0 | ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst); |
1069 | 0 | } |
1070 | |
1071 | 0 | return true; |
1072 | 0 | } |
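// Typical graph-allocator flow (a minimal sketch; `backend` and `build_graph()` stand in for
// application code and are not part of this file):
//
//   ggml_gallocr_t galloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
//   struct ggml_cgraph * graph = build_graph();   // ideally the worst-case graph
//   ggml_gallocr_reserve(galloc, graph);          // size and allocate the backend buffers
//   ggml_gallocr_alloc_graph(galloc, graph);      // assign addresses to the graph tensors
//   ggml_backend_graph_compute(backend, graph);
//   ggml_gallocr_free(galloc);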
1073 | | |
1074 | 0 | size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) { |
1075 | 0 | GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers); |
1076 | |
1077 | 0 | if (galloc->buffers[buffer_id] == NULL) { |
1078 | 0 | return 0; |
1079 | 0 | } |
1080 | | |
1081 | 0 | for (int i = 0; i < buffer_id; i++) { |
1082 | 0 | if (galloc->buffers[i] == galloc->buffers[buffer_id]) { |
1083 | | // this buffer is the same as a previous one due to the same buffer type being used multiple times |
1084 | | // only return the buffer size the first time it appears to avoid double counting |
1085 | 0 | return 0; |
1086 | 0 | } |
1087 | 0 | } |
1088 | | |
1089 | 0 | return ggml_vbuffer_size(galloc->buffers[buffer_id]); |
1090 | 0 | } |
1091 | | |
1092 | | // utils |
1093 | | |
1094 | 0 | static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) { |
1095 | 0 | for (size_t i = 0; i < *n_buffers; i++) { |
1096 | 0 | ggml_backend_buffer_free((*buffers)[i]); |
1097 | 0 | } |
1098 | 0 | free(*buffers); |
1099 | 0 | } |
1100 | | |
1101 | | static bool alloc_tensor_range(struct ggml_context * ctx, |
1102 | | struct ggml_tensor * first, struct ggml_tensor * last, |
1103 | | ggml_backend_buffer_type_t buft, size_t size, |
1104 | 0 | ggml_backend_buffer_t ** buffers, size_t * n_buffers) { |
1105 | |
1106 | 0 | ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size); |
1107 | 0 | if (buffer == NULL) { |
1108 | 0 | GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(buft), size); |
1109 | 0 | free_buffers(buffers, n_buffers); |
1110 | 0 | return false; |
1111 | 0 | } |
1112 | | |
1113 | 0 | *buffers = realloc(*buffers, sizeof(ggml_backend_buffer_t) * (*n_buffers + 1)); |
1114 | 0 | (*buffers)[(*n_buffers)++] = buffer; |
1115 | |
1116 | 0 | struct ggml_tallocr tallocr = ggml_tallocr_new(buffer); |
1117 | |
1118 | 0 | for (struct ggml_tensor * t = first; t != last; t = ggml_get_next_tensor(ctx, t)) { |
1119 | 0 | enum ggml_status status = GGML_STATUS_SUCCESS; |
1120 | 0 | if (t->data == NULL) { |
1121 | 0 | if (t->view_src == NULL) { |
1122 | 0 | status = ggml_tallocr_alloc(&tallocr, t); |
1123 | 0 | } else if (t->buffer == NULL) { |
1124 | 0 | status = ggml_backend_view_init(t); |
1125 | 0 | } |
1126 | 0 | } else { |
1127 | 0 | if (t->view_src != NULL && t->buffer == NULL) { |
1128 | | // view of a pre-allocated tensor |
1129 | 0 | status = ggml_backend_view_init(t); |
1130 | 0 | } |
1131 | 0 | } |
1132 | 0 | if (status != GGML_STATUS_SUCCESS) { |
1133 | 0 | GGML_LOG_ERROR("%s: failed to initialize tensor %s\n", __func__, t->name); |
1134 | 0 | free_buffers(buffers, n_buffers); |
1135 | 0 | return false; |
1136 | 0 | } |
1137 | 0 | } |
1138 | | |
1139 | 0 | return true; |
1140 | 0 | } |
1141 | | |
1142 | 0 | ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft) { |
1143 | 0 | GGML_ASSERT(ggml_get_no_alloc(ctx) == true); |
1144 | |
1145 | 0 | size_t alignment = ggml_backend_buft_get_alignment(buft); |
1146 | 0 | size_t max_size = ggml_backend_buft_get_max_size(buft); |
1147 | |
1148 | 0 | ggml_backend_buffer_t * buffers = NULL; |
1149 | 0 | size_t n_buffers = 0; |
1150 | |
1151 | 0 | size_t cur_buf_size = 0; |
1152 | 0 | struct ggml_tensor * first = ggml_get_first_tensor(ctx); |
1153 | 0 | for (struct ggml_tensor * t = first; t != NULL; t = ggml_get_next_tensor(ctx, t)) { |
1154 | 0 | size_t this_size = 0; |
1155 | 0 | if (t->data == NULL && t->view_src == NULL) { |
1156 | 0 | this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment); |
1157 | 0 | } |
1158 | |
1159 | 0 | if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) { |
1160 | | // allocate tensors in the current buffer |
1161 | 0 | if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) { |
1162 | 0 | return NULL; |
1163 | 0 | } |
1164 | 0 | first = t; |
1165 | 0 | cur_buf_size = this_size; |
1166 | 0 | } else { |
1167 | 0 | cur_buf_size += this_size; |
1168 | 0 | } |
1169 | 0 | } |
1170 | | |
1171 | | // allocate remaining tensors |
1172 | 0 | if (cur_buf_size > 0) { |
1173 | 0 | if (!alloc_tensor_range(ctx, first, NULL, buft, cur_buf_size, &buffers, &n_buffers)) { |
1174 | 0 | return NULL; |
1175 | 0 | } |
1176 | 0 | } |
1177 | | |
1178 | 0 | if (n_buffers == 0) { |
1179 | | #ifndef NDEBUG |
1180 | | GGML_LOG_DEBUG("%s: all tensors in the context are already allocated\n", __func__); |
1181 | | #endif |
1182 | 0 | return NULL; |
1183 | 0 | } |
1184 | | |
1185 | 0 | ggml_backend_buffer_t buffer; |
1186 | 0 | if (n_buffers == 1) { |
1187 | 0 | buffer = buffers[0]; |
1188 | 0 | } else { |
1189 | 0 | buffer = ggml_backend_multi_buffer_alloc_buffer(buffers, n_buffers); |
1190 | 0 | } |
1191 | 0 | free(buffers); |
1192 | 0 | return buffer; |
1193 | 0 | } |
1194 | | |
1195 | 0 | ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors(struct ggml_context * ctx, ggml_backend_t backend) { |
1196 | 0 | return ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_get_default_buffer_type(backend)); |
1197 | 0 | } |
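// Typical context-allocation flow (a minimal sketch; `backend` and the tensor shape are
// illustrative placeholders):
//
//   struct ggml_init_params params = {
//       /*.mem_size   =*/ ggml_tensor_overhead() * 8,
//       /*.mem_buffer =*/ NULL,
//       /*.no_alloc   =*/ true,
//   };
//   struct ggml_context * ctx = ggml_init(params);
//   struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4096, 4096);
//   ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
//   // w->data now points into buf; free buf and ctx when no longer needed.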