/src/llama.cpp/ggml/src/ggml-backend.cpp
Line | Count | Source |
1 | | // Note: porting this file to C++ is a work in progress |
2 | | |
3 | | #ifdef _WIN32 |
4 | | #define WIN32_LEAN_AND_MEAN |
5 | | #ifndef NOMINMAX |
6 | | # define NOMINMAX |
7 | | #endif |
8 | | #include <windows.h> |
9 | | #endif |
10 | | |
11 | | #include "ggml-backend.h" |
12 | | #include "ggml-backend-impl.h" |
13 | | #include "ggml-alloc.h" |
14 | | #include "ggml-impl.h" |
15 | | |
16 | | #include <assert.h> |
17 | | #include <limits.h> |
18 | | #include <stdarg.h> |
19 | | #include <stdio.h> |
20 | | #include <stdlib.h> |
21 | | #include <string.h> |
22 | | #include <algorithm> |
23 | | #include <vector> |
24 | | |
25 | | #ifdef __APPLE__ |
26 | | #include <sys/types.h> |
27 | | #include <sys/sysctl.h> |
28 | | #endif |
29 | | |
30 | | |
31 | | // backend buffer type |
32 | | |
33 | 0 | const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) { |
34 | 0 | GGML_ASSERT(buft); |
35 | 0 | return buft->iface.get_name(buft); |
36 | 0 | } |
37 | | |
38 | 0 | ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { |
39 | 0 | GGML_ASSERT(buft); |
40 | 0 | if (size == 0) { |
41 | | // return a dummy buffer for zero-sized allocations |
42 | 0 | return ggml_backend_buffer_init(buft, {}, NULL, 0); |
43 | 0 | } |
44 | 0 | return buft->iface.alloc_buffer(buft, size); |
45 | 0 | } |
46 | | |
47 | 0 | size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) { |
48 | 0 | GGML_ASSERT(buft); |
49 | 0 | return buft->iface.get_alignment(buft); |
50 | 0 | } |
51 | | |
52 | 0 | size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) { |
53 | 0 | GGML_ASSERT(buft); |
54 | | // get_max_size is optional, defaults to SIZE_MAX |
55 | 0 | if (buft->iface.get_max_size) { |
56 | 0 | return buft->iface.get_max_size(buft); |
57 | 0 | } |
58 | 0 | return SIZE_MAX; |
59 | 0 | } |
60 | | |
61 | 0 | size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) { |
62 | 0 | GGML_ASSERT(buft); |
63 | | // get_alloc_size is optional, defaults to ggml_nbytes |
64 | 0 | if (buft->iface.get_alloc_size) { |
65 | 0 | size_t size = buft->iface.get_alloc_size(buft, tensor); |
66 | 0 | assert(size >= ggml_nbytes(tensor)); |
67 | 0 | return size; |
68 | 0 | } |
69 | 0 | return ggml_nbytes(tensor); |
70 | 0 | } |
71 | | |
72 | 0 | bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) { |
73 | 0 | GGML_ASSERT(buft); |
74 | 0 | if (buft->iface.is_host) { |
75 | 0 | return buft->iface.is_host(buft); |
76 | 0 | } |
77 | 0 | return false; |
78 | 0 | } |
79 | | |
80 | 0 | ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) { |
81 | 0 | GGML_ASSERT(buft); |
82 | 0 | return buft->device; |
83 | 0 | } |
84 | | |
85 | | // backend buffer |
86 | | |
87 | | ggml_backend_buffer_t ggml_backend_buffer_init( |
88 | | ggml_backend_buffer_type_t buft, |
89 | | struct ggml_backend_buffer_i iface, |
90 | | void * context, |
91 | 0 | size_t size) { |
92 | 0 | ggml_backend_buffer_t buffer = new ggml_backend_buffer { |
93 | 0 | /* .interface = */ iface, |
94 | 0 | /* .buft = */ buft, |
95 | 0 | /* .context = */ context, |
96 | 0 | /* .size = */ size, |
97 | 0 | /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY |
98 | 0 | }; |
99 | |
100 | 0 | return buffer; |
101 | 0 | } |
102 | | |
103 | 0 | const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) { |
104 | 0 | return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer)); |
105 | 0 | } |
106 | | |
107 | 0 | void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { |
108 | 0 | if (buffer == NULL) { |
109 | 0 | return; |
110 | 0 | } |
111 | | |
112 | 0 | if (buffer->iface.free_buffer != NULL) { |
113 | 0 | buffer->iface.free_buffer(buffer); |
114 | 0 | } |
115 | 0 | delete buffer; |
116 | 0 | } |
117 | | |
118 | 0 | size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { |
119 | 0 | GGML_ASSERT(buffer); |
120 | 0 | return buffer->size; |
121 | 0 | } |
122 | | |
123 | 0 | void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { |
124 | 0 | GGML_ASSERT(buffer); |
125 | | // get_base is optional if the buffer is zero-sized |
126 | 0 | if (buffer->size == 0) { |
127 | 0 | return NULL; |
128 | 0 | } |
129 | | |
130 | | // FIXME JG: a multi_buffer has a non-zero size, so according to the above comment its get_base should not be optional; |
131 | | // it is unclear whether the above comment is correct |
132 | 0 | if (!buffer->iface.get_base) { |
133 | 0 | return NULL; |
134 | 0 | } |
135 | | |
136 | 0 | void * base = buffer->iface.get_base(buffer); |
137 | |
138 | 0 | GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL"); |
139 | |
140 | 0 | return base; |
141 | 0 | } |
142 | | |
143 | 0 | enum ggml_status ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { |
144 | 0 | GGML_ASSERT(buffer); |
145 | | // init_tensor is optional |
146 | 0 | if (buffer->iface.init_tensor) { |
147 | 0 | return buffer->iface.init_tensor(buffer, tensor); |
148 | 0 | } |
149 | 0 | return GGML_STATUS_SUCCESS; |
150 | 0 | } |
151 | | |
152 | 0 | void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
153 | 0 | GGML_ASSERT(buffer); |
154 | | // clear is optional if the buffer is zero-sized |
155 | 0 | if (buffer->size == 0) { |
156 | 0 | return; |
157 | 0 | } |
158 | | |
159 | 0 | buffer->iface.clear(buffer, value); |
160 | 0 | } |
161 | | |
162 | 0 | size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) { |
163 | 0 | return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer)); |
164 | 0 | } |
165 | | |
166 | 0 | size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) { |
167 | 0 | return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer)); |
168 | 0 | } |
169 | | |
170 | 0 | size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor) { |
171 | 0 | return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor); |
172 | 0 | } |
173 | | |
174 | 0 | bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) { |
175 | 0 | return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer)); |
176 | 0 | } |
177 | | |
178 | 0 | void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { |
179 | 0 | GGML_ASSERT(buffer); |
180 | 0 | buffer->usage = usage; |
181 | | |
182 | | // FIXME: add a generic callback to the buffer interface |
183 | 0 | if (ggml_backend_buffer_is_multi_buffer(buffer)) { |
184 | 0 | ggml_backend_multi_buffer_set_usage(buffer, usage); |
185 | 0 | } |
186 | 0 | } |
187 | | |
188 | 0 | enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) { |
189 | 0 | GGML_ASSERT(buffer); |
190 | 0 | return buffer->usage; |
191 | 0 | } |
192 | | |
193 | 0 | ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) { |
194 | 0 | GGML_ASSERT(buffer); |
195 | 0 | return buffer->buft; |
196 | 0 | } |
197 | | |
198 | 0 | void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) { |
199 | 0 | GGML_ASSERT(buffer); |
200 | 0 | if (buffer->iface.reset) { |
201 | 0 | buffer->iface.reset(buffer); |
202 | 0 | } |
203 | 0 | } |
204 | | |
205 | 0 | bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) { |
206 | 0 | ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer; |
207 | 0 | if (dst_buf->iface.cpy_tensor) { |
208 | 0 | return dst_buf->iface.cpy_tensor(dst_buf, src, dst); |
209 | 0 | } |
210 | 0 | return false; |
211 | 0 | } |
212 | | |
213 | | // backend |
214 | | |
215 | 0 | ggml_guid_t ggml_backend_guid(ggml_backend_t backend) { |
216 | 0 | if (backend == NULL) { |
217 | 0 | return NULL; |
218 | 0 | } |
219 | 0 | return backend->guid; |
220 | 0 | } |
221 | | |
222 | 0 | const char * ggml_backend_name(ggml_backend_t backend) { |
223 | 0 | if (backend == NULL) { |
224 | 0 | return "NULL"; |
225 | 0 | } |
226 | 0 | return backend->iface.get_name(backend); |
227 | 0 | } |
228 | | |
229 | 0 | void ggml_backend_free(ggml_backend_t backend) { |
230 | 0 | if (backend == NULL) { |
231 | 0 | return; |
232 | 0 | } |
233 | | |
234 | 0 | backend->iface.free(backend); |
235 | 0 | } |
236 | | |
237 | 0 | ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) { |
238 | 0 | GGML_ASSERT(backend); |
239 | 0 | return ggml_backend_dev_buffer_type(backend->device); |
240 | 0 | } |
241 | | |
242 | 0 | ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) { |
243 | 0 | return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size); |
244 | 0 | } |
245 | | |
246 | 0 | size_t ggml_backend_get_alignment(ggml_backend_t backend) { |
247 | 0 | return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend)); |
248 | 0 | } |
249 | | |
250 | 0 | size_t ggml_backend_get_max_size(ggml_backend_t backend) { |
251 | 0 | return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend)); |
252 | 0 | } |
253 | | |
254 | 0 | void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
255 | 0 | GGML_ASSERT(backend); |
256 | 0 | GGML_ASSERT(tensor); |
257 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
258 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
259 | |
260 | 0 | if (backend->iface.set_tensor_async == NULL) { |
261 | 0 | ggml_backend_tensor_set(tensor, data, offset, size); |
262 | 0 | } else { |
263 | 0 | backend->iface.set_tensor_async(backend, tensor, data, offset, size); |
264 | 0 | } |
265 | 0 | } |
266 | | |
267 | 0 | void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
268 | 0 | GGML_ASSERT(backend); |
269 | 0 | GGML_ASSERT(tensor); |
270 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
271 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
272 | |
273 | 0 | if (backend->iface.get_tensor_async == NULL) { |
274 | 0 | ggml_backend_tensor_get(tensor, data, offset, size); |
275 | 0 | } else { |
276 | 0 | backend->iface.get_tensor_async(backend, tensor, data, offset, size); |
277 | 0 | } |
278 | 0 | } |
279 | | |
280 | 0 | void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
281 | 0 | GGML_ASSERT(tensor); |
282 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
283 | |
284 | 0 | if (size == 0) { |
285 | 0 | return; |
286 | 0 | } |
287 | | |
288 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
289 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
290 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
291 | |
292 | 0 | buf->iface.set_tensor(buf, tensor, data, offset, size); |
293 | 0 | } |
294 | | |
295 | 0 | void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
296 | 0 | GGML_ASSERT(tensor); |
297 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
298 | |
299 | 0 | if (size == 0) { |
300 | 0 | return; |
301 | 0 | } |
302 | | |
303 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
304 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
305 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
306 | |
307 | 0 | buf->iface.get_tensor(buf, tensor, data, offset, size); |
308 | 0 | } |
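
The `ggml_backend_tensor_set`/`ggml_backend_tensor_get` pair above (and the async variants earlier) are the data path between host memory and whatever backend buffer a tensor was allocated in. Below is a minimal usage sketch, not part of this file: it assumes the CPU backend from `ggml-cpu.h` and the context-tensor allocator from `ggml-alloc.h`, and omits error handling.

```cpp
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-cpu.h" // assumption: the CPU backend entry point is declared here

#include <vector>

int main() {
    ggml_backend_t backend = ggml_backend_cpu_init();

    // the context only holds tensor metadata (no_alloc); the data lives in a backend buffer
    ggml_init_params params = {
        /* .mem_size   = */ ggml_tensor_overhead() * 8,
        /* .mem_buffer = */ nullptr,
        /* .no_alloc   = */ true,
    };
    ggml_context * ctx = ggml_init(params);
    ggml_tensor  * t   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 16);

    // allocate every tensor of the context in the backend's default buffer type
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    std::vector<float> src(16, 1.0f), dst(16, 0.0f);
    ggml_backend_tensor_set(t, src.data(), 0, ggml_nbytes(t)); // host -> backend buffer
    ggml_backend_tensor_get(t, dst.data(), 0, ggml_nbytes(t)); // backend buffer -> host

    ggml_backend_buffer_free(buf);
    ggml_free(ctx);
    ggml_backend_free(backend);
    return 0;
}
```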
309 | | |
310 | 0 | void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { |
311 | 0 | GGML_ASSERT(tensor); |
312 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
313 | |
314 | 0 | if (size == 0) { |
315 | 0 | return; |
316 | 0 | } |
317 | | |
318 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
319 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
320 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
321 | 0 | GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer"); |
322 | |
323 | 0 | buf->iface.memset_tensor(buf, tensor, value, offset, size); |
324 | 0 | } |
325 | | |
326 | 0 | void ggml_backend_synchronize(ggml_backend_t backend) { |
327 | 0 | GGML_ASSERT(backend); |
328 | 0 | if (backend->iface.synchronize == NULL) { |
329 | 0 | return; |
330 | 0 | } |
331 | | |
332 | 0 | backend->iface.synchronize(backend); |
333 | 0 | } |
334 | | |
335 | 0 | ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
336 | 0 | GGML_ASSERT(backend); |
337 | 0 | GGML_ASSERT(backend->iface.graph_plan_create != NULL); |
338 | |
339 | 0 | return backend->iface.graph_plan_create(backend, cgraph); |
340 | 0 | } |
341 | | |
342 | 0 | void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { |
343 | 0 | GGML_ASSERT(backend); |
344 | 0 | GGML_ASSERT(backend->iface.graph_plan_free != NULL); |
345 | |
346 | 0 | backend->iface.graph_plan_free(backend, plan); |
347 | 0 | } |
348 | | |
349 | 0 | enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { |
350 | 0 | GGML_ASSERT(backend); |
351 | 0 | GGML_ASSERT(backend->iface.graph_plan_compute != NULL); |
352 | |
353 | 0 | return backend->iface.graph_plan_compute(backend, plan); |
354 | 0 | } |
355 | | |
356 | 0 | enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
357 | 0 | enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph); |
358 | 0 | ggml_backend_synchronize(backend); |
359 | 0 | return err; |
360 | 0 | } |
361 | | |
362 | 0 | enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
363 | 0 | GGML_ASSERT(backend); |
364 | 0 | return backend->iface.graph_compute(backend, cgraph); |
365 | 0 | } |
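
`ggml_backend_graph_compute` above is simply the blocking form of `graph_compute_async`: it forwards to the backend and then synchronizes. A hedged end-to-end sketch follows (not part of this file; same CPU-backend and `ggml-alloc.h` assumptions as the earlier sketch, with illustrative tensor sizes).

```cpp
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-cpu.h" // assumption: the CPU backend entry point is declared here

int main() {
    ggml_backend_t backend = ggml_backend_cpu_init();

    ggml_init_params params = {
        /* .mem_size   = */ ggml_tensor_overhead() * 16 + ggml_graph_overhead(),
        /* .mem_buffer = */ nullptr,
        /* .no_alloc   = */ true, // tensor data is allocated in a backend buffer below
    };
    ggml_context * ctx = ggml_init(params);

    ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    ggml_tensor * c = ggml_add(ctx, a, b);

    ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);
    // ... fill a and b with ggml_backend_tensor_set() as in the earlier sketch ...

    // equivalent to graph_compute_async followed by synchronize
    enum ggml_status status = ggml_backend_graph_compute(backend, gf);
    GGML_ASSERT(status == GGML_STATUS_SUCCESS);

    ggml_backend_buffer_free(buf);
    ggml_free(ctx);
    ggml_backend_free(backend);
    return 0;
}
```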
366 | | |
367 | 0 | bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { |
368 | 0 | GGML_ASSERT(backend); |
369 | 0 | return ggml_backend_dev_supports_op(backend->device, op); |
370 | 0 | } |
371 | | |
372 | 0 | bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) { |
373 | 0 | GGML_ASSERT(backend); |
374 | 0 | return ggml_backend_dev_supports_buft(backend->device, buft); |
375 | 0 | } |
376 | | |
377 | 0 | bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) { |
378 | 0 | GGML_ASSERT(backend); |
379 | 0 | return ggml_backend_dev_offload_op(backend->device, op); |
380 | 0 | } |
381 | | |
382 | 0 | ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) { |
383 | 0 | GGML_ASSERT(backend); |
384 | 0 | return backend->device; |
385 | 0 | } |
386 | | |
387 | | // backend copy |
388 | | |
389 | 0 | void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) { |
390 | 0 | GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); |
391 | |
392 | 0 | if (src == dst) { |
393 | 0 | return; |
394 | 0 | } |
395 | | |
396 | 0 | if (ggml_backend_buffer_is_host(src->buffer)) { |
397 | 0 | ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); |
398 | 0 | } else if (ggml_backend_buffer_is_host(dst->buffer)) { |
399 | 0 | ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); |
400 | 0 | } else if (!ggml_backend_buffer_copy_tensor(src, dst)) { |
401 | | #ifndef NDEBUG |
402 | | GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer)); |
403 | | #endif |
404 | 0 | size_t nbytes = ggml_nbytes(src); |
405 | 0 | void * data = malloc(nbytes); |
406 | 0 | ggml_backend_tensor_get(src, data, 0, nbytes); |
407 | 0 | ggml_backend_tensor_set(dst, data, 0, nbytes); |
408 | 0 | free(data); |
409 | 0 | } |
410 | 0 | } |
411 | | |
412 | 0 | void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) { |
413 | 0 | GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); |
414 | |
415 | 0 | if (src == dst) { |
416 | 0 | return; |
417 | 0 | } |
418 | | |
419 | 0 | GGML_ASSERT(backend_dst); |
420 | 0 | if (backend_dst->iface.cpy_tensor_async != NULL) { |
421 | 0 | if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) { |
422 | 0 | return; |
423 | 0 | } |
424 | 0 | } |
425 | | |
426 | | // an async copy would normally happen after all the queued operations on both backends are completed |
427 | | // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy |
428 | 0 | ggml_backend_synchronize(backend_src); |
429 | 0 | ggml_backend_synchronize(backend_dst); |
430 | 0 | ggml_backend_tensor_copy(src, dst); |
431 | 0 | } |
432 | | |
433 | | // events |
434 | | |
435 | 0 | ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) { |
436 | | // null device is allowed for the transition period to the device interface |
437 | 0 | if (device == NULL || device->iface.event_new == NULL) { |
438 | 0 | return NULL; |
439 | 0 | } |
440 | 0 | return device->iface.event_new(device); |
441 | 0 | } |
442 | | |
443 | 0 | void ggml_backend_event_free(ggml_backend_event_t event) { |
444 | 0 | if (event == NULL) { |
445 | 0 | return; |
446 | 0 | } |
447 | 0 | event->device->iface.event_free(event->device, event); |
448 | 0 | } |
449 | | |
450 | 0 | void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) { |
451 | 0 | GGML_ASSERT(backend); |
452 | 0 | GGML_ASSERT(backend->iface.event_record != NULL); |
453 | |
454 | 0 | backend->iface.event_record(backend, event); |
455 | 0 | } |
456 | | |
457 | 0 | void ggml_backend_event_synchronize(ggml_backend_event_t event) { |
458 | 0 | GGML_ASSERT(event); |
459 | 0 | GGML_ASSERT(event->device->iface.event_synchronize); |
460 | |
461 | 0 | event->device->iface.event_synchronize(event->device, event); |
462 | 0 | } |
463 | | |
464 | 0 | void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) { |
465 | 0 | GGML_ASSERT(backend); |
466 | 0 | GGML_ASSERT(backend->iface.event_wait != NULL); |
467 | |
468 | 0 | backend->iface.event_wait(backend, event); |
469 | 0 | } |
470 | | |
471 | 0 | static void ggml_backend_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
472 | 0 | GGML_ASSERT(backend); |
473 | 0 | if (backend->iface.graph_optimize != NULL) { |
474 | 0 | backend->iface.graph_optimize(backend, cgraph); |
475 | 0 | } |
476 | 0 | } |
477 | | |
478 | | // Backend device |
479 | | |
480 | 0 | const char * ggml_backend_dev_name(ggml_backend_dev_t device) { |
481 | 0 | GGML_ASSERT(device); |
482 | 0 | return device->iface.get_name(device); |
483 | 0 | } |
484 | | |
485 | 0 | const char * ggml_backend_dev_description(ggml_backend_dev_t device) { |
486 | 0 | GGML_ASSERT(device); |
487 | 0 | return device->iface.get_description(device); |
488 | 0 | } |
489 | | |
490 | 0 | void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) { |
491 | 0 | GGML_ASSERT(device); |
492 | 0 | device->iface.get_memory(device, free, total); |
493 | 0 | } |
494 | | |
495 | 319 | enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) { |
496 | 319 | GGML_ASSERT(device); |
497 | 319 | return device->iface.get_type(device); |
498 | 319 | } |
499 | | |
500 | 0 | void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) { |
501 | 0 | memset(props, 0, sizeof(*props)); |
502 | 0 | device->iface.get_props(device, props); |
503 | 0 | } |
504 | | |
505 | 0 | ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) { |
506 | 0 | GGML_ASSERT(device); |
507 | 0 | return device->reg; |
508 | 0 | } |
509 | | |
510 | 0 | ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) { |
511 | 0 | GGML_ASSERT(device); |
512 | 0 | return device->iface.init_backend(device, params); |
513 | 0 | } |
514 | | |
515 | 0 | ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) { |
516 | 0 | GGML_ASSERT(device); |
517 | 0 | return device->iface.get_buffer_type(device); |
518 | 0 | } |
519 | | |
520 | 0 | ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) { |
521 | 0 | GGML_ASSERT(device); |
522 | 0 | if (device->iface.get_host_buffer_type == NULL) { |
523 | 0 | return NULL; |
524 | 0 | } |
525 | | |
526 | 0 | return device->iface.get_host_buffer_type(device); |
527 | 0 | } |
528 | | |
529 | 0 | ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) { |
530 | 0 | GGML_ASSERT(device); |
531 | 0 | return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size); |
532 | 0 | } |
533 | | |
534 | 0 | bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { |
535 | 0 | GGML_ASSERT(device); |
536 | 0 | return device->iface.supports_op(device, op); |
537 | 0 | } |
538 | | |
539 | 0 | bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) { |
540 | 0 | GGML_ASSERT(device); |
541 | 0 | return device->iface.supports_buft(device, buft); |
542 | 0 | } |
543 | | |
544 | 0 | bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { |
545 | 0 | GGML_ASSERT(device); |
546 | 0 | if (device->iface.offload_op != NULL) { |
547 | 0 | return device->iface.offload_op(device, op); |
548 | 0 | } |
549 | | |
550 | 0 | return false; |
551 | 0 | } |
552 | | |
553 | | // Backend (reg) |
554 | | |
555 | 0 | const char * ggml_backend_reg_name(ggml_backend_reg_t reg) { |
556 | 0 | GGML_ASSERT(reg); |
557 | 0 | return reg->iface.get_name(reg); |
558 | 0 | } |
559 | | |
560 | 6 | size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) { |
561 | 6 | GGML_ASSERT(reg); |
562 | 6 | return reg->iface.get_device_count(reg); |
563 | 6 | } |
564 | | |
565 | 3 | ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) { |
566 | 3 | GGML_ASSERT(reg); |
567 | 3 | return reg->iface.get_device(reg, index); |
568 | 3 | } |
569 | | |
570 | 0 | void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) { |
571 | 0 | GGML_ASSERT(reg); |
572 | 0 | if (!reg->iface.get_proc_address) { |
573 | 0 | return NULL; |
574 | 0 | } |
575 | 0 | return reg->iface.get_proc_address(reg, name); |
576 | 0 | } |
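
The device and registry accessors above are typically used together to enumerate what a backend provides. A small illustrative helper (not part of this file; the registry handle is assumed to come from elsewhere, e.g. the registry lookup functions declared in `ggml-backend.h`):

```cpp
#include "ggml-backend.h"

// scan a registry for its first GPU device, if any
static ggml_backend_dev_t first_gpu_device(ggml_backend_reg_t reg) {
    for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) {
        ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
        if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
            return dev;
        }
    }
    return nullptr; // this registry exposes no GPU device
}
```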
577 | | |
578 | | // multi-buffer buffer |
579 | | |
580 | | struct ggml_backend_multi_buffer_context { |
581 | | ggml_backend_buffer_t * buffers; |
582 | | size_t n_buffers; |
583 | | }; |
584 | | |
585 | 0 | static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) { |
586 | 0 | GGML_ASSERT(buffer); |
587 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; |
588 | 0 | for (size_t i = 0; i < ctx->n_buffers; i++) { |
589 | 0 | ggml_backend_buffer_free(ctx->buffers[i]); |
590 | 0 | } |
591 | |
592 | 0 | free(ctx->buffers); |
593 | 0 | free(ctx); |
594 | 0 | } |
595 | | |
596 | 0 | static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
597 | 0 | GGML_ASSERT(buffer); |
598 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; |
599 | 0 | for (size_t i = 0; i < ctx->n_buffers; i++) { |
600 | 0 | ggml_backend_buffer_clear(ctx->buffers[i], value); |
601 | 0 | } |
602 | 0 | } |
603 | | |
604 | | static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = { |
605 | | /* .free_buffer = */ ggml_backend_multi_buffer_free_buffer, |
606 | | /* .get_base = */ NULL, |
607 | | /* .init_tensor = */ NULL, |
608 | | /* .memset_tensor = */ NULL, |
609 | | /* .set_tensor = */ NULL, |
610 | | /* .get_tensor = */ NULL, |
611 | | /* .cpy_tensor = */ NULL, |
612 | | /* .clear = */ ggml_backend_multi_buffer_clear, |
613 | | /* .reset = */ NULL, |
614 | | }; |
615 | | |
616 | 0 | ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) { |
617 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context)); |
618 | 0 | ctx->n_buffers = n_buffers; |
619 | 0 | ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t)); |
620 | |
621 | 0 | GGML_ASSERT(ctx->buffers != NULL); |
622 | |
623 | 0 | size_t total_size = 0; |
624 | 0 | for (size_t i = 0; i < n_buffers; i++) { |
625 | 0 | ctx->buffers[i] = buffers[i]; |
626 | 0 | total_size += ggml_backend_buffer_get_size(buffers[i]); |
627 | 0 | } |
628 | |
629 | 0 | return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size); |
630 | 0 | } |
631 | | |
632 | 0 | bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) { |
633 | 0 | GGML_ASSERT(buffer); |
634 | 0 | return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer; |
635 | 0 | } |
636 | | |
637 | 0 | void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { |
638 | 0 | GGML_ASSERT(buffer); |
639 | 0 | GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer)); |
640 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; |
641 | 0 | for (size_t i = 0; i < ctx->n_buffers; i++) { |
642 | 0 | ggml_backend_buffer_set_usage(ctx->buffers[i], usage); |
643 | 0 | } |
644 | 0 | } |
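
A multi-buffer is a thin aggregate: it stores the child buffer handles, reports their summed size, and forwards free/clear/set_usage to each child (it has no base pointer of its own, hence the NULL `get_base` above). A hedged sketch of building one; the buffer type and sizes are illustrative, and the declaration is assumed to live in the internal header `ggml-backend-impl.h` included by this file.

```cpp
#include "ggml-backend-impl.h" // assumption: the multi-buffer API is declared in the internal header

// group two buffers of the same buffer type so they can be cleared and freed
// through a single handle
static ggml_backend_buffer_t make_paired_buffer(ggml_backend_buffer_type_t buft) {
    ggml_backend_buffer_t parts[2] = {
        ggml_backend_buft_alloc_buffer(buft, 1024),
        ggml_backend_buft_alloc_buffer(buft, 2048),
    };
    // the multi-buffer copies the handles into its own array and reports size 1024 + 2048;
    // freeing it frees both parts
    return ggml_backend_multi_buffer_alloc_buffer(parts, 2);
}
```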
645 | | |
646 | | // creates a copy of the tensor with the same memory layout |
647 | 0 | static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) { |
648 | 0 | struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor); |
649 | 0 | for (int i = 0; i < GGML_MAX_DIMS; i++) { |
650 | 0 | dup->nb[i] = tensor->nb[i]; |
651 | 0 | } |
652 | 0 | return dup; |
653 | 0 | } |
654 | | |
655 | 0 | static bool ggml_is_view_op(enum ggml_op op) { |
656 | 0 | return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE; |
657 | 0 | } |
658 | | |
659 | | // scheduler |
660 | | |
661 | | #ifndef GGML_SCHED_MAX_BACKENDS |
662 | | #define GGML_SCHED_MAX_BACKENDS 16 |
663 | | #endif |
664 | | |
665 | | #ifndef GGML_SCHED_MAX_SPLIT_INPUTS |
666 | 0 | #define GGML_SCHED_MAX_SPLIT_INPUTS 30 |
667 | | #endif |
668 | | |
669 | | #ifndef GGML_SCHED_MAX_COPIES |
670 | | #define GGML_SCHED_MAX_COPIES 4 |
671 | | #endif |
672 | | |
673 | | struct ggml_backend_sched_split { |
674 | | int backend_id; |
675 | | int i_start; |
676 | | int i_end; |
677 | | struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS]; |
678 | | int n_inputs; |
679 | | // graph view of this split |
680 | | struct ggml_cgraph graph; |
681 | | }; |
682 | | |
683 | | struct ggml_backend_sched { |
684 | | bool is_reset; // true if the scheduler has been reset since the last graph split |
685 | | bool is_alloc; |
686 | | |
687 | | int n_backends; |
688 | | |
689 | | ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS]; |
690 | | ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS]; |
691 | | ggml_gallocr_t galloc; |
692 | | |
693 | | // hash map of the nodes in the graph |
694 | | struct ggml_hash_set hash_set; |
695 | | int * hv_tensor_backend_ids; // [hash_set.size] |
696 | | struct ggml_tensor ** hv_tensor_copies; // [hash_set.size][n_backends][n_copies] |
697 | | |
698 | | int * node_backend_ids; // [graph_size] |
699 | | int * leaf_backend_ids; // [graph_size] |
700 | | |
701 | | int * prev_node_backend_ids; // [graph_size] |
702 | | int * prev_leaf_backend_ids; // [graph_size] |
703 | | |
704 | | // copy of the graph with modified inputs |
705 | | struct ggml_cgraph graph; |
706 | | |
707 | | // graph splits |
708 | | struct ggml_backend_sched_split * splits; |
709 | | int n_splits; |
710 | | int splits_capacity; |
711 | | |
712 | | // pipeline parallelism support |
713 | | int n_copies; |
714 | | int cur_copy; |
715 | | int next_copy; |
716 | | ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES]; |
717 | | struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS]; |
718 | | int n_graph_inputs; |
719 | | |
720 | | struct ggml_context * ctx; |
721 | | |
722 | | ggml_backend_sched_eval_callback callback_eval; |
723 | | void * callback_eval_user_data; |
724 | | |
725 | | char * context_buffer; |
726 | | size_t context_buffer_size; |
727 | | |
728 | | bool op_offload; |
729 | | |
730 | | int debug; |
731 | | |
732 | | // used for debugging graph reallocations [GGML_SCHED_DEBUG_REALLOC] |
733 | | // ref: https://github.com/ggml-org/llama.cpp/pull/17617 |
734 | | int debug_realloc; |
735 | | int debug_graph_size; |
736 | | int debug_prev_graph_size; |
737 | | }; |
738 | | |
739 | 0 | #define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor) |
740 | 0 | #define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)] |
741 | 0 | #define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)] |
742 | 0 | #define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id) |
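
`hv_tensor_copies` is a single flat array of tensor pointers with logical shape `[hash_set.size][n_backends][n_copies]`; the `tensor_id_copy` macro above computes the flattened offset into it. A standalone restatement of that arithmetic (illustrative names, not part of this file):

```cpp
// flat index of copy `copy_id` held for backend `backend_id` of the tensor with hash id `id`
static inline size_t sched_copy_index(size_t id, int backend_id, int copy_id,
                                      int n_backends, int n_copies) {
    return id * n_backends * n_copies + backend_id * n_copies + copy_id;
}
// e.g. with n_backends = 2 and n_copies = 4, copy #1 on backend #1 of the tensor
// with hash id 3 lands at 3*2*4 + 1*4 + 1 = 29
```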
743 | | |
744 | | // returns the priority of the backend, lower id is higher priority |
745 | 0 | static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) { |
746 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
747 | 0 | if (sched->backends[i] == backend) { |
748 | 0 | return i; |
749 | 0 | } |
750 | 0 | } |
751 | 0 | return -1; |
752 | 0 | } |
753 | | |
754 | 0 | static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) { |
755 | 0 | ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
756 | 0 | if (buffer == NULL) { |
757 | 0 | return -1; |
758 | 0 | } |
759 | | |
760 | | // find highest prio backend that supports the buffer type and the op |
761 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
762 | 0 | if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) && |
763 | 0 | ggml_backend_supports_op(sched->backends[i], op)) { |
764 | 0 | return i; |
765 | 0 | } |
766 | 0 | } |
767 | | |
768 | | #ifndef NDEBUG |
769 | | GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n", |
770 | | __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name); |
771 | | #endif |
772 | | |
773 | 0 | return -1; |
774 | 0 | } |
775 | | |
776 | | #if 0 |
777 | | #define GGML_SCHED_MAX_SPLITS_DEBUG 4096 |
778 | | static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only |
779 | | #define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__) |
780 | | #define GET_CAUSE(node) causes[hash_id(node)] |
781 | | #else |
782 | | #define SET_CAUSE(node, ...) |
783 | | #define GET_CAUSE(node) "" |
784 | | #endif |
785 | | |
786 | | // returns the backend that should be used for the node based on the current locations |
787 | 0 | static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) { |
788 | | // assign pre-allocated nodes to their backend |
789 | 0 | int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor); |
790 | 0 | if (cur_backend_id != -1) { |
791 | 0 | SET_CAUSE(tensor, "1.dst"); |
792 | 0 | return cur_backend_id; |
793 | 0 | } |
794 | | |
795 | | // view_src |
796 | 0 | if (tensor->view_src != NULL) { |
797 | 0 | cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor); |
798 | 0 | if (cur_backend_id != -1) { |
799 | 0 | SET_CAUSE(tensor, "1.vsrc"); |
800 | 0 | return cur_backend_id; |
801 | 0 | } |
802 | 0 | } |
803 | | |
804 | 0 | if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) { |
805 | | // since the tensor is pre-allocated, it cannot be moved to another backend |
806 | 0 | ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
807 | 0 | GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op)); |
808 | 0 | } |
809 | | |
810 | | // graph input |
811 | 0 | if (tensor->flags & GGML_TENSOR_FLAG_INPUT) { |
812 | 0 | cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU) |
813 | 0 | SET_CAUSE(tensor, "1.inp"); |
814 | 0 | return cur_backend_id; |
815 | 0 | } |
816 | | |
817 | | // operations with weights are preferably run on the same backend as the weights |
818 | 0 | for (int i = 0; i < GGML_MAX_SRC; i++) { |
819 | 0 | const struct ggml_tensor * src = tensor->src[i]; |
820 | 0 | if (src == NULL) { |
821 | 0 | continue; |
822 | 0 | } |
823 | | // skip ROPE since the rope freqs tensor is too small to choose a backend based on it |
824 | | // not an ideal solution |
825 | 0 | if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { |
826 | 0 | int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor); |
827 | | // check if a backend with higher prio wants to offload the op |
828 | 0 | if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) { |
829 | 0 | for (int b = 0; b < src_backend_id; b++) { |
830 | 0 | if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) { |
831 | 0 | SET_CAUSE(tensor, "1.off"); |
832 | 0 | return b; |
833 | 0 | } |
834 | 0 | } |
835 | 0 | } |
836 | 0 | SET_CAUSE(tensor, "1.wgt%d", i); |
837 | 0 | return src_backend_id; |
838 | 0 | } |
839 | 0 | } |
840 | | |
841 | 0 | return -1; |
842 | 0 | } |
843 | | |
844 | 0 | static char * fmt_size(size_t size) { |
845 | 0 | static char buffer[128]; |
846 | 0 | if (size >= 1024*1024) { |
847 | 0 | snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024); |
848 | 0 | } else { |
849 | 0 | snprintf(buffer, sizeof(buffer), "%zuK", size/1024); |
850 | 0 | } |
851 | 0 | return buffer; |
852 | 0 | } |
853 | | |
854 | 0 | static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
855 | 0 | int cur_split = 0; |
856 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
857 | 0 | if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) { |
858 | 0 | ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id]; |
859 | 0 | GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend), |
860 | 0 | sched->splits[cur_split].n_inputs); |
861 | 0 | for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) { |
862 | 0 | if (j == 0) { |
863 | 0 | GGML_LOG_DEBUG(": "); |
864 | 0 | } |
865 | 0 | GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, |
866 | 0 | fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j]))); |
867 | 0 | } |
868 | 0 | GGML_LOG_DEBUG("\n"); |
869 | 0 | cur_split++; |
870 | 0 | } |
871 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
872 | 0 | if (ggml_is_view_op(node->op)) { |
873 | 0 | continue; |
874 | 0 | } |
875 | 0 | if (sched->debug > 1) { |
876 | 0 | ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node); |
877 | 0 | GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d:", i, ggml_op_name(node->op), node->name, |
878 | 0 | fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node), |
879 | 0 | graph->use_counts[ggml_hash_find(&graph->visited_hash_set, node)]); |
880 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
881 | 0 | struct ggml_tensor * src = node->src[j]; |
882 | 0 | if (src == NULL) { |
883 | 0 | continue; |
884 | 0 | } |
885 | 0 | ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src); |
886 | 0 | GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name, |
887 | 0 | fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src)); |
888 | 0 | } |
889 | 0 | GGML_LOG_DEBUG("\n"); |
890 | 0 | } |
891 | 0 | } |
892 | 0 | } |
893 | | |
894 | 0 | static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) { |
895 | 0 | ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer; |
896 | 0 | ggml_backend_buffer_type_t buft = NULL; |
897 | |
898 | 0 | if (buf) { |
899 | | // the tensor is already allocated |
900 | 0 | buft = buf->buft; |
901 | 0 | } else { |
902 | | // see if the tensor already has a backend assigned, and use the buffer type of that backend |
903 | 0 | int tensor_backend_id = tensor_backend_id(t); |
904 | 0 | if (tensor_backend_id == -1 && t->view_src) { |
905 | 0 | tensor_backend_id = tensor_backend_id(t->view_src); |
906 | 0 | } |
907 | 0 | if (tensor_backend_id != -1) { |
908 | 0 | buft = sched->bufts[tensor_backend_id]; |
909 | 0 | } |
910 | 0 | } |
911 | |
912 | 0 | return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft); |
913 | 0 | } |
914 | | |
915 | 0 | static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) { |
916 | 0 | if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) { |
917 | 0 | *node_backend_id = cur_backend_id; |
918 | 0 | SET_CAUSE(node, "2.sup"); |
919 | 0 | } |
920 | 0 | } |
921 | | |
922 | | // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend |
923 | 0 | void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
924 | | // reset splits |
925 | 0 | sched->n_splits = 0; |
926 | 0 | sched->n_graph_inputs = 0; |
927 | 0 | sched->is_reset = false; |
928 | |
929 | 0 | struct ggml_init_params params = { |
930 | 0 | /* .mem_size = */ sched->context_buffer_size, |
931 | 0 | /* .mem_buffer = */ sched->context_buffer, |
932 | 0 | /* .no_alloc = */ true |
933 | 0 | }; |
934 | |
935 | 0 | ggml_free(sched->ctx); |
936 | |
937 | 0 | sched->ctx = ggml_init(params); |
938 | 0 | if (sched->ctx == NULL) { |
939 | 0 | GGML_ABORT("%s: failed to initialize context\n", __func__); |
940 | 0 | } |
941 | | |
942 | | // pass 1: assign backends to ops with pre-allocated inputs |
943 | 0 | for (int i = 0; i < graph->n_leafs; i++) { |
944 | 0 | struct ggml_tensor * leaf = graph->leafs[i]; |
945 | 0 | int * leaf_backend_id = &tensor_backend_id(leaf); |
946 | | // do not overwrite user assignments |
947 | 0 | if (*leaf_backend_id == -1) { |
948 | 0 | *leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf); |
949 | 0 | } |
950 | 0 | } |
951 | |
952 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
953 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
954 | 0 | int * node_backend_id = &tensor_backend_id(node); |
955 | | // do not overwrite user assignments |
956 | 0 | if (*node_backend_id == -1) { |
957 | 0 | *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node); |
958 | |
959 | | #if 0 |
960 | | // src |
961 | | if (node->op == GGML_OP_NONE) { |
962 | | continue; |
963 | | } |
964 | | |
965 | | for (int j = 0; j < GGML_MAX_SRC; j++) { |
966 | | struct ggml_tensor * src = node->src[j]; |
967 | | if (src == NULL) { |
968 | | continue; |
969 | | } |
970 | | int * src_backend_id = &tensor_backend_id(src); |
971 | | if (*src_backend_id == -1) { |
972 | | *src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src); |
973 | | } |
974 | | } |
975 | | #endif |
976 | 0 | } |
977 | 0 | } |
978 | | |
979 | | // pass 2: expand current backend assignments |
980 | | // assign the same backend to adjacent nodes |
981 | | // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend) |
982 | | // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops |
983 | | // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later when the locations of its inputs are known |
984 | | // expand gpu down |
985 | 0 | { |
986 | 0 | int cur_backend_id = -1; |
987 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
988 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
989 | 0 | if (ggml_is_view_op(node->op)) { |
990 | 0 | continue; |
991 | 0 | } |
992 | 0 | int * node_backend_id = &tensor_backend_id(node); |
993 | 0 | if (*node_backend_id != -1) { |
994 | 0 | if (*node_backend_id == sched->n_backends - 1) { |
995 | | // skip cpu (lowest prio backend) |
996 | 0 | cur_backend_id = -1; |
997 | 0 | } else { |
998 | 0 | cur_backend_id = *node_backend_id; |
999 | 0 | } |
1000 | 0 | } else if (cur_backend_id != -1) { |
1001 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1002 | 0 | } |
1003 | 0 | } |
1004 | 0 | } |
1005 | | // expand gpu up |
1006 | 0 | { |
1007 | 0 | int cur_backend_id = -1; |
1008 | 0 | for (int i = graph->n_nodes - 1; i >= 0; i--) { |
1009 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1010 | 0 | if (ggml_is_view_op(node->op)) { |
1011 | 0 | continue; |
1012 | 0 | } |
1013 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1014 | 0 | if (*node_backend_id != -1) { |
1015 | 0 | if (*node_backend_id == sched->n_backends - 1) { |
1016 | | // skip cpu (lowest prio backend) |
1017 | 0 | cur_backend_id = -1; |
1018 | 0 | } else { |
1019 | 0 | cur_backend_id = *node_backend_id; |
1020 | 0 | } |
1021 | 0 | } else if (cur_backend_id != -1) { |
1022 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1023 | 0 | } |
1024 | 0 | } |
1025 | 0 | } |
1026 | | // expand rest down |
1027 | 0 | { |
1028 | 0 | int cur_backend_id = -1; |
1029 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1030 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1031 | 0 | if (ggml_is_view_op(node->op)) { |
1032 | 0 | continue; |
1033 | 0 | } |
1034 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1035 | 0 | if (*node_backend_id != -1) { |
1036 | 0 | cur_backend_id = *node_backend_id; |
1037 | 0 | } else if (cur_backend_id != -1) { |
1038 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1039 | 0 | } |
1040 | 0 | } |
1041 | 0 | } |
1042 | | // expand rest up |
1043 | 0 | { |
1044 | 0 | int cur_backend_id = -1; |
1045 | 0 | for (int i = graph->n_nodes - 1; i >= 0; i--) { |
1046 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1047 | 0 | if (ggml_is_view_op(node->op)) { |
1048 | 0 | continue; |
1049 | 0 | } |
1050 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1051 | 0 | if (*node_backend_id != -1) { |
1052 | 0 | cur_backend_id = *node_backend_id; |
1053 | 0 | } else if (cur_backend_id != -1) { |
1054 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1055 | 0 | } |
1056 | 0 | } |
1057 | 0 | } |
1058 | | |
1059 | | // pass 3: upgrade nodes to higher prio backends with compatible buffer types |
1060 | | // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there |
1061 | | // however, we also need to verify that the sources are in compatible buffer types |
1062 | | // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph |
1063 | | // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same |
1064 | | // this is not uncommon since multiple backends can use host memory, with the same buffer type (eg. BLAS and CPU) |
1065 | | // additionally, set remaining unassigned nodes to the backend with the most supported inputs |
1066 | | // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point |
1067 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1068 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1069 | 0 | if (ggml_is_view_op(node->op)) { |
1070 | 0 | continue; |
1071 | 0 | } |
1072 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1073 | 0 | if (*node_backend_id == -1) { |
1074 | | // unassigned node: find the backend with the most supported inputs |
1075 | 0 | int n_supported_best = -1; |
1076 | 0 | for (int b = 0; b < sched->n_backends; b++) { |
1077 | 0 | if (ggml_backend_supports_op(sched->backends[b], node)) { |
1078 | 0 | int n_supported = 0; |
1079 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1080 | 0 | struct ggml_tensor * src = node->src[j]; |
1081 | 0 | if (src == NULL) { |
1082 | 0 | continue; |
1083 | 0 | } |
1084 | 0 | if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) { |
1085 | 0 | n_supported++; |
1086 | 0 | } |
1087 | 0 | } |
1088 | 0 | if (n_supported > n_supported_best) { |
1089 | 0 | n_supported_best = n_supported; |
1090 | 0 | *node_backend_id = b; |
1091 | 0 | SET_CAUSE(node, "3.best"); |
1092 | 0 | } |
1093 | 0 | } |
1094 | 0 | } |
1095 | 0 | } else { |
1096 | | // assigned node: upgrade to higher prio backend if possible |
1097 | 0 | for (int b = 0; b < *node_backend_id; b++) { |
1098 | 0 | if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) { |
1099 | 0 | bool supported = true; |
1100 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1101 | 0 | struct ggml_tensor * src = node->src[j]; |
1102 | 0 | if (src == NULL) { |
1103 | 0 | continue; |
1104 | 0 | } |
1105 | 0 | if (!ggml_backend_sched_buffer_supported(sched, src, b)) { |
1106 | 0 | supported = false; |
1107 | 0 | break; |
1108 | 0 | } |
1109 | 0 | } |
1110 | 0 | if (supported) { |
1111 | 0 | *node_backend_id = b; |
1112 | 0 | SET_CAUSE(node, "3.upg"); |
1113 | 0 | break; |
1114 | 0 | } |
1115 | 0 | } |
1116 | 0 | } |
1117 | 0 | } |
1118 | 0 | } |
1119 | | |
1120 | | // pass 4: assign backends to remaining src from dst and view_src |
1121 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1122 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1123 | 0 | int * cur_backend_id = &tensor_backend_id(node); |
1124 | 0 | if (node->view_src != NULL && *cur_backend_id == -1) { |
1125 | 0 | *cur_backend_id = tensor_backend_id(node->view_src); |
1126 | 0 | SET_CAUSE(node, "4.vsrc"); |
1127 | 0 | } |
1128 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1129 | 0 | struct ggml_tensor * src = node->src[j]; |
1130 | 0 | if (src == NULL) { |
1131 | 0 | continue; |
1132 | 0 | } |
1133 | 0 | int * src_backend_id = &tensor_backend_id(src); |
1134 | 0 | if (*src_backend_id == -1) { |
1135 | 0 | if (src->view_src != NULL) { |
1136 | | // views are always on the same backend as the source |
1137 | 0 | *src_backend_id = tensor_backend_id(src->view_src); |
1138 | 0 | SET_CAUSE(src, "4.vsrc"); |
1139 | 0 | } else { |
1140 | 0 | *src_backend_id = *cur_backend_id; |
1141 | 0 | SET_CAUSE(src, "4.cur"); |
1142 | 0 | } |
1143 | 0 | } |
1144 | 0 | } |
1145 | | // if the node is still unassigned, assign it to the first backend that supports it |
1146 | 0 | for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) { |
1147 | 0 | ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id); |
1148 | 0 | } |
1149 | 0 | GGML_ASSERT(*cur_backend_id != -1); |
1150 | 0 | } |
1151 | | |
1152 | | // pass 5: split graph, find tensors that need to be copied |
1153 | 0 | { |
1154 | 0 | int i_split = 0; |
1155 | 0 | struct ggml_backend_sched_split * split = &sched->splits[0]; |
1156 | | // find the backend of the first split, skipping view ops |
1157 | 0 | int i = 0; |
1158 | 0 | for (; i < graph->n_nodes; i++) { |
1159 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1160 | 0 | if (!ggml_is_view_op(node->op)) { |
1161 | 0 | split->backend_id = tensor_backend_id(node); |
1162 | 0 | break; |
1163 | 0 | } |
1164 | 0 | } |
1165 | 0 | split->i_start = 0; |
1166 | 0 | split->n_inputs = 0; |
1167 | 0 | int cur_backend_id = split->backend_id; |
1168 | 0 | for (; i < graph->n_nodes; i++) { |
1169 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1170 | |
1171 | 0 | if (ggml_is_view_op(node->op)) { |
1172 | 0 | continue; |
1173 | 0 | } |
1174 | | |
1175 | 0 | const int node_backend_id = tensor_backend_id(node); |
1176 | |
1177 | 0 | GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback |
1178 | | |
1179 | | // check if we should start a new split based on the sources of the current node |
1180 | 0 | bool need_new_split = false; |
1181 | 0 | if (node_backend_id == cur_backend_id && split->n_inputs > 0) { |
1182 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1183 | 0 | struct ggml_tensor * src = node->src[j]; |
1184 | 0 | if (src == NULL) { |
1185 | 0 | continue; |
1186 | 0 | } |
1187 | | // check if a weight is on a different and incompatible backend |
1188 | | // by starting a new split, the memory of the previously offloaded weights can be reused |
1189 | 0 | if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { |
1190 | 0 | int src_backend_id = tensor_backend_id(src); |
1191 | 0 | if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { |
1192 | 0 | need_new_split = true; |
1193 | 0 | break; |
1194 | 0 | } |
1195 | 0 | } |
1196 | | // check if the split has too many inputs |
1197 | | // FIXME: count the number of inputs instead of only checking when full |
1198 | 0 | if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) { |
1199 | 0 | const size_t id = hash_id(src); |
1200 | 0 | int src_backend_id = sched->hv_tensor_backend_ids[id]; |
1201 | 0 | bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id); |
1202 | 0 | if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) { |
1203 | 0 | need_new_split = true; |
1204 | 0 | break; |
1205 | 0 | } |
1206 | 0 | } |
1207 | 0 | } |
1208 | 0 | } |
1209 | |
1210 | 0 | if (node_backend_id != cur_backend_id || need_new_split) { |
1211 | 0 | split->i_end = i; |
1212 | 0 | i_split++; |
1213 | 0 | if (i_split >= sched->splits_capacity) { |
1214 | 0 | sched->splits_capacity *= 2; |
1215 | 0 | sched->splits = (ggml_backend_sched_split *) |
1216 | 0 | realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split)); |
1217 | 0 | GGML_ASSERT(sched->splits != NULL); |
1218 | 0 | } |
1219 | 0 | split = &sched->splits[i_split]; |
1220 | 0 | split->backend_id = node_backend_id; |
1221 | 0 | split->i_start = i; |
1222 | 0 | split->n_inputs = 0; |
1223 | 0 | cur_backend_id = node_backend_id; |
1224 | 0 | } |
1225 | | |
1226 | | // find inputs that are not on the same backend |
1227 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1228 | 0 | struct ggml_tensor * src = node->src[j]; |
1229 | 0 | if (src == NULL) { |
1230 | 0 | continue; |
1231 | 0 | } |
1232 | | |
1233 | 0 | size_t src_id = hash_id(src); |
1234 | 0 | const int src_backend_id = sched->hv_tensor_backend_ids[src_id]; |
1235 | 0 | GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now |
1236 | |
1237 | 0 | if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) { |
1238 | 0 | if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) { |
1239 | 0 | ggml_backend_t backend = sched->backends[src_backend_id]; |
1240 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1241 | 0 | struct ggml_tensor * tensor_copy; |
1242 | 0 | if (c == sched->cur_copy) { |
1243 | 0 | tensor_copy = src; // use the original tensor as the current copy |
1244 | 0 | } else { |
1245 | 0 | tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); |
1246 | 0 | ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); |
1247 | 0 | } |
1248 | 0 | ggml_set_input(tensor_copy); |
1249 | 0 | ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor |
1250 | 0 | tensor_id_copy(src_id, src_backend_id, c) = tensor_copy; |
1251 | 0 | SET_CAUSE(tensor_copy, "4.cpy"); |
1252 | 0 | } |
1253 | 0 | int n_graph_inputs = sched->n_graph_inputs++; |
1254 | 0 | GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); |
1255 | 0 | sched->graph_inputs[n_graph_inputs] = src; |
1256 | 0 | } |
1257 | 0 | } |
1258 | |
1259 | 0 | if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { |
1260 | | // create a copy of the input in the split's backend |
1261 | 0 | if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) { |
1262 | 0 | ggml_backend_t backend = sched->backends[cur_backend_id]; |
1263 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1264 | 0 | struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); |
1265 | 0 | ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); |
1266 | 0 | if (sched->n_copies > 1) { |
1267 | 0 | ggml_set_input(tensor_copy); |
1268 | 0 | ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor |
1269 | 0 | } |
1270 | 0 | tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy; |
1271 | 0 | SET_CAUSE(tensor_copy, "4.cpy"); |
1272 | 0 | } |
1273 | 0 | int n_inputs = split->n_inputs++; |
1274 | 0 | GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); |
1275 | 0 | split->inputs[n_inputs] = src; |
1276 | 0 | } |
1277 | 0 | node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy); |
1278 | 0 | } |
1279 | 0 | } |
1280 | 0 | } |
1281 | 0 | split->i_end = graph->n_nodes; |
1282 | 0 | sched->n_splits = i_split + 1; |
1283 | 0 | } |
1284 | |
1285 | 0 | if (sched->debug) { |
1286 | 0 | ggml_backend_sched_print_assignments(sched, graph); |
1287 | 0 | } |
1288 | | |
1289 | | // swap node_backend_ids and leaf_backend_ids with prevs |
1290 | 0 | { |
1291 | 0 | int * tmp = sched->node_backend_ids; |
1292 | 0 | sched->node_backend_ids = sched->prev_node_backend_ids; |
1293 | 0 | sched->prev_node_backend_ids = tmp; |
1294 | |
1295 | 0 | tmp = sched->leaf_backend_ids; |
1296 | 0 | sched->leaf_backend_ids = sched->prev_leaf_backend_ids; |
1297 | 0 | sched->prev_leaf_backend_ids = tmp; |
1298 | 0 | } |
1299 | |
1300 | 0 | int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies; |
1301 | | |
1302 | | // remember the actual graph_size for performing reallocation checks later [GGML_SCHED_DEBUG_REALLOC] |
1303 | 0 | sched->debug_prev_graph_size = sched->debug_graph_size; |
1304 | 0 | sched->debug_graph_size = graph_size; |
1305 | |
1306 | 0 | if (sched->graph.size < graph_size) { |
1307 | 0 | sched->graph.size = graph_size; |
1308 | 0 | sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *)); |
1309 | 0 | sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *)); |
1310 | 0 | GGML_ASSERT(sched->graph.nodes != NULL); |
1311 | 0 | GGML_ASSERT(sched->graph.leafs != NULL); |
1312 | 0 | } |
1313 | 0 | sched->graph.n_nodes = 0; |
1314 | 0 | sched->graph.n_leafs = 0; |
1315 | |
1316 | 0 | struct ggml_cgraph * graph_copy = &sched->graph; |
1317 | |
1318 | 0 | for (int i = 0; i < sched->n_splits; i++) { |
1319 | 0 | struct ggml_backend_sched_split * split = &sched->splits[i]; |
1320 | 0 | split->graph = ggml_graph_view(graph, split->i_start, split->i_end); |
1321 | | |
1322 | | // Optimize this split of the graph. This needs to happen before we make graph_copy, |
1323 | | // so they are in sync. |
1324 | 0 | ggml_backend_graph_optimize(sched->backends[split->backend_id], &split->graph); |
1325 | | |
1326 | | // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split |
1327 | 0 | for (int j = 0; j < split->n_inputs; j++) { |
1328 | 0 | assert(graph_copy->size > (graph_copy->n_nodes + 1)); |
1329 | |
|
1330 | 0 | struct ggml_tensor * input = split->inputs[j]; |
1331 | 0 | const size_t input_id = hash_id(input); |
1332 | 0 | struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy); |
1333 | | |
1334 | | // add a dependency to the input source so that it is not freed before the copy is done |
1335 | 0 | struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input); |
1336 | 0 | input_dep->src[0] = input; |
1337 | 0 | sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id]; |
1338 | 0 | graph_copy->nodes[graph_copy->n_nodes++] = input_dep; |
1339 | | |
1340 | | // add a dependency to the input copy so that it is allocated at the start of the split |
1341 | 0 | sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id; |
1342 | 0 | graph_copy->nodes[graph_copy->n_nodes++] = input_cpy; |
1343 | 0 | } |
1344 | |
|
1345 | 0 | for (int j = split->i_start; j < split->i_end; j++) { |
1346 | 0 | assert(graph_copy->size > graph_copy->n_nodes); |
1347 | 0 | sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]); |
1348 | 0 | graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j]; |
1349 | 0 | } |
1350 | 0 | } |
1351 | |
|
1352 | 0 | if (sched->n_copies > 1) { |
1353 | | // add input copies as leafs so that they are allocated first |
1354 | 0 | for (int i = 0; i < sched->n_graph_inputs; i++) { |
1355 | 0 | struct ggml_tensor * input = sched->graph_inputs[i]; |
1356 | 0 | size_t id = hash_id(input); |
1357 | 0 | int backend_id = tensor_backend_id(input); |
1358 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1359 | 0 | struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); |
1360 | 0 | sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; |
1361 | 0 | assert(graph_copy->size > graph_copy->n_leafs); |
1362 | 0 | graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; |
1363 | 0 | } |
1364 | 0 | } |
1365 | |
|
1366 | 0 | for (int i = 0; i < sched->n_splits; i++) { |
1367 | 0 | struct ggml_backend_sched_split * split = &sched->splits[i]; |
1368 | 0 | int backend_id = split->backend_id; |
1369 | 0 | for (int j = 0; j < split->n_inputs; j++) { |
1370 | 0 | struct ggml_tensor * input = split->inputs[j]; |
1371 | 0 | size_t id = hash_id(input); |
1372 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1373 | 0 | struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); |
1374 | 0 | sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; |
1375 | 0 | assert(graph_copy->size > graph_copy->n_leafs); |
1376 | 0 | graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; |
1377 | 0 | } |
1378 | 0 | } |
1379 | 0 | } |
1380 | 0 | } |
1381 | | |
1382 | | // add leafs from the original graph |
1383 | 0 | for (int i = 0; i < graph->n_leafs; i++) { |
1384 | 0 | struct ggml_tensor * leaf = graph->leafs[i]; |
1385 | 0 | sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf); |
1386 | 0 | assert(graph_copy->size > graph_copy->n_leafs); |
1387 | 0 | graph_copy->leafs[graph_copy->n_leafs++] = leaf; |
1388 | 0 | } |
1389 | 0 | } |
1390 | | |
1391 | 0 | static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) { |
1392 | 0 | bool backend_ids_changed = false; |
1393 | 0 | for (int i = 0; i < sched->graph.n_nodes; i++) { |
1394 | 0 | if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] && |
1395 | 0 | sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) { |
1396 | 0 | backend_ids_changed = true; |
1397 | 0 | break; |
1398 | 0 | } |
1399 | 0 | } |
1400 | 0 | if (!backend_ids_changed) { |
1401 | 0 | for (int i = 0; i < sched->graph.n_leafs; i++) { |
1402 | 0 | if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] && |
1403 | 0 | sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) { |
1404 | 0 | backend_ids_changed = true; |
1405 | 0 | break; |
1406 | 0 | } |
1407 | 0 | } |
1408 | 0 | } |
1409 | | |
1410 | | // allocate graph |
1411 | 0 | if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { |
1412 | | #ifndef NDEBUG |
1413 | | GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed); |
1414 | | #endif |
1415 | |
|
1416 | 0 | if (sched->debug_realloc > 0) { |
1417 | | // we are interested only in situations where the graph was reallocated even though its size remained the same [GGML_SCHED_DEBUG_REALLOC] |
1418 | | // example: https://github.com/ggml-org/llama.cpp/pull/17143 |
1419 | 0 | const bool unexpected = !backend_ids_changed && sched->debug_prev_graph_size == sched->debug_graph_size; |
1420 | |
|
1421 | 0 | if (unexpected || sched->debug_realloc > 1) { |
1422 | 0 | GGML_ABORT("%s: unexpected graph reallocation (graph size = %d, nodes = %d, leafs = %d), debug_realloc = %d\n", __func__, |
1423 | 0 | sched->debug_graph_size, sched->graph.n_nodes, sched->graph.n_leafs, sched->debug_realloc); |
1424 | 0 | } |
1425 | 0 | } |
1426 | | |
1427 | | // the re-allocation may cause the split inputs to be moved to a different address |
1428 | | // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy |
1429 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
1430 | 0 | ggml_backend_synchronize(sched->backends[i]); |
1431 | 0 | } |
1432 | |
|
1433 | 0 | ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids); |
1434 | 0 | if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { |
1435 | 0 | GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__); |
1436 | 0 | return false; |
1437 | 0 | } |
1438 | 0 | } |
1439 | | |
1440 | 0 | return true; |
1441 | 0 | } |
1442 | | |
1443 | 0 | static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) { |
1444 | 0 | GGML_ASSERT(sched); |
1445 | 0 | struct ggml_backend_sched_split * splits = sched->splits; |
1446 | |
|
1447 | 0 | ggml_tensor * prev_ids_tensor = nullptr; |
1448 | 0 | std::vector<int32_t> ids; |
1449 | 0 | std::vector<ggml_bitset_t> used_ids; |
1450 | |
|
1451 | 0 | for (int split_id = 0; split_id < sched->n_splits; split_id++) { |
1452 | 0 | struct ggml_backend_sched_split * split = &splits[split_id]; |
1453 | 0 | int split_backend_id = split->backend_id; |
1454 | 0 | ggml_backend_t split_backend = sched->backends[split_backend_id]; |
1455 | | |
1456 | | // copy the input tensors to the split backend |
1457 | 0 | for (int input_id = 0; input_id < split->n_inputs; input_id++) { |
1458 | 0 | ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]); |
1459 | 0 | struct ggml_tensor * input = split->inputs[input_id]; |
1460 | 0 | struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy); |
1461 | |
|
1462 | 0 | if (input->flags & GGML_TENSOR_FLAG_INPUT) { |
1463 | | // inputs from the user must be copied immediately to prevent the user from overwriting the data before the copy is done |
1464 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1465 | 0 | ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); |
1466 | 0 | } else { |
1467 | 0 | ggml_backend_synchronize(split_backend); |
1468 | 0 | } |
1469 | 0 | ggml_backend_tensor_copy(input, input_cpy); |
1470 | 0 | } else { |
1471 | | // wait for the split backend to finish using the input before overwriting it |
1472 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1473 | 0 | ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]); |
1474 | 0 | } else { |
1475 | 0 | ggml_backend_synchronize(split_backend); |
1476 | 0 | } |
1477 | | |
1478 | | // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used |
1479 | 0 | ggml_tensor * node = split->graph.nodes[0]; |
1480 | 0 | if (split->graph.n_nodes > 0 && |
1481 | 0 | ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && |
1482 | 0 | ggml_backend_buffer_is_host(input->buffer) && ( |
1483 | 0 | (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID) |
1484 | | //|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) /* GGML_OP_ADD_ID weights are small and not worth splitting */ |
1485 | 0 | )) { |
1486 | |
|
1487 | 0 | const int64_t n_expert = node->op == GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1]; |
1488 | 0 | const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1]; |
1489 | |
|
1490 | 0 | ggml_backend_synchronize(input_backend); |
1491 | | |
1492 | | // get the ids |
1493 | 0 | ggml_tensor * ids_tensor = node->src[2]; |
1494 | 0 | ggml_backend_t ids_backend = split_backend; |
1495 | | |
1496 | | // if the ids tensor is also an input of the split, it may not have been copied yet to the split backend |
1497 | | // in that case, we use the original ids tensor |
1498 | 0 | for (int i = input_id + 1; i < split->n_inputs; i++) { |
1499 | 0 | if (ids_tensor == tensor_copy(split->inputs[i], split_backend_id, sched->cur_copy)) { |
1500 | 0 | ids_tensor = split->inputs[i]; |
1501 | 0 | ids_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[i]); |
1502 | 0 | break; |
1503 | 0 | } |
1504 | 0 | } |
1505 | |
|
1506 | 0 | if (ids_tensor != prev_ids_tensor) { |
1507 | 0 | ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t)); |
1508 | 0 | ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor)); |
1509 | 0 | ggml_backend_synchronize(ids_backend); |
1510 | | |
1511 | | // find the used experts |
1512 | 0 | used_ids.clear(); |
1513 | 0 | used_ids.resize(ggml_bitset_size(n_expert)); |
1514 | 0 | for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) { |
1515 | 0 | for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) { |
1516 | 0 | int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)]; |
1517 | 0 | GGML_ASSERT(id >= 0 && id < n_expert); |
1518 | 0 | ggml_bitset_set(used_ids.data(), id); |
1519 | 0 | } |
1520 | 0 | } |
1521 | |
|
1522 | 0 | prev_ids_tensor = ids_tensor; |
1523 | 0 | } |
1524 | | |
1525 | | // group consecutive experts and copy them together |
1526 | 0 | auto copy_experts = [&](int32_t first_id, int32_t last_id) { |
1527 | 0 | const size_t expert_offset = first_id * expert_size; |
1528 | 0 | const size_t expert_size_copy = (last_id - first_id + 1) * expert_size; |
1529 | 0 | const size_t padding = std::min<size_t>(expert_size, 512); |
1530 | 0 | const size_t padding_end = last_id < n_expert - 1 ? padding : 0; |
1531 | |
|
1532 | 0 | ggml_backend_tensor_set_async(split_backend, |
1533 | 0 | input_cpy, |
1534 | 0 | (const uint8_t *)input->data + expert_offset, expert_offset, |
1535 | | // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert |
1536 | | // this is necessary for MMQ in the CUDA backend |
1537 | 0 | expert_size_copy + padding_end); |
1538 | 0 | }; |
1539 | |
|
1540 | 0 | int id = 0; |
1541 | 0 | while (!ggml_bitset_get(used_ids.data(), id)) { |
1542 | 0 | id++; |
1543 | 0 | } |
1544 | 0 | int32_t first_id = id; |
1545 | 0 | int32_t last_id = first_id; |
1546 | |
|
1547 | 0 | for (++id; id < n_expert; ++id) { |
1548 | 0 | if (!ggml_bitset_get(used_ids.data(), id)) { |
1549 | 0 | continue; |
1550 | 0 | } |
1551 | | |
1552 | 0 | if (id == last_id + 1) { |
1553 | 0 | last_id = id; |
1554 | 0 | continue; |
1555 | 0 | } |
1556 | | |
1557 | 0 | copy_experts(first_id, last_id); |
1558 | |
|
1559 | 0 | first_id = id; |
1560 | 0 | last_id = id; |
1561 | 0 | } |
1562 | 0 | copy_experts(first_id, last_id); |
1563 | 0 | } else { |
1564 | | // try an async copy; if that is not possible, fall back to a sync copy without synchronizing the dst backend, since the synchronization is handled here with multiple copies and events |
1565 | | // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface |
1566 | 0 | if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) { |
1567 | 0 | ggml_backend_synchronize(input_backend); |
1568 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1569 | 0 | ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); |
1570 | 0 | } else { |
1571 | 0 | ggml_backend_synchronize(split_backend); |
1572 | 0 | } |
1573 | 0 | ggml_backend_tensor_copy(input, input_cpy); |
1574 | 0 | } |
1575 | 0 | } |
1576 | 0 | } |
1577 | 0 | } |
1578 | |
|
1579 | 0 | if (!sched->callback_eval) { |
1580 | 0 | enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph); |
1581 | 0 | if (ec != GGML_STATUS_SUCCESS) { |
1582 | 0 | return ec; |
1583 | 0 | } |
1584 | 0 | } else { |
1585 | | // similar to ggml_backend_compare_graph_backend |
1586 | 0 | for (int j0 = 0; j0 < split->graph.n_nodes; j0++) { |
1587 | 0 | struct ggml_tensor * t = split->graph.nodes[j0]; |
1588 | | |
1589 | | // check if the user needs data from this node |
1590 | 0 | bool need = sched->callback_eval(t, true, sched->callback_eval_user_data); |
1591 | |
|
1592 | 0 | int j1 = j0; |
1593 | | |
1594 | | // determine the range [j0, j1] of nodes that can be computed together |
1595 | 0 | while (!need && j1 < split->graph.n_nodes - 1) { |
1596 | 0 | t = split->graph.nodes[++j1]; |
1597 | 0 | need = sched->callback_eval(t, true, sched->callback_eval_user_data); |
1598 | 0 | } |
1599 | |
|
1600 | 0 | struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1); |
1601 | |
|
1602 | 0 | enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv); |
1603 | 0 | if (ec != GGML_STATUS_SUCCESS) { |
1604 | 0 | return ec; |
1605 | 0 | } |
1606 | | |
1607 | | // TODO: pass backend to the callback, then the user can decide if they want to synchronize |
1608 | 0 | ggml_backend_synchronize(split_backend); |
1609 | |
|
1610 | 0 | if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) { |
1611 | 0 | break; |
1612 | 0 | } |
1613 | | |
1614 | 0 | j0 = j1; |
1615 | 0 | } |
1616 | 0 | } |
1617 | | |
1618 | | // record an event for this copy of the inputs so that the next graph using the same copy can wait for it |
1619 | 0 | if (split->n_inputs > 0) { |
1620 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1621 | 0 | ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend); |
1622 | 0 | } |
1623 | 0 | } |
1624 | 0 | } |
1625 | | |
1626 | 0 | return GGML_STATUS_SUCCESS; |
1627 | 0 | } |
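
The expert-grouping loop above (ids tensor -> used_ids bitset -> ranges) batches consecutive used expert IDs so that each contiguous run is transferred with a single ggml_backend_tensor_set_async call. Below is a standalone sketch of the same run-length grouping using only the standard library; for_each_used_range and std::vector<bool> are illustrative stand-ins, not ggml APIs.

#include <cstdint>
#include <cstdio>
#include <vector>

// call copy_range(first, last) once per maximal run of consecutive used IDs
static void for_each_used_range(const std::vector<bool> & used,
                                void (*copy_range)(int32_t first, int32_t last)) {
    const int32_t n = (int32_t) used.size();
    int32_t id = 0;
    while (id < n && !used[id]) {
        id++;
    }
    if (id == n) {
        return; // no expert is used
    }
    int32_t first_id = id;
    int32_t last_id  = id;
    for (++id; id < n; ++id) {
        if (!used[id]) {
            continue;
        }
        if (id == last_id + 1) {
            last_id = id; // extend the current run
            continue;
        }
        copy_range(first_id, last_id); // flush the finished run
        first_id = id;
        last_id  = id;
    }
    copy_range(first_id, last_id); // flush the final run
}

// e.g. used = {0,1,1,0,1} yields the ranges [1,2] and [4,4]:
// for_each_used_range(used, [](int32_t a, int32_t b) { printf("[%d, %d]\n", a, b); });
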
1628 | | |
1629 | | ggml_backend_sched_t ggml_backend_sched_new( |
1630 | | ggml_backend_t * backends, |
1631 | | ggml_backend_buffer_type_t * bufts, |
1632 | | int n_backends, |
1633 | | size_t graph_size, |
1634 | | bool parallel, |
1635 | 0 | bool op_offload) { |
1636 | 0 | GGML_ASSERT(n_backends > 0); |
1637 | 0 | GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS); |
1638 | 0 | GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU); |
1639 | |
|
1640 | 0 | struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched)); |
1641 | |
|
1642 | 0 | const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG"); |
1643 | 0 | sched->debug = GGML_SCHED_DEBUG ? atoi(GGML_SCHED_DEBUG) : 0; |
1644 | |
|
1645 | 0 | sched->debug_realloc = 0; |
1646 | | #ifdef GGML_SCHED_NO_REALLOC |
1647 | | sched->debug_realloc = 1; |
1648 | | #endif |
1649 | 0 | const char * GGML_SCHED_DEBUG_REALLOC = getenv("GGML_SCHED_DEBUG_REALLOC"); |
1650 | 0 | sched->debug_realloc = GGML_SCHED_DEBUG_REALLOC ? atoi(GGML_SCHED_DEBUG_REALLOC) : sched->debug_realloc; |
1651 | |
|
1652 | 0 | sched->n_backends = n_backends; |
1653 | 0 | sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1; |
1654 | | |
1655 | | // initialize hash table |
1656 | | // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead) |
1657 | 0 | sched->hash_set = ggml_hash_set_new(graph_size); |
1658 | 0 | sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0])); |
1659 | 0 | sched->hv_tensor_copies = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *)); |
1660 | |
|
1661 | 0 | const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph |
1662 | 0 | const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2; |
1663 | 0 | sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0])); |
1664 | 0 | sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0])); |
1665 | 0 | sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0])); |
1666 | 0 | sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0])); |
1667 | |
|
1668 | 0 | sched->debug_graph_size = 0; |
1669 | 0 | sched->debug_prev_graph_size = 0; |
1670 | |
|
1671 | 0 | sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false); |
1672 | 0 | sched->context_buffer = (char *) malloc(sched->context_buffer_size); |
1673 | |
|
1674 | 0 | const int initial_splits_capacity = 16; |
1675 | 0 | sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0])); |
1676 | 0 | sched->splits_capacity = initial_splits_capacity; |
1677 | |
|
1678 | 0 | for (int b = 0; b < n_backends; b++) { |
1679 | 0 | sched->backends[b] = backends[b]; |
1680 | 0 | sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]); |
1681 | 0 | GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b])); |
1682 | |
|
1683 | 0 | if (sched->n_copies > 1) { |
1684 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1685 | 0 | sched->events[b][c] = ggml_backend_event_new(backends[b]->device); |
1686 | 0 | } |
1687 | 0 | } |
1688 | 0 | } |
1689 | |
|
1690 | 0 | sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends); |
1691 | 0 | sched->op_offload = op_offload; |
1692 | |
|
1693 | 0 | ggml_backend_sched_reset(sched); |
1694 | |
|
1695 | 0 | return sched; |
1696 | 0 | } |
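
A minimal construction sketch for the function above, using a single CPU backend; it assumes the ggml-cpu header for ggml_backend_cpu_init, and passes bufts = NULL so each backend gets its default buffer type.

#include "ggml-backend.h"
#include "ggml-cpu.h" // assumed location of ggml_backend_cpu_init()

static ggml_backend_sched_t make_cpu_only_sched(void) {
    ggml_backend_t cpu = ggml_backend_cpu_init();
    ggml_backend_t backends[1] = { cpu };

    // the last backend must be a CPU backend (asserted above); NULL bufts selects
    // ggml_backend_get_default_buffer_type() per backend; parallel=false -> n_copies == 1
    ggml_backend_sched_t sched = ggml_backend_sched_new(
        backends, /*bufts=*/NULL, /*n_backends=*/1,
        /*graph_size=*/2048, /*parallel=*/false, /*op_offload=*/true);

    // the scheduler does not take ownership of the backends: the caller later calls
    // ggml_backend_sched_free(sched) and then ggml_backend_free(cpu)
    return sched;
}
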
1697 | | |
1698 | 0 | void ggml_backend_sched_free(ggml_backend_sched_t sched) { |
1699 | 0 | if (sched == NULL) { |
1700 | 0 | return; |
1701 | 0 | } |
1702 | 0 | for (int b = 0; b < sched->n_backends; b++) { |
1703 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1704 | 0 | ggml_backend_event_free(sched->events[b][c]); |
1705 | 0 | } |
1706 | 0 | } |
1707 | 0 | ggml_gallocr_free(sched->galloc); |
1708 | 0 | ggml_free(sched->ctx); |
1709 | 0 | ggml_hash_set_free(&sched->hash_set); |
1710 | 0 | free(sched->splits); |
1711 | 0 | free(sched->hv_tensor_backend_ids); |
1712 | 0 | free(sched->hv_tensor_copies); |
1713 | 0 | free(sched->node_backend_ids); |
1714 | 0 | free(sched->leaf_backend_ids); |
1715 | 0 | free(sched->prev_node_backend_ids); |
1716 | 0 | free(sched->prev_leaf_backend_ids); |
1717 | 0 | free(sched->context_buffer); |
1718 | 0 | free(sched->graph.nodes); |
1719 | 0 | free(sched->graph.leafs); |
1720 | 0 | free(sched); |
1721 | 0 | } |
1722 | | |
1723 | 0 | void ggml_backend_sched_reset(ggml_backend_sched_t sched) { |
1724 | 0 | GGML_ASSERT(sched); |
1725 | | // reset state for the next run |
1726 | 0 | if (!sched->is_reset) { |
1727 | 0 | ggml_hash_set_reset(&sched->hash_set); |
1728 | 0 | memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0])); |
1729 | 0 | memset(sched->hv_tensor_copies, 0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *)); |
1730 | 0 | sched->is_reset = true; |
1731 | 0 | } |
1732 | 0 | sched->is_alloc = false; |
1733 | 0 | } |
1734 | | |
1735 | 0 | void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes) { |
1736 | 0 | GGML_ASSERT(sched); |
1737 | 0 | GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); |
1738 | 0 | GGML_ASSERT(sizes); |
1739 | |
|
1740 | 0 | ggml_backend_sched_reset(sched); |
1741 | |
|
1742 | 0 | ggml_backend_sched_synchronize(sched); |
1743 | |
|
1744 | 0 | ggml_backend_sched_split_graph(sched, measure_graph); |
1745 | |
|
1746 | 0 | ggml_gallocr_reserve_n_size(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids, sizes); |
1747 | 0 | } |
1748 | | |
1749 | 0 | bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { |
1750 | 0 | GGML_ASSERT(sched); |
1751 | 0 | GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); |
1752 | |
|
1753 | 0 | ggml_backend_sched_synchronize(sched); |
1754 | |
|
1755 | 0 | ggml_backend_sched_split_graph(sched, measure_graph); |
1756 | |
|
1757 | 0 | if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) { |
1758 | 0 | return false; |
1759 | 0 | } |
1760 | | |
1761 | 0 | ggml_backend_sched_reset(sched); |
1762 | |
|
1763 | 0 | return true; |
1764 | 0 | } |
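
One plausible way to drive the reserve path above: build the largest graph the application expects, reserve once, then report how much compute-buffer memory each backend received. This is a sketch; reserve_and_report is not part of the API and needs <cstdio>.

static bool reserve_and_report(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
    if (!ggml_backend_sched_reserve(sched, measure_graph)) {
        return false; // reserving the compute buffers failed
    }
    for (int i = 0; i < ggml_backend_sched_get_n_backends(sched); i++) {
        ggml_backend_t backend = ggml_backend_sched_get_backend(sched, i);
        size_t size = ggml_backend_sched_get_buffer_size(sched, backend);
        fprintf(stderr, "%s: compute buffer size = %.2f MiB\n",
                ggml_backend_name(backend), size / 1024.0 / 1024.0);
    }
    return true;
}
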
1765 | | |
1766 | 0 | bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
1767 | 0 | GGML_ASSERT(sched); |
1768 | 0 | GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs); |
1769 | 0 | GGML_ASSERT(!sched->is_alloc); |
1770 | |
|
1771 | 0 | sched->cur_copy = sched->next_copy; |
1772 | 0 | sched->next_copy = (sched->next_copy + 1) % sched->n_copies; |
1773 | |
|
1774 | 0 | ggml_backend_sched_split_graph(sched, graph); |
1775 | |
|
1776 | 0 | if (!ggml_backend_sched_alloc_splits(sched)) { |
1777 | 0 | return false; |
1778 | 0 | } |
1779 | | |
1780 | 0 | sched->is_alloc = true; |
1781 | |
|
1782 | 0 | return true; |
1783 | 0 | } |
1784 | | |
1785 | 0 | enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
1786 | 0 | enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph); |
1787 | 0 | ggml_backend_sched_synchronize(sched); |
1788 | 0 | return err; |
1789 | 0 | } |
1790 | | |
1791 | 0 | enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
1792 | 0 | GGML_ASSERT(sched); |
1793 | 0 | if (!sched->is_reset && !sched->is_alloc) { |
1794 | 0 | ggml_backend_sched_reset(sched); |
1795 | 0 | } |
1796 | |
|
1797 | 0 | if (!sched->is_alloc) { |
1798 | 0 | if (!ggml_backend_sched_alloc_graph(sched, graph)) { |
1799 | 0 | return GGML_STATUS_ALLOC_FAILED; |
1800 | 0 | } |
1801 | 0 | } |
1802 | | |
1803 | 0 | return ggml_backend_sched_compute_splits(sched); |
1804 | 0 | } |
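
A sketch of the typical per-batch driver loop around the two entry points above; build_graph is a hypothetical application helper that records the current batch into a ggml_cgraph.

struct ggml_cgraph * build_graph(struct ggml_context * ctx); // hypothetical, provided by the application

static enum ggml_status eval_batch(ggml_backend_sched_t sched, struct ggml_context * ctx) {
    ggml_backend_sched_reset(sched); // drop the node assignments of the previous run

    struct ggml_cgraph * gf = build_graph(ctx);

    // split + allocate + launch the splits; does not wait for completion
    enum ggml_status status = ggml_backend_sched_graph_compute_async(sched, gf);
    if (status != GGML_STATUS_SUCCESS) {
        return status;
    }

    // ... unrelated CPU work can overlap with the backend computation here ...

    ggml_backend_sched_synchronize(sched); // wait before reading any outputs
    return GGML_STATUS_SUCCESS;
}
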
1805 | | |
1806 | 0 | void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) { |
1807 | 0 | GGML_ASSERT(sched); |
1808 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
1809 | 0 | ggml_backend_synchronize(sched->backends[i]); |
1810 | 0 | } |
1811 | 0 | if (!sched->is_alloc) { |
1812 | | // if the graph is not already allocated, always use copy 0 after a synchronization |
1813 | | // this ensures that during generation the same copy is used every time, |
1814 | | // which avoids changes in the graph that could cause CUDA graphs (or similar backend graph captures) to be disabled |
1815 | 0 | sched->next_copy = 0; |
1816 | 0 | } |
1817 | 0 | } |
1818 | | |
1819 | 0 | void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { |
1820 | 0 | GGML_ASSERT(sched); |
1821 | 0 | sched->callback_eval = callback; |
1822 | 0 | sched->callback_eval_user_data = user_data; |
1823 | 0 | } |
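
A sketch of a callback matching the two-phase protocol used by ggml_backend_sched_compute_splits above: it is first called with ask == true to decide whether the node's data is needed, and again with ask == false once the node has been computed and the backend synchronized. The "attn_" name filter is only an example; the sketch needs <cstring> and <cstdio>.

static bool debug_eval_cb(struct ggml_tensor * t, bool ask, void * user_data) {
    (void) user_data;
    if (ask) {
        // only request the data of nodes we are interested in
        return strncmp(t->name, "attn_", 5) == 0;
    }
    // the node has been computed at this point
    fprintf(stderr, "computed %s (%s)\n", t->name, ggml_op_name(t->op));
    return true; // returning false stops the remaining nodes of the current split
}

// ggml_backend_sched_set_eval_callback(sched, debug_eval_cb, NULL);
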
1824 | | |
1825 | 0 | int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) { |
1826 | 0 | GGML_ASSERT(sched); |
1827 | 0 | return sched->n_splits; |
1828 | 0 | } |
1829 | | |
1830 | 0 | int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) { |
1831 | 0 | GGML_ASSERT(sched); |
1832 | 0 | return sched->n_copies; |
1833 | 0 | } |
1834 | | |
1835 | 0 | int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) { |
1836 | 0 | GGML_ASSERT(sched); |
1837 | 0 | return sched->n_backends; |
1838 | 0 | } |
1839 | | |
1840 | 0 | ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) { |
1841 | 0 | GGML_ASSERT(sched); |
1842 | 0 | GGML_ASSERT(i >= 0 && i < sched->n_backends); |
1843 | 0 | return sched->backends[i]; |
1844 | 0 | } |
1845 | | |
1846 | 0 | ggml_backend_buffer_type_t ggml_backend_sched_get_buffer_type(ggml_backend_sched_t sched, ggml_backend_t backend) { |
1847 | 0 | GGML_ASSERT(sched); |
1848 | 0 | int backend_index = ggml_backend_sched_backend_id(sched, backend); |
1849 | 0 | GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
1850 | |
|
1851 | 0 | return sched->bufts[backend_index]; |
1852 | 0 | } |
1853 | | |
1854 | 0 | size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) { |
1855 | 0 | GGML_ASSERT(sched); |
1856 | 0 | int backend_index = ggml_backend_sched_backend_id(sched, backend); |
1857 | 0 | GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
1858 | |
|
1859 | 0 | return ggml_gallocr_get_buffer_size(sched->galloc, backend_index); |
1860 | 0 | } |
1861 | | |
1862 | 0 | void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) { |
1863 | 0 | GGML_ASSERT(sched); |
1864 | 0 | int backend_index = ggml_backend_sched_backend_id(sched, backend); |
1865 | 0 | GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
1866 | 0 | tensor_backend_id(node) = backend_index; |
1867 | 0 | SET_CAUSE(node, "usr"); |
1868 | 0 | sched->is_reset = false; |
1869 | 0 | } |
1870 | | |
1871 | 0 | ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) { |
1872 | 0 | GGML_ASSERT(sched); |
1873 | 0 | int backend_index = tensor_backend_id(node); |
1874 | 0 | if (backend_index == -1) { |
1875 | 0 | return NULL; |
1876 | 0 | } |
1877 | 0 | return sched->backends[backend_index]; |
1878 | 0 | } |
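
A small debugging sketch built on the two functions above: pin a node to a chosen backend before the graph is allocated, then dump where every node ended up. dump_assignments is illustrative, not a ggml function.

static void dump_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * gf) {
    for (int i = 0; i < gf->n_nodes; i++) {
        struct ggml_tensor * node = gf->nodes[i];
        ggml_backend_t backend = ggml_backend_sched_get_tensor_backend(sched, node);
        fprintf(stderr, "%-32s -> %s\n", node->name, backend ? ggml_backend_name(backend) : "(unassigned)");
    }
    fprintf(stderr, "n_splits = %d\n", ggml_backend_sched_get_n_splits(sched));
}

// usage sketch:
//   ggml_backend_sched_set_tensor_backend(sched, some_node, cpu_backend);
//   ggml_backend_sched_alloc_graph(sched, gf);
//   dump_assignments(sched, gf);
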
1879 | | |
1880 | | // utils |
1881 | | |
1882 | 0 | enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor) { |
1883 | 0 | GGML_ASSERT(tensor); |
1884 | 0 | GGML_ASSERT(tensor->buffer == NULL); |
1885 | 0 | GGML_ASSERT(tensor->view_src != NULL); |
1886 | 0 | GGML_ASSERT(tensor->view_src->buffer != NULL); |
1887 | 0 | GGML_ASSERT(tensor->view_src->data != NULL); |
1888 | |
|
1889 | 0 | tensor->buffer = tensor->view_src->buffer; |
1890 | 0 | tensor->data = (char *)tensor->view_src->data + tensor->view_offs; |
1891 | 0 | return ggml_backend_buffer_init_tensor(tensor->buffer, tensor); |
1892 | 0 | } |
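
A sketch of when ggml_backend_view_init is called by hand: the parent tensor t is assumed to have been created in a no_alloc context and already allocated in a backend buffer (for example via ggml_backend_alloc_ctx_tensors), while the view is created afterwards and therefore never seen by ggml-alloc.

// view of the first 16 elements of the already-allocated tensor t
struct ggml_tensor * v = ggml_view_1d(ctx, t, 16, 0);

// the view has no buffer yet; bind it to t's buffer and data
enum ggml_status status = ggml_backend_view_init(v);
GGML_ASSERT(status == GGML_STATUS_SUCCESS);
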
1893 | | |
1894 | 0 | enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) { |
1895 | 0 | GGML_ASSERT(tensor); |
1896 | 0 | GGML_ASSERT(tensor->buffer == NULL); |
1897 | 0 | GGML_ASSERT(tensor->data == NULL); |
1898 | 0 | GGML_ASSERT(tensor->view_src == NULL); |
1899 | 0 | GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer)); |
1900 | 0 | GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <= |
1901 | 0 | (char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer)); |
1902 | |
|
1903 | 0 | tensor->buffer = buffer; |
1904 | 0 | tensor->data = addr; |
1905 | 0 | return ggml_backend_buffer_init_tensor(buffer, tensor); |
1906 | 0 | } |
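
An illustrative use of ggml_backend_tensor_alloc: create a tensor in a no_alloc context, allocate a raw buffer of exactly the required size, and place the tensor manually at the base of that buffer. This is a sketch using the CPU buffer type defined later in this file.

static void place_tensor_example(void) {
    struct ggml_init_params params = {
        /* .mem_size   = */ 2*ggml_tensor_overhead(),
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true, // metadata only, the data lives in the backend buffer
    };
    struct ggml_context * ctx = ggml_init(params);
    struct ggml_tensor  * t   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 256);

    ggml_backend_buffer_type_t buft   = ggml_backend_cpu_buffer_type();
    ggml_backend_buffer_t      buffer = ggml_backend_buft_alloc_buffer(buft, ggml_backend_buft_get_alloc_size(buft, t));

    // place the tensor at the base of the buffer; additional tensors would have to
    // respect ggml_backend_buft_get_alignment(buft) between them
    enum ggml_status status = ggml_backend_tensor_alloc(buffer, t, ggml_backend_buffer_get_base(buffer));
    GGML_ASSERT(status == GGML_STATUS_SUCCESS);

    const float ones[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
    ggml_backend_tensor_set(t, ones, 0, sizeof(ones)); // write the first 4 elements

    ggml_free(ctx);
    ggml_backend_buffer_free(buffer);
}
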
1907 | | |
1908 | | static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies, |
1909 | 0 | struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) { |
1910 | |
|
1911 | 0 | GGML_ASSERT(src != NULL); |
1912 | 0 | GGML_ASSERT(src->data && "graph must be allocated"); |
1913 | |
|
1914 | 0 | size_t id = ggml_hash_insert(&hash_set, src); |
1915 | 0 | if (id == GGML_HASHSET_ALREADY_EXISTS) { |
1916 | 0 | return node_copies[ggml_hash_find(&hash_set, src)]; |
1917 | 0 | } |
1918 | | |
1919 | 0 | struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src); |
1920 | 0 | if (src->view_src != NULL) { |
1921 | 0 | dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src); |
1922 | 0 | dst->view_offs = src->view_offs; |
1923 | 0 | } |
1924 | 0 | dst->op = src->op; |
1925 | 0 | memcpy(dst->op_params, src->op_params, sizeof(dst->op_params)); |
1926 | 0 | ggml_set_name(dst, src->name); |
1927 | | |
1928 | | // copy src |
1929 | 0 | for (int i = 0; i < GGML_MAX_SRC; i++) { |
1930 | 0 | struct ggml_tensor * s = src->src[i]; |
1931 | 0 | if (s == NULL) { |
1932 | 0 | continue; |
1933 | 0 | } |
1934 | 0 | dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s); |
1935 | 0 | } |
1936 | |
|
1937 | 0 | node_copies[id] = dst; |
1938 | 0 | return dst; |
1939 | 0 | } |
1940 | | |
1941 | 0 | static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) { |
1942 | 0 | size_t id = ggml_hash_find(hash_set, src); |
1943 | 0 | if (node_init[id]) { |
1944 | 0 | return; |
1945 | 0 | } |
1946 | 0 | node_init[id] = true; |
1947 | |
|
1948 | 0 | struct ggml_tensor * dst = node_copies[id]; |
1949 | 0 | if (dst->view_src != NULL) { |
1950 | 0 | graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src); |
1951 | 0 | enum ggml_status status = ggml_backend_view_init(dst); |
1952 | 0 | GGML_ASSERT(status == GGML_STATUS_SUCCESS); |
1953 | 0 | } |
1954 | 0 | else { |
1955 | 0 | ggml_backend_tensor_copy(src, dst); |
1956 | 0 | } |
1957 | | |
1958 | | // init src |
1959 | 0 | for (int i = 0; i < GGML_MAX_SRC; i++) { |
1960 | 0 | struct ggml_tensor * s = src->src[i]; |
1961 | 0 | if (s == NULL) { |
1962 | 0 | continue; |
1963 | 0 | } |
1964 | 0 | graph_copy_init_tensor(hash_set, node_copies, node_init, s); |
1965 | 0 | } |
1966 | 0 | } |
1967 | | |
1968 | 0 | struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) { |
1969 | 0 | GGML_ASSERT(graph); |
1970 | 0 | struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size); |
1971 | 0 | struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT |
1972 | 0 | bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0])); |
1973 | |
|
1974 | 0 | struct ggml_init_params params = { |
1975 | 0 | /* .mem_size = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false), |
1976 | 0 | /* .mem_buffer = */ NULL, |
1977 | 0 | /* .no_alloc = */ true |
1978 | 0 | }; |
1979 | |
|
1980 | 0 | struct ggml_context * ctx_allocated = ggml_init(params); |
1981 | 0 | struct ggml_context * ctx_unallocated = ggml_init(params); |
1982 | |
|
1983 | 0 | if (ctx_allocated == NULL || ctx_unallocated == NULL) { |
1984 | 0 | GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__); |
1985 | 0 | ggml_hash_set_free(&hash_set); |
1986 | 0 | free(node_copies); |
1987 | 0 | free(node_init); |
1988 | 0 | ggml_free(ctx_allocated); |
1989 | 0 | ggml_free(ctx_unallocated); |
1990 | 0 | return { |
1991 | 0 | /* .buffer = */ NULL, |
1992 | 0 | /* .ctx_allocated = */ NULL, |
1993 | 0 | /* .ctx_unallocated = */ NULL, |
1994 | 0 | /* .graph = */ NULL, |
1995 | 0 | }; |
1996 | 0 | } |
1997 | | |
1998 | | // dup nodes |
1999 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
2000 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
2001 | 0 | graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node); |
2002 | 0 | } |
2003 | | |
2004 | | // allocate nodes |
2005 | 0 | ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend); |
2006 | 0 | if (buffer == NULL) { |
2007 | 0 | GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__); |
2008 | 0 | ggml_hash_set_free(&hash_set); |
2009 | 0 | free(node_copies); |
2010 | 0 | free(node_init); |
2011 | 0 | ggml_free(ctx_allocated); |
2012 | 0 | ggml_free(ctx_unallocated); |
2013 | 0 | return { |
2014 | 0 | /* .buffer = */ NULL, |
2015 | 0 | /* .ctx_allocated = */ NULL, |
2016 | 0 | /* .ctx_unallocated = */ NULL, |
2017 | 0 | /* .graph = */ NULL, |
2018 | 0 | }; |
2019 | 0 | } |
2020 | | |
2021 | | //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024); |
2022 | | |
2023 | | // copy data and init views |
2024 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
2025 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
2026 | 0 | graph_copy_init_tensor(&hash_set, node_copies, node_init, node); |
2027 | 0 | } |
2028 | | |
2029 | | // build graph copy |
2030 | 0 | struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false); |
2031 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
2032 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
2033 | 0 | struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)]; |
2034 | 0 | graph_copy->nodes[i] = node_copy; |
2035 | 0 | } |
2036 | 0 | graph_copy->n_nodes = graph->n_nodes; |
2037 | |
|
2038 | 0 | ggml_hash_set_free(&hash_set); |
2039 | 0 | free(node_copies); |
2040 | 0 | free(node_init); |
2041 | |
|
2042 | 0 | return { |
2043 | 0 | /* .buffer = */ buffer, |
2044 | 0 | /* .ctx_allocated = */ ctx_allocated, |
2045 | 0 | /* .ctx_unallocated = */ ctx_unallocated, |
2046 | 0 | /* .graph = */ graph_copy, |
2047 | 0 | }; |
2048 | 0 | } |
2049 | | |
2050 | 0 | void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) { |
2051 | 0 | ggml_backend_buffer_free(copy.buffer); |
2052 | 0 | ggml_free(copy.ctx_allocated); |
2053 | 0 | ggml_free(copy.ctx_unallocated); |
2054 | 0 | } |
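
A sketch of how the graph-copy helpers above fit together: duplicate an already-built and allocated graph onto another backend, compute the copy there, and release it again.

static bool compute_on_other_backend(ggml_backend_t other, struct ggml_cgraph * graph) {
    struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(other, graph);
    if (copy.buffer == NULL) {
        return false; // allocating the copy on the target backend failed
    }

    enum ggml_status status = ggml_backend_graph_compute(other, copy.graph);

    ggml_backend_graph_copy_free(copy);
    return status == GGML_STATUS_SUCCESS;
}
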
2055 | | |
2056 | 0 | bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node) { |
2057 | 0 | struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph); |
2058 | 0 | if (copy.buffer == NULL) { |
2059 | 0 | return false; |
2060 | 0 | } |
2061 | | |
2062 | 0 | struct ggml_cgraph * g1 = graph; |
2063 | 0 | struct ggml_cgraph * g2 = copy.graph; |
2064 | |
|
2065 | 0 | assert(g1->n_nodes == g2->n_nodes); |
2066 | |
|
2067 | 0 | if (test_node != nullptr) { |
2068 | | // Compute the whole graph and only test the output for a specific tensor |
2069 | 0 | ggml_backend_graph_compute(backend1, g1); |
2070 | 0 | ggml_backend_graph_compute(backend2, g2); |
2071 | |
|
2072 | 0 | int test_node_idx = -1; |
2073 | 0 | for (int i = 0; i < g1->n_nodes; i++) { |
2074 | 0 | struct ggml_tensor * t1 = g1->nodes[i]; |
2075 | 0 | if (t1 == test_node) { |
2076 | 0 | test_node_idx = i; |
2077 | 0 | break; |
2078 | 0 | } |
2079 | 0 | } |
2080 | 0 | GGML_ASSERT(test_node_idx != -1); |
2081 | |
|
2082 | 0 | callback(test_node_idx, g1->nodes[test_node_idx], g2->nodes[test_node_idx], user_data); |
2083 | 0 | } else { |
2084 | 0 | for (int i = 0; i < g1->n_nodes; i++) { |
2085 | 0 | struct ggml_tensor * t1 = g1->nodes[i]; |
2086 | 0 | struct ggml_tensor * t2 = g2->nodes[i]; |
2087 | |
|
2088 | 0 | assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); |
2089 | |
|
2090 | 0 | struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); |
2091 | 0 | struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); |
2092 | |
|
2093 | 0 | ggml_backend_graph_compute(backend1, &g1v); |
2094 | 0 | ggml_backend_graph_compute(backend2, &g2v); |
2095 | |
|
2096 | 0 | if (ggml_is_view_op(t1->op)) { |
2097 | 0 | continue; |
2098 | 0 | } |
2099 | | |
2100 | | // compare results, e.g. calculate the rms of the difference |
2101 | 0 | if (!callback(i, t1, t2, user_data)) { |
2102 | 0 | break; |
2103 | 0 | } |
2104 | 0 | } |
2105 | 0 | } |
2106 | 0 | ggml_backend_graph_copy_free(copy); |
2107 | |
|
2108 | 0 | return true; |
2109 | 0 | } |
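
A sketch of a comparison callback for the function above: it reads both tensors back through ggml_backend_tensor_get (the data may live in device memory) and reports the maximum absolute difference; only contiguous F32 tensors are handled to keep the example short. It needs <cmath>, <vector>, <algorithm> and <cstdio>.

static bool compare_max_abs_diff(int node_idx, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
    (void) user_data;
    if (t1->type != GGML_TYPE_F32 || !ggml_is_contiguous(t1)) {
        return true; // skip what this simple sketch does not handle
    }
    const size_t n = (size_t) ggml_nelements(t1);
    std::vector<float> a(n), b(n);
    ggml_backend_tensor_get(t1, a.data(), 0, n*sizeof(float));
    ggml_backend_tensor_get(t2, b.data(), 0, n*sizeof(float));

    float max_diff = 0.0f;
    for (size_t i = 0; i < n; i++) {
        max_diff = std::max(max_diff, std::fabs(a[i] - b[i]));
    }
    fprintf(stderr, "node %3d %-16s max |t1 - t2| = %g\n", node_idx, ggml_op_name(t1->op), max_diff);
    return true; // keep comparing the remaining nodes
}

// ggml_backend_compare_graph_backend(backend1, backend2, graph, compare_max_abs_diff, NULL, NULL);
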
2110 | | |
2111 | | // CPU backend - buffer |
2112 | | |
2113 | 0 | static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { |
2114 | 0 | GGML_ASSERT(buffer); |
2115 | 0 | uintptr_t data = (uintptr_t)buffer->context; |
2116 | | |
2117 | | // align the buffer |
2118 | 0 | if (data % TENSOR_ALIGNMENT != 0) { |
2119 | 0 | data = GGML_PAD(data, TENSOR_ALIGNMENT); |
2120 | 0 | } |
2121 | |
|
2122 | 0 | return (void *)data; |
2123 | 0 | } |
2124 | | |
2125 | 0 | static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { |
2126 | 0 | GGML_ASSERT(buffer); |
2127 | 0 | ggml_aligned_free(buffer->context, buffer->size); |
2128 | 0 | } |
2129 | | |
2130 | 0 | static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { |
2131 | 0 | GGML_ASSERT(tensor); |
2132 | 0 | memset((char *)tensor->data + offset, value, size); |
2133 | |
|
2134 | 0 | GGML_UNUSED(buffer); |
2135 | 0 | } |
2136 | | |
2137 | 0 | static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
2138 | 0 | GGML_ASSERT(tensor); |
2139 | 0 | memcpy((char *)tensor->data + offset, data, size); |
2140 | |
|
2141 | 0 | GGML_UNUSED(buffer); |
2142 | 0 | } |
2143 | | |
2144 | 0 | static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
2145 | 0 | GGML_ASSERT(tensor); |
2146 | 0 | memcpy(data, (const char *)tensor->data + offset, size); |
2147 | |
|
2148 | 0 | GGML_UNUSED(buffer); |
2149 | 0 | } |
2150 | | |
2151 | 0 | static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { |
2152 | 0 | GGML_ASSERT(src); |
2153 | 0 | if (ggml_backend_buffer_is_host(src->buffer)) { |
2154 | 0 | memcpy(dst->data, src->data, ggml_nbytes(src)); |
2155 | 0 | return true; |
2156 | 0 | } |
2157 | 0 | return false; |
2158 | | |
2159 | 0 | GGML_UNUSED(buffer); |
2160 | 0 | } |
2161 | | |
2162 | 0 | static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
2163 | 0 | GGML_ASSERT(buffer); |
2164 | 0 | memset(buffer->context, value, buffer->size); |
2165 | 0 | } |
2166 | | |
2167 | | static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = { |
2168 | | /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer, |
2169 | | /* .get_base = */ ggml_backend_cpu_buffer_get_base, |
2170 | | /* .init_tensor = */ NULL, // no initialization required |
2171 | | /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor, |
2172 | | /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, |
2173 | | /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, |
2174 | | /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, |
2175 | | /* .clear = */ ggml_backend_cpu_buffer_clear, |
2176 | | /* .reset = */ NULL, |
2177 | | }; |
2178 | | |
2179 | | static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = { |
2180 | | /* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed |
2181 | | /* .get_base = */ ggml_backend_cpu_buffer_get_base, |
2182 | | /* .init_tensor = */ NULL, // no initialization required |
2183 | | /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor, |
2184 | | /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, |
2185 | | /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, |
2186 | | /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, |
2187 | | /* .clear = */ ggml_backend_cpu_buffer_clear, |
2188 | | /* .reset = */ NULL, |
2189 | | }; |
2190 | | |
2191 | | // CPU backend buffer type |
2192 | | |
2193 | | // this buffer type is defined here to make it available to all backends |
2194 | | |
2195 | 0 | static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) { |
2196 | 0 | return "CPU"; |
2197 | | |
2198 | 0 | GGML_UNUSED(buft); |
2199 | 0 | } |
2200 | | |
2201 | 0 | static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { |
2202 | 0 | void * data = ggml_aligned_malloc(size); |
2203 | |
|
2204 | 0 | if (data == NULL) { |
2205 | 0 | GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size); |
2206 | 0 | return NULL; |
2207 | 0 | } |
2208 | | |
2209 | 0 | return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size); |
2210 | 0 | } |
2211 | | |
2212 | 0 | static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { |
2213 | 0 | return TENSOR_ALIGNMENT; |
2214 | | |
2215 | 0 | GGML_UNUSED(buft); |
2216 | 0 | } |
2217 | | |
2218 | 0 | static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) { |
2219 | 0 | return true; |
2220 | | |
2221 | 0 | GGML_UNUSED(buft); |
2222 | 0 | } |
2223 | | |
2224 | 0 | ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) { |
2225 | 0 | static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { |
2226 | 0 | /* .iface = */ { |
2227 | 0 | /* .get_name = */ ggml_backend_cpu_buffer_type_get_name, |
2228 | 0 | /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, |
2229 | 0 | /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, |
2230 | 0 | /* .get_max_size = */ NULL, // defaults to SIZE_MAX |
2231 | 0 | /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes |
2232 | 0 | /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, |
2233 | 0 | }, |
2234 | 0 | /* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), |
2235 | 0 | /* .context = */ NULL, |
2236 | 0 | }; |
2237 | |
|
2238 | 0 | return &ggml_backend_cpu_buffer_type; |
2239 | 0 | } |
2240 | | |
2241 | 0 | static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) { |
2242 | 0 | return "CPU_Mapped"; |
2243 | | |
2244 | 0 | GGML_UNUSED(buft); |
2245 | 0 | } |
2246 | | |
2247 | 0 | static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) { |
2248 | 0 | static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { |
2249 | 0 | /* .iface = */ { |
2250 | 0 | /* .get_name = */ ggml_backend_cpu_buffer_from_ptr_type_get_name, |
2251 | 0 | /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, |
2252 | 0 | /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, |
2253 | 0 | /* .get_max_size = */ NULL, // defaults to SIZE_MAX |
2254 | 0 | /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes |
2255 | 0 | /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, |
2256 | 0 | }, |
2257 | 0 | /* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), |
2258 | 0 | /* .context = */ NULL, |
2259 | 0 | }; |
2260 | |
|
2261 | 0 | return &ggml_backend_cpu_buffer_type; |
2262 | 0 | } |
2263 | | |
2264 | 0 | ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) { |
2265 | 0 | GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned"); |
2266 | 0 | return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size); |
2267 | 0 | } |
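
A sketch of wrapping caller-owned memory with the function above; the 64-byte alignment of the static block is assumed to satisfy TENSOR_ALIGNMENT, and since free_buffer is NULL in ggml_backend_cpu_buffer_from_ptr_i, freeing the wrapper leaves the memory untouched.

static ggml_backend_buffer_t wrap_scratch_buffer(void) {
    // caller-owned scratch memory; the buffer created below does not take ownership
    static alignas(64) uint8_t scratch[1u << 20];

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(scratch, sizeof(scratch));

    // tensors can now be placed into it with ggml_backend_tensor_alloc();
    // ggml_backend_buffer_free(buffer) later releases only the wrapper object
    return buffer;
}
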