/src/llama.cpp/ggml/src/ggml-backend.cpp
Line | Count | Source |
1 | | // Note: porting this file to C++ is a work in progress |
2 | | |
3 | | #ifdef _WIN32 |
4 | | #define WIN32_LEAN_AND_MEAN |
5 | | #ifndef NOMINMAX |
6 | | # define NOMINMAX |
7 | | #endif |
8 | | #include <windows.h> |
9 | | #endif |
10 | | |
11 | | #include "ggml-backend.h" |
12 | | #include "ggml-backend-impl.h" |
13 | | #include "ggml-alloc.h" |
14 | | #include "ggml-impl.h" |
15 | | |
16 | | #include <assert.h> |
17 | | #include <limits.h> |
18 | | #include <stdarg.h> |
19 | | #include <stdio.h> |
20 | | #include <stdlib.h> |
21 | | #include <string.h> |
22 | | #include <algorithm> |
23 | | #include <vector> |
24 | | |
25 | | #ifdef __APPLE__ |
26 | | #include <sys/types.h> |
27 | | #include <sys/sysctl.h> |
28 | | #endif |
29 | | |
30 | | |
31 | | // backend buffer type |
32 | | |
33 | 0 | const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) { |
34 | 0 | GGML_ASSERT(buft); |
35 | 0 | return buft->iface.get_name(buft); |
36 | 0 | } |
37 | | |
38 | 0 | ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { |
39 | 0 | GGML_ASSERT(buft); |
40 | 0 | if (size == 0) { |
41 | | // return a dummy buffer for zero-sized allocations |
42 | 0 | return ggml_backend_buffer_init(buft, {}, NULL, 0); |
43 | 0 | } |
44 | 0 | return buft->iface.alloc_buffer(buft, size); |
45 | 0 | } |
46 | | |
47 | 0 | size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) { |
48 | 0 | GGML_ASSERT(buft); |
49 | 0 | return buft->iface.get_alignment(buft); |
50 | 0 | } |
51 | | |
52 | 0 | size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) { |
53 | 0 | GGML_ASSERT(buft); |
54 | | // get_max_size is optional, defaults to SIZE_MAX |
55 | 0 | if (buft->iface.get_max_size) { |
56 | 0 | return buft->iface.get_max_size(buft); |
57 | 0 | } |
58 | 0 | return SIZE_MAX; |
59 | 0 | } |
60 | | |
61 | 0 | size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) { |
62 | 0 | GGML_ASSERT(buft); |
63 | | // get_alloc_size is optional, defaults to ggml_nbytes |
64 | 0 | if (buft->iface.get_alloc_size) { |
65 | 0 | size_t size = buft->iface.get_alloc_size(buft, tensor); |
66 | 0 | assert(size >= ggml_nbytes(tensor)); |
67 | 0 | return size; |
68 | 0 | } |
69 | 0 | return ggml_nbytes(tensor); |
70 | 0 | } |
71 | | |
72 | 0 | bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) { |
73 | 0 | GGML_ASSERT(buft); |
74 | 0 | if (buft->iface.is_host) { |
75 | 0 | return buft->iface.is_host(buft); |
76 | 0 | } |
77 | 0 | return false; |
78 | 0 | } |
79 | | |
80 | 0 | ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) { |
81 | 0 | GGML_ASSERT(buft); |
82 | 0 | return buft->device; |
83 | 0 | } |
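 | | // Usage sketch (editorial illustration, not part of this file): querying a |
 | | // buffer type and allocating from it, assuming `backend` was created elsewhere |
 | | // (e.g. with ggml_backend_cpu_init()): |
 | | //   ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend); |
 | | //   printf("%s: alignment=%zu, max_size=%zu, host=%d\n", |
 | | //       ggml_backend_buft_name(buft), |
 | | //       ggml_backend_buft_get_alignment(buft), |
 | | //       ggml_backend_buft_get_max_size(buft), |
 | | //       ggml_backend_buft_is_host(buft)); |
 | | //   ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 16*1024*1024); |
 | | //   // a size of 0 is valid and returns the dummy buffer described above |
 | | //   ggml_backend_buffer_free(buf); |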
84 | | |
85 | | // backend buffer |
86 | | |
87 | | ggml_backend_buffer_t ggml_backend_buffer_init( |
88 | | ggml_backend_buffer_type_t buft, |
89 | | struct ggml_backend_buffer_i iface, |
90 | | void * context, |
91 | 0 | size_t size) { |
92 | 0 | ggml_backend_buffer_t buffer = new ggml_backend_buffer { |
93 | 0 | /* .interface = */ iface, |
94 | 0 | /* .buft = */ buft, |
95 | 0 | /* .context = */ context, |
96 | 0 | /* .size = */ size, |
97 | 0 | /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY |
98 | 0 | }; |
99 | |
100 | 0 | return buffer; |
101 | 0 | } |
102 | | |
103 | 0 | const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) { |
104 | 0 | return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer)); |
105 | 0 | } |
106 | | |
107 | 0 | void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { |
108 | 0 | if (buffer == NULL) { |
109 | 0 | return; |
110 | 0 | } |
111 | | |
112 | 0 | if (buffer->iface.free_buffer != NULL) { |
113 | 0 | buffer->iface.free_buffer(buffer); |
114 | 0 | } |
115 | 0 | delete buffer; |
116 | 0 | } |
117 | | |
118 | 0 | size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { |
119 | 0 | GGML_ASSERT(buffer); |
120 | 0 | return buffer->size; |
121 | 0 | } |
122 | | |
123 | 0 | void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) { |
124 | 0 | GGML_ASSERT(buffer); |
125 | | // get_base is optional if the buffer is zero-sized |
126 | 0 | if (!ggml_backend_buffer_is_meta(buffer) && buffer->size == 0) { |
127 | 0 | return NULL; |
128 | 0 | } |
129 | | |
130 | | // FIXME JG: a multi_buffer has a non-zero size, according to the above comment get_base is not optional, |
131 | | // I don't know whether the above comment is correct |
132 | 0 | if (!buffer->iface.get_base) { |
133 | 0 | return NULL; |
134 | 0 | } |
135 | | |
136 | 0 | void * base = buffer->iface.get_base(buffer); |
137 | |
138 | 0 | GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL"); |
139 | |
140 | 0 | return base; |
141 | 0 | } |
142 | | |
143 | 0 | enum ggml_status ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { |
144 | 0 | GGML_ASSERT(buffer); |
145 | | // init_tensor is optional |
146 | 0 | if (buffer->iface.init_tensor) { |
147 | 0 | return buffer->iface.init_tensor(buffer, tensor); |
148 | 0 | } |
149 | 0 | return GGML_STATUS_SUCCESS; |
150 | 0 | } |
151 | | |
152 | 0 | void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
153 | 0 | GGML_ASSERT(buffer); |
154 | | // clear is optional if the buffer is zero-sized |
155 | 0 | if (buffer->size == 0) { |
156 | 0 | return; |
157 | 0 | } |
158 | | |
159 | 0 | buffer->iface.clear(buffer, value); |
160 | 0 | } |
161 | | |
162 | 0 | size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) { |
163 | 0 | return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer)); |
164 | 0 | } |
165 | | |
166 | 0 | size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) { |
167 | 0 | return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer)); |
168 | 0 | } |
169 | | |
170 | 0 | size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor) { |
171 | 0 | return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor); |
172 | 0 | } |
173 | | |
174 | 0 | bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) { |
175 | 0 | return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer)); |
176 | 0 | } |
177 | | |
178 | 0 | void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { |
179 | 0 | GGML_ASSERT(buffer); |
180 | 0 | buffer->usage = usage; |
181 | | |
182 | | // FIXME: add a generic callback to the buffer interface |
183 | 0 | if (ggml_backend_buffer_is_multi_buffer(buffer)) { |
184 | 0 | ggml_backend_multi_buffer_set_usage(buffer, usage); |
185 | 0 | } |
186 | 0 | } |
187 | | |
188 | 0 | enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) { |
189 | 0 | GGML_ASSERT(buffer); |
190 | 0 | return buffer->usage; |
191 | 0 | } |
192 | | |
193 | 0 | ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) { |
194 | 0 | GGML_ASSERT(buffer); |
195 | 0 | return buffer->buft; |
196 | 0 | } |
197 | | |
198 | 0 | void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) { |
199 | 0 | GGML_ASSERT(buffer); |
200 | 0 | if (buffer->iface.reset) { |
201 | 0 | buffer->iface.reset(buffer); |
202 | 0 | } |
203 | 0 | } |
204 | | |
205 | 0 | bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) { |
206 | 0 | ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer; |
207 | 0 | if (dst_buf->iface.cpy_tensor) { |
208 | 0 | return dst_buf->iface.cpy_tensor(dst_buf, src, dst); |
209 | 0 | } |
210 | 0 | return false; |
211 | 0 | } |
212 | | |
213 | | // backend |
214 | | |
215 | 0 | ggml_guid_t ggml_backend_guid(ggml_backend_t backend) { |
216 | 0 | if (backend == NULL) { |
217 | 0 | return NULL; |
218 | 0 | } |
219 | 0 | return backend->guid; |
220 | 0 | } |
221 | | |
222 | 0 | const char * ggml_backend_name(ggml_backend_t backend) { |
223 | 0 | if (backend == NULL) { |
224 | 0 | return "NULL"; |
225 | 0 | } |
226 | 0 | return backend->iface.get_name(backend); |
227 | 0 | } |
228 | | |
229 | 0 | void ggml_backend_free(ggml_backend_t backend) { |
230 | 0 | if (backend == NULL) { |
231 | 0 | return; |
232 | 0 | } |
233 | | |
234 | 0 | backend->iface.free(backend); |
235 | 0 | } |
236 | | |
237 | 0 | ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) { |
238 | 0 | GGML_ASSERT(backend); |
239 | 0 | return ggml_backend_dev_buffer_type(backend->device); |
240 | 0 | } |
241 | | |
242 | 0 | ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) { |
243 | 0 | return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size); |
244 | 0 | } |
245 | | |
246 | 0 | size_t ggml_backend_get_alignment(ggml_backend_t backend) { |
247 | 0 | return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend)); |
248 | 0 | } |
249 | | |
250 | 0 | size_t ggml_backend_get_max_size(ggml_backend_t backend) { |
251 | 0 | return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend)); |
252 | 0 | } |
253 | | |
254 | 0 | void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
255 | 0 | GGML_ASSERT(backend); |
256 | 0 | GGML_ASSERT(tensor); |
257 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
258 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
259 | |
260 | 0 | if (backend->iface.set_tensor_async == NULL) { |
261 | 0 | ggml_backend_synchronize(backend); |
262 | 0 | ggml_backend_tensor_set(tensor, data, offset, size); |
263 | 0 | } else { |
264 | 0 | backend->iface.set_tensor_async(backend, tensor, data, offset, size); |
265 | 0 | } |
266 | 0 | } |
267 | | |
268 | 0 | void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
269 | 0 | GGML_ASSERT(backend); |
270 | 0 | GGML_ASSERT(tensor); |
271 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
272 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
273 | |
274 | 0 | if (backend->iface.get_tensor_async == NULL) { |
275 | 0 | ggml_backend_synchronize(backend); |
276 | 0 | ggml_backend_tensor_get(tensor, data, offset, size); |
277 | 0 | } else { |
278 | 0 | backend->iface.get_tensor_async(backend, tensor, data, offset, size); |
279 | 0 | } |
280 | 0 | } |
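 | | // Usage sketch (editorial illustration, not part of this file): the async |
 | | // variants are only ordered with respect to the backend's queue, so the host |
 | | // must synchronize before reading the destination of a get: |
 | | //   ggml_backend_tensor_set_async(backend, t, src_data, 0, ggml_nbytes(t)); |
 | | //   ggml_backend_graph_compute_async(backend, gf); |
 | | //   ggml_backend_tensor_get_async(backend, t, dst_data, 0, ggml_nbytes(t)); |
 | | //   ggml_backend_synchronize(backend); // dst_data is only valid after this |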
281 | | |
282 | | void ggml_backend_tensor_set_2d_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size, |
283 | 0 | size_t n_copies, size_t stride_tensor, size_t stride_data) { |
284 | 0 | GGML_ASSERT(backend); |
285 | 0 | GGML_ASSERT(tensor); |
286 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
287 | |
288 | 0 | if (n_copies <= 1 || backend->iface.set_tensor_2d_async == NULL) { |
289 | 0 | for (size_t i = 0; i < n_copies; i++) { |
290 | 0 | ggml_backend_tensor_set_async(backend, tensor, (const char *) data + i*stride_data, offset + i*stride_tensor, size); |
291 | 0 | } |
292 | 0 | return; |
293 | 0 | } |
294 | 0 | if (size == 0) { |
295 | 0 | return; |
296 | 0 | } |
297 | | |
298 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
299 | 0 | GGML_ASSERT(offset + (n_copies-1)*stride_tensor + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
300 | 0 | backend->iface.set_tensor_2d_async(backend, tensor, data, offset, size, n_copies, stride_tensor, stride_data); |
301 | 0 | } |
302 | | |
303 | | void ggml_backend_tensor_get_2d_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size, |
304 | 0 | size_t n_copies, size_t stride_tensor, size_t stride_data) { |
305 | 0 | GGML_ASSERT(backend); |
306 | 0 | GGML_ASSERT(tensor); |
307 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
308 | |
309 | 0 | if (n_copies <= 1 || backend->iface.get_tensor_2d_async == NULL) { |
310 | 0 | for (size_t i = 0; i < n_copies; i++) { |
311 | 0 | ggml_backend_tensor_get_async(backend, tensor, (char *) data + i*stride_data, offset + i*stride_tensor, size); |
312 | 0 | } |
313 | 0 | return; |
314 | 0 | } |
315 | 0 | if (size == 0) { |
316 | 0 | return; |
317 | 0 | } |
318 | | |
319 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
320 | 0 | GGML_ASSERT(offset + (n_copies-1)*stride_tensor + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
321 | 0 | backend->iface.get_tensor_2d_async(backend, tensor, data, offset, size, n_copies, stride_tensor, stride_data); |
322 | 0 | } |
323 | | |
324 | 0 | void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
325 | 0 | GGML_ASSERT(tensor); |
326 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
327 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
328 | |
329 | 0 | if (size == 0) { |
330 | 0 | return; |
331 | 0 | } |
332 | | |
333 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
334 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
335 | |
336 | 0 | buf->iface.set_tensor(buf, tensor, data, offset, size); |
337 | 0 | } |
338 | | |
339 | 0 | void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
340 | 0 | GGML_ASSERT(tensor); |
341 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
342 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
343 | |
344 | 0 | if (size == 0) { |
345 | 0 | return; |
346 | 0 | } |
347 | | |
348 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
349 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
350 | |
351 | 0 | buf->iface.get_tensor(buf, tensor, data, offset, size); |
352 | 0 | } |
353 | | |
354 | | void ggml_backend_tensor_set_2d(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size, |
355 | 0 | size_t n_copies, size_t stride_tensor, size_t stride_data) { |
356 | 0 | GGML_ASSERT(tensor); |
357 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
358 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
359 | |
360 | 0 | if (n_copies <= 1 || buf->iface.set_tensor_2d == NULL) { |
361 | 0 | for (size_t i = 0; i < n_copies; i++) { |
362 | 0 | ggml_backend_tensor_set(tensor, (const char *) data + i*stride_data, offset + i*stride_tensor, size); |
363 | 0 | } |
364 | 0 | return; |
365 | 0 | } |
366 | 0 | if (size == 0) { |
367 | 0 | return; |
368 | 0 | } |
369 | | |
370 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
371 | 0 | GGML_ASSERT(offset + (n_copies-1)*stride_tensor + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
372 | |
373 | 0 | buf->iface.set_tensor_2d(buf, tensor, data, offset, size, n_copies, stride_tensor, stride_data); |
374 | 0 | } |
375 | | |
376 | | void ggml_backend_tensor_get_2d(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size, |
377 | 0 | size_t n_copies, size_t stride_tensor, size_t stride_data) { |
378 | 0 | GGML_ASSERT(tensor); |
379 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
380 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
381 | |
382 | 0 | if (n_copies <= 1 || buf->iface.get_tensor_2d == NULL) { |
383 | 0 | for (size_t i = 0; i < n_copies; i++) { |
384 | 0 | ggml_backend_tensor_get(tensor, (char *) data + i*stride_data, offset + i*stride_tensor, size); |
385 | 0 | } |
386 | 0 | return; |
387 | 0 | } |
388 | 0 | if (size == 0) { |
389 | 0 | return; |
390 | 0 | } |
391 | | |
392 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
393 | 0 | GGML_ASSERT(offset + (n_copies-1)*stride_tensor + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); |
394 | |
395 | 0 | buf->iface.get_tensor_2d(buf, tensor, data, offset, size, n_copies, stride_tensor, stride_data); |
396 | 0 | } |
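 | | // Semantics note (editorial, not part of this file): the *_2d variants copy |
 | | // n_copies blocks of `size` bytes, where block i goes from host address |
 | | // (const char *) data + i*stride_data to tensor byte offset offset + i*stride_tensor. |
 | | // For a buffer without the strided interface this is exactly the fallback loop |
 | | // above; e.g. uploading n_rows rows of a 2D tensor: |
 | | //   ggml_backend_tensor_set_2d(t, rows, 0, row_bytes, n_rows, t->nb[1], row_bytes); |
 | | // behaves like: |
 | | //   for (size_t i = 0; i < n_rows; i++) { |
 | | //       ggml_backend_tensor_set(t, (const char *) rows + i*row_bytes, i*t->nb[1], row_bytes); |
 | | //   } |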
397 | | |
398 | 0 | void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { |
399 | 0 | GGML_ASSERT(tensor); |
400 | 0 | ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
401 | |
402 | 0 | if (size == 0) { |
403 | 0 | return; |
404 | 0 | } |
405 | | |
406 | 0 | GGML_ASSERT(buf != NULL && "tensor buffer not set"); |
407 | 0 | GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); |
408 | 0 | GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); |
409 | 0 | GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer"); |
410 | |
411 | 0 | buf->iface.memset_tensor(buf, tensor, value, offset, size); |
412 | 0 | } |
413 | | |
414 | 0 | void ggml_backend_synchronize(ggml_backend_t backend) { |
415 | 0 | GGML_ASSERT(backend); |
416 | 0 | if (backend->iface.synchronize == NULL) { |
417 | 0 | return; |
418 | 0 | } |
419 | | |
420 | 0 | backend->iface.synchronize(backend); |
421 | 0 | } |
422 | | |
423 | 0 | ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
424 | 0 | GGML_ASSERT(backend); |
425 | 0 | GGML_ASSERT(backend->iface.graph_plan_create != NULL); |
426 | |
427 | 0 | return backend->iface.graph_plan_create(backend, cgraph); |
428 | 0 | } |
429 | | |
430 | 0 | void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { |
431 | 0 | GGML_ASSERT(backend); |
432 | 0 | GGML_ASSERT(backend->iface.graph_plan_free != NULL); |
433 | |
434 | 0 | backend->iface.graph_plan_free(backend, plan); |
435 | 0 | } |
436 | | |
437 | 0 | enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { |
438 | 0 | GGML_ASSERT(backend); |
439 | 0 | GGML_ASSERT(backend->iface.graph_plan_compute != NULL); |
440 | |
441 | 0 | return backend->iface.graph_plan_compute(backend, plan); |
442 | 0 | } |
443 | | |
444 | 0 | enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
445 | 0 | enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph); |
446 | 0 | ggml_backend_synchronize(backend); |
447 | 0 | return err; |
448 | 0 | } |
449 | | |
450 | 0 | enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
451 | 0 | GGML_ASSERT(backend); |
452 | 0 | return backend->iface.graph_compute(backend, cgraph); |
453 | 0 | } |
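 | | // Usage sketch (editorial illustration, not part of this file; error handling |
 | | // and buffer allocation omitted): running a small graph on a single backend, |
 | | // assuming a and b were created in a ggml context and allocated in this |
 | | // backend's buffers (e.g. via ggml_backend_alloc_ctx_tensors from ggml-alloc.h): |
 | | //   struct ggml_cgraph * gf = ggml_new_graph(ctx); |
 | | //   struct ggml_tensor * c  = ggml_mul_mat(ctx, a, b); |
 | | //   ggml_build_forward_expand(gf, c); |
 | | //   if (ggml_backend_graph_compute(backend, gf) != GGML_STATUS_SUCCESS) { |
 | | //       // handle failure |
 | | //   } |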
454 | | |
455 | 0 | bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { |
456 | 0 | GGML_ASSERT(backend); |
457 | 0 | return ggml_backend_dev_supports_op(backend->device, op); |
458 | 0 | } |
459 | | |
460 | 0 | bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) { |
461 | 0 | GGML_ASSERT(backend); |
462 | 0 | return ggml_backend_dev_supports_buft(backend->device, buft); |
463 | 0 | } |
464 | | |
465 | 0 | bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) { |
466 | 0 | GGML_ASSERT(backend); |
467 | 0 | return ggml_backend_dev_offload_op(backend->device, op); |
468 | 0 | } |
469 | | |
470 | 0 | ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) { |
471 | 0 | GGML_ASSERT(backend); |
472 | 0 | return backend->device; |
473 | 0 | } |
474 | | |
475 | | // backend copy |
476 | | |
477 | 0 | void ggml_backend_tensor_copy(const struct ggml_tensor * src, struct ggml_tensor * dst) { |
478 | 0 | GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); |
479 | |
480 | 0 | if (src == dst) { |
481 | 0 | return; |
482 | 0 | } |
483 | | |
484 | 0 | if (ggml_backend_buffer_is_host(src->buffer)) { |
485 | 0 | ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); |
486 | 0 | } else if (ggml_backend_buffer_is_host(dst->buffer)) { |
487 | 0 | ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); |
488 | 0 | } else if (!ggml_backend_buffer_copy_tensor(src, dst)) { |
489 | | #ifndef NDEBUG |
490 | | GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer)); |
491 | | #endif // NDEBUG |
492 | 0 | size_t nbytes = ggml_nbytes(src); |
493 | 0 | void * data = malloc(nbytes); |
494 | 0 | ggml_backend_tensor_get(src, data, 0, nbytes); |
495 | 0 | ggml_backend_tensor_set(dst, data, 0, nbytes); |
496 | 0 | free(data); |
497 | 0 | } |
498 | 0 | } |
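 | | // Usage sketch (editorial illustration, not part of this file): src and dst must |
 | | // have the same layout but may live in buffers of different backends; the copy |
 | | // takes the fastest available path (direct host access, the destination buffer's |
 | | // cpy_tensor, or a bounce through temporary host memory): |
 | | //   ggml_backend_tensor_copy(t_host, t_device); // upload |
 | | //   ggml_backend_tensor_copy(t_device, t_host); // download |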
499 | | |
500 | 0 | void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const struct ggml_tensor * src, struct ggml_tensor * dst) { |
501 | 0 | GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); |
502 | |
503 | 0 | if (src == dst) { |
504 | 0 | return; |
505 | 0 | } |
506 | | |
507 | 0 | GGML_ASSERT(backend_dst); |
508 | 0 | if (backend_dst->iface.cpy_tensor_async != NULL) { |
509 | 0 | if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) { |
510 | 0 | return; |
511 | 0 | } |
512 | 0 | } |
513 | | |
514 | | // an async copy would normally happen after all the queued operations on both backends are completed |
515 | | // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy |
516 | 0 | ggml_backend_synchronize(backend_src); |
517 | 0 | ggml_backend_synchronize(backend_dst); |
518 | 0 | ggml_backend_tensor_copy(src, dst); |
519 | 0 | } |
520 | | |
521 | | // events |
522 | | |
523 | 0 | ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) { |
524 | | // null device is allowed for the transition period to the device interface |
525 | 0 | if (device == NULL || device->iface.event_new == NULL) { |
526 | 0 | return NULL; |
527 | 0 | } |
528 | 0 | return device->iface.event_new(device); |
529 | 0 | } |
530 | | |
531 | 0 | void ggml_backend_event_free(ggml_backend_event_t event) { |
532 | 0 | if (event == NULL) { |
533 | 0 | return; |
534 | 0 | } |
535 | 0 | event->device->iface.event_free(event->device, event); |
536 | 0 | } |
537 | | |
538 | 0 | void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) { |
539 | 0 | GGML_ASSERT(backend); |
540 | 0 | GGML_ASSERT(backend->iface.event_record != NULL); |
541 | |
542 | 0 | backend->iface.event_record(backend, event); |
543 | 0 | } |
544 | | |
545 | 0 | void ggml_backend_event_synchronize(ggml_backend_event_t event) { |
546 | 0 | GGML_ASSERT(event); |
547 | 0 | GGML_ASSERT(event->device->iface.event_synchronize); |
548 | |
549 | 0 | event->device->iface.event_synchronize(event->device, event); |
550 | 0 | } |
551 | | |
552 | 0 | void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) { |
553 | 0 | GGML_ASSERT(backend); |
554 | 0 | GGML_ASSERT(backend->iface.event_wait != NULL); |
555 | |
556 | 0 | backend->iface.event_wait(backend, event); |
557 | 0 | } |
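 | | // Usage sketch (editorial illustration, not part of this file): events order |
 | | // work between two backends without blocking the host; this is the mechanism |
 | | // the scheduler uses for pipeline parallelism further below: |
 | | //   ggml_backend_event_t ev = ggml_backend_event_new(ggml_backend_get_device(backend_src)); |
 | | //   ggml_backend_graph_compute_async(backend_src, gf_src); |
 | | //   ggml_backend_event_record(ev, backend_src);  // mark the end of the producer's queued work |
 | | //   ggml_backend_event_wait(backend_dst, ev);    // consumer waits on its own queue, host does not block |
 | | //   ggml_backend_graph_compute_async(backend_dst, gf_dst); |
 | | //   ggml_backend_event_synchronize(ev);          // optionally block the host until the event completes |
 | | //   ggml_backend_event_free(ev); |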
558 | | |
559 | 0 | static void ggml_backend_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * cgraph) { |
560 | 0 | GGML_ASSERT(backend); |
561 | 0 | if (backend->iface.graph_optimize != NULL) { |
562 | 0 | backend->iface.graph_optimize(backend, cgraph); |
563 | 0 | } |
564 | 0 | } |
565 | | |
566 | | // Backend device |
567 | | |
568 | 0 | const char * ggml_backend_dev_name(ggml_backend_dev_t device) { |
569 | 0 | GGML_ASSERT(device); |
570 | 0 | return device->iface.get_name(device); |
571 | 0 | } |
572 | | |
573 | 0 | const char * ggml_backend_dev_description(ggml_backend_dev_t device) { |
574 | 0 | GGML_ASSERT(device); |
575 | 0 | return device->iface.get_description(device); |
576 | 0 | } |
577 | | |
578 | 0 | void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) { |
579 | 0 | GGML_ASSERT(device); |
580 | 0 | device->iface.get_memory(device, free, total); |
581 | 0 | } |
582 | | |
583 | 8 | enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) { |
584 | 8 | GGML_ASSERT(device); |
585 | 8 | return device->iface.get_type(device); |
586 | 8 | } |
587 | | |
588 | 0 | void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) { |
589 | 0 | GGML_ASSERT(device); |
590 | 0 | memset(props, 0, sizeof(*props)); |
591 | 0 | device->iface.get_props(device, props); |
592 | 0 | } |
593 | | |
594 | 0 | ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) { |
595 | 0 | GGML_ASSERT(device); |
596 | 0 | return device->reg; |
597 | 0 | } |
598 | | |
599 | 0 | ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) { |
600 | 0 | GGML_ASSERT(device); |
601 | 0 | return device->iface.init_backend(device, params); |
602 | 0 | } |
603 | | |
604 | 0 | ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) { |
605 | 0 | GGML_ASSERT(device); |
606 | 0 | return device->iface.get_buffer_type(device); |
607 | 0 | } |
608 | | |
609 | 0 | ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) { |
610 | 0 | GGML_ASSERT(device); |
611 | 0 | if (device->iface.get_host_buffer_type == NULL) { |
612 | 0 | return NULL; |
613 | 0 | } |
614 | | |
615 | 0 | return device->iface.get_host_buffer_type(device); |
616 | 0 | } |
617 | | |
618 | 0 | ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) { |
619 | 0 | GGML_ASSERT(device); |
620 | 0 | return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size); |
621 | 0 | } |
622 | | |
623 | 0 | bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { |
624 | 0 | GGML_ASSERT(device); |
625 | 0 | return device->iface.supports_op(device, op); |
626 | 0 | } |
627 | | |
628 | 0 | bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) { |
629 | 0 | GGML_ASSERT(device); |
630 | 0 | return device->iface.supports_buft(device, buft); |
631 | 0 | } |
632 | | |
633 | 0 | bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) { |
634 | 0 | GGML_ASSERT(device); |
635 | 0 | if (device->iface.offload_op != NULL) { |
636 | 0 | return device->iface.offload_op(device, op); |
637 | 0 | } |
638 | | |
639 | 0 | return false; |
640 | 0 | } |
641 | | |
642 | | // Backend (reg) |
643 | | |
644 | 0 | const char * ggml_backend_reg_name(ggml_backend_reg_t reg) { |
645 | 0 | GGML_ASSERT(reg); |
646 | 0 | return reg->iface.get_name(reg); |
647 | 0 | } |
648 | | |
649 | 2 | size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) { |
650 | 2 | GGML_ASSERT(reg); |
651 | 2 | return reg->iface.get_device_count(reg); |
652 | 2 | } |
653 | | |
654 | 1 | ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) { |
655 | 1 | GGML_ASSERT(reg); |
656 | 1 | return reg->iface.get_device(reg, index); |
657 | 1 | } |
658 | | |
659 | 0 | void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) { |
660 | 0 | GGML_ASSERT(reg); |
661 | 0 | if (!reg->iface.get_proc_address) { |
662 | 0 | return NULL; |
663 | 0 | } |
664 | 0 | return reg->iface.get_proc_address(reg, name); |
665 | 0 | } |
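 | | // Usage sketch (editorial illustration, not part of this file): get_proc_address |
 | | // exposes optional, backend-specific entry points; the name and signature below |
 | | // are assumed from the CPU backend ("ggml_backend_set_n_threads"): |
 | | //   typedef void (*set_n_threads_fn)(ggml_backend_t backend, int n_threads); |
 | | //   set_n_threads_fn fn = (set_n_threads_fn) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); |
 | | //   if (fn) { |
 | | //       fn(backend, 8); |
 | | //   } |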
666 | | |
667 | | // multi-buffer buffer |
668 | | |
669 | | struct ggml_backend_multi_buffer_context { |
670 | | ggml_backend_buffer_t * buffers; |
671 | | size_t n_buffers; |
672 | | }; |
673 | | |
674 | 0 | static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) { |
675 | 0 | GGML_ASSERT(buffer); |
676 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; |
677 | 0 | for (size_t i = 0; i < ctx->n_buffers; i++) { |
678 | 0 | ggml_backend_buffer_free(ctx->buffers[i]); |
679 | 0 | } |
680 | |
681 | 0 | free(ctx->buffers); |
682 | 0 | free(ctx); |
683 | 0 | } |
684 | | |
685 | 0 | static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
686 | 0 | GGML_ASSERT(buffer); |
687 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; |
688 | 0 | for (size_t i = 0; i < ctx->n_buffers; i++) { |
689 | 0 | ggml_backend_buffer_clear(ctx->buffers[i], value); |
690 | 0 | } |
691 | 0 | } |
692 | | |
693 | | static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = { |
694 | | /* .free_buffer = */ ggml_backend_multi_buffer_free_buffer, |
695 | | /* .get_base = */ NULL, |
696 | | /* .init_tensor = */ NULL, |
697 | | /* .memset_tensor = */ NULL, |
698 | | /* .set_tensor = */ NULL, |
699 | | /* .get_tensor = */ NULL, |
700 | | /* .set_tensor_2d = */ NULL, |
701 | | /* .get_tensor_2d = */ NULL, |
702 | | /* .cpy_tensor = */ NULL, |
703 | | /* .clear = */ ggml_backend_multi_buffer_clear, |
704 | | /* .reset = */ NULL, |
705 | | }; |
706 | | |
707 | 0 | ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) { |
708 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context)); |
709 | 0 | ctx->n_buffers = n_buffers; |
710 | 0 | ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t)); |
711 | |
712 | 0 | GGML_ASSERT(ctx->buffers != NULL); |
713 | |
714 | 0 | size_t total_size = 0; |
715 | 0 | for (size_t i = 0; i < n_buffers; i++) { |
716 | 0 | ctx->buffers[i] = buffers[i]; |
717 | 0 | total_size += ggml_backend_buffer_get_size(buffers[i]); |
718 | 0 | } |
719 | |
720 | 0 | return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size); |
721 | 0 | } |
722 | | |
723 | 0 | bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) { |
724 | 0 | GGML_ASSERT(buffer); |
725 | 0 | return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer; |
726 | 0 | } |
727 | | |
728 | 0 | void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { |
729 | 0 | GGML_ASSERT(buffer); |
730 | 0 | GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer)); |
731 | 0 | ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context; |
732 | 0 | for (size_t i = 0; i < ctx->n_buffers; i++) { |
733 | 0 | ggml_backend_buffer_set_usage(ctx->buffers[i], usage); |
734 | 0 | } |
735 | 0 | } |
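 | | // Usage sketch (editorial illustration, not part of this file): a multi-buffer |
 | | // groups several already-allocated buffers behind one handle so they can be |
 | | // freed and have their usage set as a unit: |
 | | //   ggml_backend_buffer_t parts[2] = { buf_a, buf_b }; |
 | | //   ggml_backend_buffer_t combined = ggml_backend_multi_buffer_alloc_buffer(parts, 2); |
 | | //   ggml_backend_buffer_set_usage(combined, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); // forwarded to buf_a and buf_b |
 | | //   ggml_backend_buffer_free(combined); // also frees buf_a and buf_b |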
736 | | |
737 | | // creates a copy of the tensor with the same memory layout |
738 | 0 | static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) { |
739 | 0 | struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor); |
740 | 0 | for (int i = 0; i < GGML_MAX_DIMS; i++) { |
741 | 0 | dup->nb[i] = tensor->nb[i]; |
742 | 0 | } |
743 | 0 | return dup; |
744 | 0 | } |
745 | | |
746 | 0 | static bool ggml_is_view_op(enum ggml_op op) { |
747 | 0 | return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE; |
748 | 0 | } |
749 | | |
750 | | // scheduler |
751 | | |
752 | | #ifndef GGML_SCHED_MAX_BACKENDS |
753 | | #define GGML_SCHED_MAX_BACKENDS 16 |
754 | | #endif |
755 | | |
756 | | #ifndef GGML_SCHED_MAX_SPLIT_INPUTS |
757 | 0 | #define GGML_SCHED_MAX_SPLIT_INPUTS 30 |
758 | | #endif |
759 | | |
760 | | #ifndef GGML_SCHED_MAX_COPIES |
761 | | #define GGML_SCHED_MAX_COPIES 4 |
762 | | #endif |
763 | | |
764 | | struct ggml_backend_sched_split { |
765 | | int backend_id; |
766 | | int i_start; |
767 | | int i_end; |
768 | | struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS]; |
769 | | int n_inputs; |
770 | | // graph view of this split |
771 | | struct ggml_cgraph graph; |
772 | | }; |
773 | | |
774 | | struct ggml_backend_sched { |
775 | | bool is_reset; // true if the scheduler has been reset since the last graph split |
776 | | bool is_alloc; |
777 | | |
778 | | int n_backends; |
779 | | |
780 | | ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS]; |
781 | | ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS]; |
782 | | ggml_gallocr_t galloc; |
783 | | |
784 | | // hash map of the nodes in the graph |
785 | | struct ggml_hash_set hash_set; |
786 | | int * hv_tensor_backend_ids; // [hash_set.size] |
787 | | struct ggml_tensor ** hv_tensor_copies; // [hash_set.size][n_backends][n_copies] |
788 | | |
789 | | int * node_backend_ids; // [graph_size] |
790 | | int * leaf_backend_ids; // [graph_size] |
791 | | |
792 | | int * prev_node_backend_ids; // [graph_size] |
793 | | int * prev_leaf_backend_ids; // [graph_size] |
794 | | |
795 | | // copy of the graph with modified inputs |
796 | | struct ggml_cgraph graph; |
797 | | |
798 | | // graph splits |
799 | | struct ggml_backend_sched_split * splits; |
800 | | int n_splits; |
801 | | int splits_capacity; |
802 | | |
803 | | // pipeline parallelism support |
804 | | int n_copies; |
805 | | int cur_copy; |
806 | | int next_copy; |
807 | | ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES]; |
808 | | struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS]; |
809 | | int n_graph_inputs; |
810 | | |
811 | | struct ggml_context * ctx; |
812 | | |
813 | | ggml_backend_sched_eval_callback callback_eval; |
814 | | void * callback_eval_user_data; |
815 | | |
816 | | char * context_buffer; |
817 | | size_t context_buffer_size; |
818 | | |
819 | | bool op_offload; |
820 | | |
821 | | int debug; |
822 | | |
823 | | // used for debugging graph reallocations [GGML_SCHED_DEBUG_REALLOC] |
824 | | // ref: https://github.com/ggml-org/llama.cpp/pull/17617 |
825 | | int debug_realloc; |
826 | | int debug_graph_size; |
827 | | int debug_prev_graph_size; |
828 | | }; |
829 | | |
830 | 0 | #define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor) |
831 | 0 | #define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)] |
832 | 0 | #define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)] |
833 | 0 | #define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id) |
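 | | // Layout note (editorial, not part of this file): hv_tensor_copies is a flat |
 | | // array indexed as [hash_id][backend_id][copy_id]; e.g. with n_backends = 2 and |
 | | // n_copies = 4, the copy for tensor id 3 on backend 1, copy 2 is at element |
 | | // 3*2*4 + 1*4 + 2 = 30, which is what tensor_id_copy(3, 1, 2) expands to. |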
834 | | |
835 | | // returns the priority of the backend, lower id is higher priority |
836 | 0 | static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) { |
837 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
838 | 0 | if (sched->backends[i] == backend) { |
839 | 0 | return i; |
840 | 0 | } |
841 | 0 | } |
842 | 0 | return -1; |
843 | 0 | } |
844 | | |
845 | 0 | static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) { |
846 | 0 | ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
847 | 0 | if (buffer == NULL) { |
848 | 0 | return -1; |
849 | 0 | } |
850 | | |
851 | | // find highest prio backend that supports the buffer type and the op |
852 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
853 | 0 | if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) && |
854 | 0 | ggml_backend_supports_op(sched->backends[i], op)) { |
855 | 0 | return i; |
856 | 0 | } |
857 | 0 | } |
858 | | |
859 | | #ifndef NDEBUG |
860 | | GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n", |
861 | | __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name); |
862 | | #endif |
863 | | |
864 | 0 | return -1; |
865 | 0 | } |
866 | | |
867 | | #if 0 |
868 | | #define GGML_SCHED_MAX_SPLITS_DEBUG 4096 |
869 | | static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only |
870 | | #define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__) |
871 | | #define GET_CAUSE(node) causes[hash_id(node)] |
872 | | #else |
873 | | #define SET_CAUSE(node, ...) |
874 | | #define GET_CAUSE(node) "" |
875 | | #endif |
876 | | |
877 | | // returns the backend that should be used for the node based on the current locations |
878 | 0 | static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) { |
879 | | // assign pre-allocated nodes to their backend |
880 | 0 | int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor); |
881 | 0 | if (cur_backend_id != -1) { |
882 | 0 | SET_CAUSE(tensor, "1.dst"); |
883 | 0 | return cur_backend_id; |
884 | 0 | } |
885 | | |
886 | | // view_src |
887 | 0 | if (tensor->view_src != NULL) { |
888 | 0 | cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor); |
889 | 0 | if (cur_backend_id != -1) { |
890 | 0 | SET_CAUSE(tensor, "1.vsrc"); |
891 | 0 | return cur_backend_id; |
892 | 0 | } |
893 | 0 | } |
894 | | |
895 | 0 | if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) { |
896 | | // since the tensor is pre-allocated, it cannot be moved to another backend |
897 | 0 | ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; |
898 | 0 | GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op)); |
899 | 0 | } |
900 | | |
901 | | // graph input |
902 | 0 | if (tensor->flags & GGML_TENSOR_FLAG_INPUT) { |
903 | 0 | cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU) |
904 | 0 | SET_CAUSE(tensor, "1.inp"); |
905 | 0 | return cur_backend_id; |
906 | 0 | } |
907 | | |
908 | | // operations with weights are preferably run on the same backend as the weights |
909 | 0 | for (int i = 0; i < GGML_MAX_SRC; i++) { |
910 | 0 | const struct ggml_tensor * src = tensor->src[i]; |
911 | 0 | if (src == NULL) { |
912 | 0 | continue; |
913 | 0 | } |
914 | | // skip ROPE since the rope freqs tensor is too small to choose a backend based on it |
915 | | // not an ideal solution |
916 | 0 | if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { |
917 | 0 | int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor); |
918 | | // check if a backend with higher prio wants to offload the op |
919 | 0 | if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) { |
920 | 0 | for (int b = 0; b < src_backend_id; b++) { |
921 | 0 | if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) { |
922 | 0 | SET_CAUSE(tensor, "1.off"); |
923 | 0 | return b; |
924 | 0 | } |
925 | 0 | } |
926 | 0 | } |
927 | 0 | SET_CAUSE(tensor, "1.wgt%d", i); |
928 | 0 | return src_backend_id; |
929 | 0 | } |
930 | 0 | } |
931 | | |
932 | 0 | return -1; |
933 | 0 | } |
934 | | |
935 | 0 | static char * fmt_size(size_t size) { |
936 | 0 | static char buffer[128]; |
937 | 0 | if (size >= 1024*1024) { |
938 | 0 | snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024); |
939 | 0 | } else { |
940 | 0 | snprintf(buffer, sizeof(buffer), "%zuK", size/1024); |
941 | 0 | } |
942 | 0 | return buffer; |
943 | 0 | } |
944 | | |
945 | 0 | static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
946 | 0 | int cur_split = 0; |
947 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
948 | 0 | if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) { |
949 | 0 | ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id]; |
950 | 0 | GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend), |
951 | 0 | sched->splits[cur_split].n_inputs); |
952 | 0 | for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) { |
953 | 0 | if (j == 0) { |
954 | 0 | GGML_LOG_DEBUG(": "); |
955 | 0 | } |
956 | 0 | GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, |
957 | 0 | fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j]))); |
958 | 0 | } |
959 | 0 | GGML_LOG_DEBUG("\n"); |
960 | 0 | cur_split++; |
961 | 0 | } |
962 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
963 | 0 | if (ggml_is_view_op(node->op)) { |
964 | 0 | continue; |
965 | 0 | } |
966 | 0 | if (sched->debug > 1) { |
967 | 0 | ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node); |
968 | 0 | GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d,c=%d:", i, ggml_op_name(node->op), node->name, |
969 | 0 | fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node), |
970 | 0 | graph->use_counts[ggml_hash_find(&graph->visited_hash_set, node)], node->flags & GGML_TENSOR_FLAG_COMPUTE ? 1 : 0); |
971 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
972 | 0 | struct ggml_tensor * src = node->src[j]; |
973 | 0 | if (src == NULL) { |
974 | 0 | continue; |
975 | 0 | } |
976 | 0 | ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src); |
977 | 0 | GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name, |
978 | 0 | fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src)); |
979 | 0 | } |
980 | 0 | GGML_LOG_DEBUG("\n"); |
981 | 0 | } |
982 | 0 | } |
983 | 0 | } |
984 | | |
985 | 0 | static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) { |
986 | 0 | ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer; |
987 | 0 | ggml_backend_buffer_type_t buft = NULL; |
988 | |
989 | 0 | if (buf) { |
990 | | // the tensor is already allocated |
991 | 0 | buft = buf->buft; |
992 | 0 | } else { |
993 | | // see if the tensor already has a backend assigned, and use the buffer type of that backend |
994 | 0 | int tensor_backend_id = tensor_backend_id(t); |
995 | 0 | if (tensor_backend_id == -1 && t->view_src) { |
996 | 0 | tensor_backend_id = tensor_backend_id(t->view_src); |
997 | 0 | } |
998 | 0 | if (tensor_backend_id != -1) { |
999 | 0 | buft = sched->bufts[tensor_backend_id]; |
1000 | 0 | } |
1001 | 0 | } |
1002 | |
1003 | 0 | return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft); |
1004 | 0 | } |
1005 | | |
1006 | 0 | static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) { |
1007 | 0 | if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) { |
1008 | 0 | *node_backend_id = cur_backend_id; |
1009 | 0 | SET_CAUSE(node, "2.sup"); |
1010 | 0 | } |
1011 | 0 | } |
1012 | | |
1013 | | // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend |
1014 | 0 | void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
1015 | | // reset splits |
1016 | 0 | sched->n_splits = 0; |
1017 | 0 | sched->n_graph_inputs = 0; |
1018 | 0 | sched->is_reset = false; |
1019 | |
1020 | 0 | struct ggml_init_params params = { |
1021 | 0 | /* .mem_size = */ sched->context_buffer_size, |
1022 | 0 | /* .mem_buffer = */ sched->context_buffer, |
1023 | 0 | /* .no_alloc = */ true |
1024 | 0 | }; |
1025 | |
1026 | 0 | ggml_free(sched->ctx); |
1027 | |
1028 | 0 | sched->ctx = ggml_init(params); |
1029 | 0 | if (sched->ctx == NULL) { |
1030 | 0 | GGML_ABORT("%s: failed to initialize context\n", __func__); |
1031 | 0 | } |
1032 | | |
1033 | | // pass 1: assign backends to ops with pre-allocated inputs |
1034 | 0 | for (int i = 0; i < graph->n_leafs; i++) { |
1035 | 0 | struct ggml_tensor * leaf = graph->leafs[i]; |
1036 | 0 | int * leaf_backend_id = &tensor_backend_id(leaf); |
1037 | | // do not overwrite user assignments |
1038 | 0 | if (*leaf_backend_id == -1) { |
1039 | 0 | *leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf); |
1040 | 0 | } |
1041 | 0 | } |
1042 | |
1043 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1044 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1045 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1046 | | // do not overwrite user assignments |
1047 | 0 | if (*node_backend_id == -1) { |
1048 | 0 | *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node); |
1049 | |
1050 | | #if 0 |
1051 | | // src |
1052 | | if (node->op == GGML_OP_NONE) { |
1053 | | continue; |
1054 | | } |
1055 | | |
1056 | | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1057 | | struct ggml_tensor * src = node->src[j]; |
1058 | | if (src == NULL) { |
1059 | | continue; |
1060 | | } |
1061 | | int * src_backend_id = &tensor_backend_id(src); |
1062 | | if (*src_backend_id == -1) { |
1063 | | *src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src); |
1064 | | } |
1065 | | } |
1066 | | #endif |
1067 | 0 | } |
1068 | 0 | } |
1069 | | |
1070 | | // pass 2: expand current backend assignments |
1071 | | // assign the same backend to adjacent nodes |
1072 | | // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend) |
1073 | | // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops |
1074 | | // ops unsupported by the backend being expanded are left unassigned, so that they can be assigned later once the locations of their inputs are known |
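 | | // Worked example (editorial, not part of this file), assuming all ops are supported: |
 | | // with backends [GPU0, GPU1, CPU] (CPU last, i.e. lowest priority) and initial node |
 | | // assignments [GPU0, -, -, CPU, -, GPU1, -], |
 | | // "expand gpu down" yields [GPU0, GPU0, GPU0, CPU, -, GPU1, GPU1] (cur is reset at the CPU node), |
 | | // "expand gpu up" then yields [GPU0, GPU0, GPU0, CPU, GPU1, GPU1, GPU1], |
 | | // and the "rest" passes fill anything still unassigned, now also propagating from CPU nodes. |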
1075 | | // expand gpu down |
1076 | 0 | { |
1077 | 0 | int cur_backend_id = -1; |
1078 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1079 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1080 | 0 | if (ggml_is_view_op(node->op)) { |
1081 | 0 | continue; |
1082 | 0 | } |
1083 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1084 | 0 | if (*node_backend_id != -1) { |
1085 | 0 | if (*node_backend_id == sched->n_backends - 1) { |
1086 | | // skip cpu (lowest prio backend) |
1087 | 0 | cur_backend_id = -1; |
1088 | 0 | } else { |
1089 | 0 | cur_backend_id = *node_backend_id; |
1090 | 0 | } |
1091 | 0 | } else if (cur_backend_id != -1) { |
1092 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1093 | 0 | } |
1094 | 0 | } |
1095 | 0 | } |
1096 | | // expand gpu up |
1097 | 0 | { |
1098 | 0 | int cur_backend_id = -1; |
1099 | 0 | for (int i = graph->n_nodes - 1; i >= 0; i--) { |
1100 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1101 | 0 | if (ggml_is_view_op(node->op)) { |
1102 | 0 | continue; |
1103 | 0 | } |
1104 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1105 | 0 | if (*node_backend_id != -1) { |
1106 | 0 | if (*node_backend_id == sched->n_backends - 1) { |
1107 | | // skip cpu (lowest prio backend) |
1108 | 0 | cur_backend_id = -1; |
1109 | 0 | } else { |
1110 | 0 | cur_backend_id = *node_backend_id; |
1111 | 0 | } |
1112 | 0 | } else if (cur_backend_id != -1) { |
1113 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1114 | 0 | } |
1115 | 0 | } |
1116 | 0 | } |
1117 | | // expand rest down |
1118 | 0 | { |
1119 | 0 | int cur_backend_id = -1; |
1120 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1121 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1122 | 0 | if (ggml_is_view_op(node->op)) { |
1123 | 0 | continue; |
1124 | 0 | } |
1125 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1126 | 0 | if (*node_backend_id != -1) { |
1127 | 0 | cur_backend_id = *node_backend_id; |
1128 | 0 | } else if (cur_backend_id != -1) { |
1129 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1130 | 0 | } |
1131 | 0 | } |
1132 | 0 | } |
1133 | | // expand rest up |
1134 | 0 | { |
1135 | 0 | int cur_backend_id = -1; |
1136 | 0 | for (int i = graph->n_nodes - 1; i >= 0; i--) { |
1137 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1138 | 0 | if (ggml_is_view_op(node->op)) { |
1139 | 0 | continue; |
1140 | 0 | } |
1141 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1142 | 0 | if (*node_backend_id != -1) { |
1143 | 0 | cur_backend_id = *node_backend_id; |
1144 | 0 | } else if (cur_backend_id != -1) { |
1145 | 0 | ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id); |
1146 | 0 | } |
1147 | 0 | } |
1148 | 0 | } |
1149 | | |
1150 | | // pass 3: upgrade nodes to higher prio backends with compatible buffer types |
1151 | | // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there |
1152 | | // however, we also need to verify that the sources are in compatible buffer types |
1153 | | // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph |
1154 | | // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same |
1155 | | // this is not uncommon since multiple backends can use host memory with the same buffer type (e.g. BLAS and CPU) |
1156 | | // additionally, set remaining unassigned nodes to the backend with the most supported inputs |
1157 | | // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point |
1158 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1159 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1160 | 0 | if (ggml_is_view_op(node->op)) { |
1161 | 0 | continue; |
1162 | 0 | } |
1163 | 0 | int * node_backend_id = &tensor_backend_id(node); |
1164 | 0 | if (*node_backend_id == -1) { |
1165 | | // unassigned node: find the backend with the most supported inputs |
1166 | 0 | int n_supported_best = -1; |
1167 | 0 | for (int b = 0; b < sched->n_backends; b++) { |
1168 | 0 | if (ggml_backend_supports_op(sched->backends[b], node)) { |
1169 | 0 | int n_supported = 0; |
1170 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1171 | 0 | struct ggml_tensor * src = node->src[j]; |
1172 | 0 | if (src == NULL) { |
1173 | 0 | continue; |
1174 | 0 | } |
1175 | 0 | if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) { |
1176 | 0 | n_supported++; |
1177 | 0 | } |
1178 | 0 | } |
1179 | 0 | if (n_supported > n_supported_best) { |
1180 | 0 | n_supported_best = n_supported; |
1181 | 0 | *node_backend_id = b; |
1182 | 0 | SET_CAUSE(node, "3.best"); |
1183 | 0 | } |
1184 | 0 | } |
1185 | 0 | } |
1186 | 0 | } else { |
1187 | | // assigned node: upgrade to higher prio backend if possible |
1188 | 0 | for (int b = 0; b < *node_backend_id; b++) { |
1189 | 0 | if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) { |
1190 | 0 | bool supported = true; |
1191 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1192 | 0 | struct ggml_tensor * src = node->src[j]; |
1193 | 0 | if (src == NULL) { |
1194 | 0 | continue; |
1195 | 0 | } |
1196 | 0 | if (!ggml_backend_sched_buffer_supported(sched, src, b)) { |
1197 | 0 | supported = false; |
1198 | 0 | break; |
1199 | 0 | } |
1200 | 0 | } |
1201 | 0 | if (supported) { |
1202 | 0 | *node_backend_id = b; |
1203 | 0 | SET_CAUSE(node, "3.upg"); |
1204 | 0 | break; |
1205 | 0 | } |
1206 | 0 | } |
1207 | 0 | } |
1208 | 0 | } |
1209 | 0 | } |
1210 | | |
1211 | | // pass 4: assign backends to remaining src from dst and view_src |
1212 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
1213 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1214 | 0 | int * cur_backend_id = &tensor_backend_id(node); |
1215 | 0 | if (node->view_src != NULL && *cur_backend_id == -1) { |
1216 | 0 | *cur_backend_id = tensor_backend_id(node->view_src); |
1217 | 0 | SET_CAUSE(node, "4.vsrc"); |
1218 | 0 | } |
1219 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1220 | 0 | struct ggml_tensor * src = node->src[j]; |
1221 | 0 | if (src == NULL) { |
1222 | 0 | continue; |
1223 | 0 | } |
1224 | 0 | int * src_backend_id = &tensor_backend_id(src); |
1225 | 0 | if (*src_backend_id == -1) { |
1226 | 0 | if (src->view_src != NULL) { |
1227 | | // views are always on the same backend as the source |
1228 | 0 | *src_backend_id = tensor_backend_id(src->view_src); |
1229 | 0 | SET_CAUSE(src, "4.vsrc"); |
1230 | 0 | } else { |
1231 | 0 | *src_backend_id = *cur_backend_id; |
1232 | 0 | SET_CAUSE(src, "4.cur"); |
1233 | 0 | } |
1234 | 0 | } |
1235 | 0 | } |
1236 | | // if the node is still unassigned, assign it to the first backend that supports it |
1237 | 0 | for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) { |
1238 | 0 | ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id); |
1239 | 0 | } |
1240 | 0 | GGML_ASSERT(*cur_backend_id != -1); |
1241 | 0 | } |
1242 | | |
1243 | | // pass 5: split graph, find tensors that need to be copied |
1244 | 0 | { |
1245 | 0 | int i_split = 0; |
1246 | 0 | struct ggml_backend_sched_split * split = &sched->splits[0]; |
1247 | | // find the backend of the first split, skipping view ops |
1248 | 0 | int i = 0; |
1249 | 0 | for (; i < graph->n_nodes; i++) { |
1250 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1251 | 0 | if (!ggml_is_view_op(node->op)) { |
1252 | 0 | split->backend_id = tensor_backend_id(node); |
1253 | 0 | break; |
1254 | 0 | } |
1255 | 0 | } |
1256 | 0 | split->i_start = 0; |
1257 | 0 | split->n_inputs = 0; |
1258 | 0 | int cur_backend_id = split->backend_id; |
1259 | 0 | for (; i < graph->n_nodes; i++) { |
1260 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
1261 | |
1262 | 0 | if (ggml_is_view_op(node->op)) { |
1263 | 0 | continue; |
1264 | 0 | } |
1265 | | |
1266 | 0 | const int node_backend_id = tensor_backend_id(node); |
1267 | |
1268 | 0 | GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now; this assert can fail if there is no CPU fallback |
1269 | | |
1270 | | // check if we should start a new split based on the sources of the current node |
1271 | 0 | bool need_new_split = false; |
1272 | 0 | if (node_backend_id == cur_backend_id && split->n_inputs > 0) { |
1273 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1274 | 0 | struct ggml_tensor * src = node->src[j]; |
1275 | 0 | if (src == NULL) { |
1276 | 0 | continue; |
1277 | 0 | } |
1278 | | // check if a weight is on a different and incompatible backend |
1279 | | // by starting a new split, the memory of the previously offloaded weights can be reused |
1280 | 0 | if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { |
1281 | 0 | int src_backend_id = tensor_backend_id(src); |
1282 | 0 | if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { |
1283 | 0 | need_new_split = true; |
1284 | 0 | break; |
1285 | 0 | } |
1286 | 0 | } |
1287 | | // check if the split has too many inputs |
1288 | | // FIXME: count the number of inputs instead of only checking when full |
1289 | 0 | if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) { |
1290 | 0 | const size_t id = hash_id(src); |
1291 | 0 | int src_backend_id = sched->hv_tensor_backend_ids[id]; |
1292 | 0 | bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id); |
1293 | 0 | if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) { |
1294 | 0 | need_new_split = true; |
1295 | 0 | break; |
1296 | 0 | } |
1297 | 0 | } |
1298 | 0 | } |
1299 | 0 | } |
1300 | |
1301 | 0 | if (node_backend_id != cur_backend_id || need_new_split) { |
1302 | 0 | split->i_end = i; |
1303 | 0 | i_split++; |
1304 | 0 | if (i_split >= sched->splits_capacity) { |
1305 | 0 | sched->splits_capacity *= 2; |
1306 | 0 | sched->splits = (ggml_backend_sched_split *) |
1307 | 0 | realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split)); |
1308 | 0 | GGML_ASSERT(sched->splits != NULL); |
1309 | 0 | } |
1310 | 0 | split = &sched->splits[i_split]; |
1311 | 0 | split->backend_id = node_backend_id; |
1312 | 0 | split->i_start = i; |
1313 | 0 | split->n_inputs = 0; |
1314 | 0 | cur_backend_id = node_backend_id; |
1315 | 0 | } |
1316 | | |
1317 | | // find inputs that are not on the same backend |
1318 | 0 | for (int j = 0; j < GGML_MAX_SRC; j++) { |
1319 | 0 | struct ggml_tensor * src = node->src[j]; |
1320 | 0 | if (src == NULL) { |
1321 | 0 | continue; |
1322 | 0 | } |
1323 | | |
1324 | 0 | size_t src_id = hash_id(src); |
1325 | 0 | const int src_backend_id = sched->hv_tensor_backend_ids[src_id]; |
1326 | 0 | GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now |
1327 | |
|
1328 | 0 | if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) { |
1329 | 0 | if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) { |
1330 | 0 | ggml_backend_t backend = sched->backends[src_backend_id]; |
1331 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1332 | 0 | struct ggml_tensor * tensor_copy; |
1333 | 0 | if (c == sched->cur_copy) { |
1334 | 0 | tensor_copy = src; // use the original tensor as the current copy |
1335 | 0 | } else { |
1336 | 0 | tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); |
1337 | 0 | ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); |
1338 | 0 | } |
1339 | 0 | ggml_set_input(tensor_copy); |
1340 | 0 | ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor |
1341 | 0 | tensor_id_copy(src_id, src_backend_id, c) = tensor_copy; |
1342 | 0 | SET_CAUSE(tensor_copy, "4.cpy"); |
1343 | 0 | } |
1344 | 0 | int n_graph_inputs = sched->n_graph_inputs++; |
1345 | 0 | GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); |
1346 | 0 | sched->graph_inputs[n_graph_inputs] = src; |
1347 | 0 | } |
1348 | 0 | } |
1349 | |
|
1350 | 0 | if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) { |
1351 | | // create a copy of the input in the split's backend |
1352 | 0 | if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) { |
1353 | 0 | ggml_backend_t backend = sched->backends[cur_backend_id]; |
1354 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1355 | 0 | struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); |
1356 | 0 | ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c); |
1357 | 0 | if (sched->n_copies > 1) { |
1358 | 0 | ggml_set_input(tensor_copy); |
1359 | 0 | ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor |
1360 | 0 | } |
1361 | 0 | tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy; |
1362 | 0 | SET_CAUSE(tensor_copy, "4.cpy"); |
1363 | 0 | } |
1364 | 0 | int n_inputs = split->n_inputs++; |
1365 | 0 | GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS); |
1366 | 0 | split->inputs[n_inputs] = src; |
1367 | 0 | } |
1368 | 0 | node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy); |
1369 | 0 | } |
1370 | 0 | } |
1371 | 0 | } |
1372 | 0 | split->i_end = graph->n_nodes; |
1373 | 0 | sched->n_splits = i_split + 1; |
1374 | 0 | } |
1375 | |
|
1376 | 0 | if (sched->debug) { |
1377 | 0 | ggml_backend_sched_print_assignments(sched, graph); |
1378 | 0 | } |
1379 | | |
1380 | | // swap node_backend_ids and leaf_backend_ids with prevs
1381 | 0 | { |
1382 | 0 | int * tmp = sched->node_backend_ids; |
1383 | 0 | sched->node_backend_ids = sched->prev_node_backend_ids; |
1384 | 0 | sched->prev_node_backend_ids = tmp; |
1385 | |
|
1386 | 0 | tmp = sched->leaf_backend_ids; |
1387 | 0 | sched->leaf_backend_ids = sched->prev_leaf_backend_ids; |
1388 | 0 | sched->prev_leaf_backend_ids = tmp; |
1389 | 0 | } |
1390 | |
|
1391 | 0 | int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies; |
1392 | | |
1393 | | // remember the actual graph_size for performing reallocation checks later [GGML_SCHED_DEBUG_REALLOC] |
1394 | 0 | sched->debug_prev_graph_size = sched->debug_graph_size; |
1395 | 0 | sched->debug_graph_size = graph_size; |
1396 | |
|
1397 | 0 | if (sched->graph.size < graph_size) { |
1398 | 0 | sched->graph.size = graph_size; |
1399 | 0 | sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *)); |
1400 | 0 | sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *)); |
1401 | 0 | GGML_ASSERT(sched->graph.nodes != NULL); |
1402 | 0 | GGML_ASSERT(sched->graph.leafs != NULL); |
1403 | 0 | } |
1404 | 0 | sched->graph.n_nodes = 0; |
1405 | 0 | sched->graph.n_leafs = 0; |
1406 | |
|
1407 | 0 | struct ggml_cgraph * graph_copy = &sched->graph; |
1408 | |
|
1409 | 0 | for (int i = 0; i < sched->n_splits; i++) { |
1410 | 0 | struct ggml_backend_sched_split * split = &sched->splits[i]; |
1411 | 0 | split->graph = ggml_graph_view(graph, split->i_start, split->i_end); |
1412 | | |
1413 | | // Optimize this split of the graph. This needs to happen before we make graph_copy, |
1414 | | // so they are in sync. |
1415 | 0 | ggml_backend_graph_optimize(sched->backends[split->backend_id], &split->graph); |
1416 | | |
1417 | | // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split |
1418 | 0 | for (int j = 0; j < split->n_inputs; j++) { |
1419 | 0 | assert(graph_copy->size > (graph_copy->n_nodes + 1)); |
1420 | |
|
1421 | 0 | struct ggml_tensor * input = split->inputs[j]; |
1422 | 0 | const size_t input_id = hash_id(input); |
1423 | 0 | struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy); |
1424 | | |
1425 | | // add a dependency to the input source so that it is not freed before the copy is done |
1426 | 0 | struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input); |
1427 | 0 | input_dep->src[0] = input; |
1428 | 0 | sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id]; |
1429 | 0 | graph_copy->nodes[graph_copy->n_nodes++] = input_dep; |
1430 | | |
1431 | | // add a dependency to the input copy so that it is allocated at the start of the split |
1432 | 0 | sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id; |
1433 | 0 | graph_copy->nodes[graph_copy->n_nodes++] = input_cpy; |
1434 | 0 | } |
1435 | |
|
1436 | 0 | for (int j = split->i_start; j < split->i_end; j++) { |
1437 | 0 | assert(graph_copy->size > graph_copy->n_nodes); |
1438 | 0 | sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]); |
1439 | 0 | graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j]; |
1440 | 0 | } |
1441 | 0 | } |
1442 | |
|
1443 | 0 | if (sched->n_copies > 1) { |
1444 | | // add input copies as leafs so that they are allocated first |
1445 | 0 | for (int i = 0; i < sched->n_graph_inputs; i++) { |
1446 | 0 | struct ggml_tensor * input = sched->graph_inputs[i]; |
1447 | 0 | size_t id = hash_id(input); |
1448 | 0 | int backend_id = tensor_backend_id(input); |
1449 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1450 | 0 | struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); |
1451 | 0 | sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; |
1452 | 0 | assert(graph_copy->size > graph_copy->n_leafs); |
1453 | 0 | graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; |
1454 | 0 | } |
1455 | 0 | } |
1456 | |
|
1457 | 0 | for (int i = 0; i < sched->n_splits; i++) { |
1458 | 0 | struct ggml_backend_sched_split * split = &sched->splits[i]; |
1459 | 0 | int backend_id = split->backend_id; |
1460 | 0 | for (int j = 0; j < split->n_inputs; j++) { |
1461 | 0 | struct ggml_tensor * input = split->inputs[j]; |
1462 | 0 | size_t id = hash_id(input); |
1463 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1464 | 0 | struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c); |
1465 | 0 | sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id; |
1466 | 0 | assert(graph_copy->size > graph_copy->n_leafs); |
1467 | 0 | graph_copy->leafs[graph_copy->n_leafs++] = input_cpy; |
1468 | 0 | } |
1469 | 0 | } |
1470 | 0 | } |
1471 | 0 | } |
1472 | | |
1473 | | // add leafs from the original graph |
1474 | 0 | for (int i = 0; i < graph->n_leafs; i++) { |
1475 | 0 | struct ggml_tensor * leaf = graph->leafs[i]; |
1476 | 0 | sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf); |
1477 | 0 | assert(graph_copy->size > graph_copy->n_leafs); |
1478 | 0 | graph_copy->leafs[graph_copy->n_leafs++] = leaf; |
1479 | 0 | } |
1480 | 0 | } |
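For orientation, here is a minimal, editor-added sketch (not part of the upstream file) of how the split array built above could be inspected while debugging. It only uses fields and helpers that already appear in this translation unit; the function name is hypothetical.

// hypothetical debug helper: print each split produced by ggml_backend_sched_split_graph
static void ggml_backend_sched_dump_splits(ggml_backend_sched_t sched) {
    for (int i = 0; i < sched->n_splits; i++) {
        const struct ggml_backend_sched_split * split = &sched->splits[i];
        GGML_LOG_DEBUG("split %3d: backend = %s, nodes = [%d, %d), n_inputs = %d\n",
                       i, ggml_backend_name(sched->backends[split->backend_id]),
                       split->i_start, split->i_end, split->n_inputs);
    }
}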
1481 | | |
1482 | 0 | static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) { |
1483 | 0 | bool backend_ids_changed = false; |
1484 | 0 | for (int i = 0; i < sched->graph.n_nodes; i++) { |
1485 | 0 | if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] && |
1486 | 0 | sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) { |
1487 | 0 | backend_ids_changed = true; |
1488 | 0 | break; |
1489 | 0 | } |
1490 | 0 | } |
1491 | 0 | if (!backend_ids_changed) { |
1492 | 0 | for (int i = 0; i < sched->graph.n_leafs; i++) { |
1493 | 0 | if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] && |
1494 | 0 | sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) { |
1495 | 0 | backend_ids_changed = true; |
1496 | 0 | break; |
1497 | 0 | } |
1498 | 0 | } |
1499 | 0 | } |
1500 | | |
1501 | | // allocate graph |
1502 | 0 | if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { |
1503 | | #ifndef NDEBUG |
1504 | | GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed); |
1505 | | #endif |
1506 | |
|
1507 | 0 | if (sched->debug_realloc > 0) { |
1508 | | // we are interested only in situations where the graph was reallocated even though its size remained the same [GGML_SCHED_DEBUG_REALLOC] |
1509 | | // example: https://github.com/ggml-org/llama.cpp/pull/17143 |
1510 | 0 | const bool unexpected = !backend_ids_changed && sched->debug_prev_graph_size == sched->debug_graph_size; |
1511 | |
|
1512 | 0 | if (unexpected || sched->debug_realloc > 1) { |
1513 | 0 | GGML_ABORT("%s: unexpected graph reallocation (graph size = %d, nodes = %d, leafs = %d), debug_realloc = %d\n", __func__, |
1514 | 0 | sched->debug_graph_size, sched->graph.n_nodes, sched->graph.n_leafs, sched->debug_realloc); |
1515 | 0 | } |
1516 | 0 | } |
1517 | | |
1518 | | // the re-allocation may cause the split inputs to be moved to a different address |
1519 | | // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy |
1520 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
1521 | 0 | ggml_backend_synchronize(sched->backends[i]); |
1522 | 0 | } |
1523 | |
|
1524 | 0 | ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids); |
1525 | 0 | if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { |
1526 | 0 | GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__); |
1527 | 0 | return false; |
1528 | 0 | } |
1529 | 0 | } |
1530 | | |
1531 | 0 | return true; |
1532 | 0 | } |
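The reallocation diagnostics above are controlled by environment variables that ggml_backend_sched_new (below) reads once at construction. A small, editor-added sketch of enabling them from the host program, assuming a POSIX setenv (declared in the stdlib.h already included at the top of this file); on Windows, _putenv_s would be the equivalent.

// must run before ggml_backend_sched_new, which reads the environment only once
static void enable_sched_debugging(void) {
    setenv("GGML_SCHED_DEBUG",         "1", 1); // print the node/backend assignments after each graph split
    setenv("GGML_SCHED_DEBUG_REALLOC", "1", 1); // abort when the graph is reallocated although its size and backend ids did not change
}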
1533 | | |
1534 | 0 | static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) { |
1535 | 0 | GGML_ASSERT(sched); |
1536 | 0 | struct ggml_backend_sched_split * splits = sched->splits; |
1537 | |
|
1538 | 0 | ggml_tensor * prev_ids_tensor = nullptr; |
1539 | 0 | std::vector<int32_t> ids; |
1540 | 0 | std::vector<ggml_bitset_t> used_ids; |
1541 | |
|
1542 | 0 | for (int split_id = 0; split_id < sched->n_splits; split_id++) { |
1543 | 0 | struct ggml_backend_sched_split * split = &splits[split_id]; |
1544 | 0 | int split_backend_id = split->backend_id; |
1545 | 0 | ggml_backend_t split_backend = sched->backends[split_backend_id]; |
1546 | | |
1547 | | // copy the input tensors to the split backend |
1548 | 0 | for (int input_id = 0; input_id < split->n_inputs; input_id++) { |
1549 | 0 | ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]); |
1550 | 0 | struct ggml_tensor * input = split->inputs[input_id]; |
1551 | 0 | struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy); |
1552 | |
|
1553 | 0 | if (input->flags & GGML_TENSOR_FLAG_INPUT) { |
1554 | | // inputs from the user must be copied immediately to prevent the user from overwriting the data before the copy is done
1555 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1556 | 0 | ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); |
1557 | 0 | } else { |
1558 | 0 | ggml_backend_synchronize(split_backend); |
1559 | 0 | } |
1560 | 0 | ggml_backend_tensor_copy(input, input_cpy); |
1561 | 0 | } else { |
1562 | | // wait for the split backend to finish using the input before overwriting it |
1563 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1564 | 0 | ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]); |
1565 | 0 | } else { |
1566 | 0 | ggml_backend_synchronize(split_backend); |
1567 | 0 | } |
1568 | | |
1569 | | // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used |
1570 | 0 | ggml_tensor * node = split->graph.nodes[0]; |
1571 | 0 | if (split->graph.n_nodes > 0 && |
1572 | 0 | ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && |
1573 | 0 | ggml_backend_buffer_is_host(input->buffer) && ( |
1574 | 0 | (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID) |
1575 | | //|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) /* GGML_OP_ADD_ID weights are small and not worth splitting */ |
1576 | 0 | )) { |
1577 | |
|
1578 | 0 | const int64_t n_expert = node->op == GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1]; |
1579 | 0 | const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1]; |
1580 | |
|
1581 | 0 | ggml_backend_synchronize(input_backend); |
1582 | | |
1583 | | // get the ids |
1584 | 0 | ggml_tensor * ids_tensor = node->src[2]; |
1585 | 0 | ggml_backend_t ids_backend = split_backend; |
1586 | | |
1587 | | // if the ids tensor is also an input of the split, it may not have been copied yet to the split backend |
1588 | | // in that case, we use the original ids tensor |
1589 | 0 | for (int i = input_id + 1; i < split->n_inputs; i++) { |
1590 | 0 | if (ids_tensor == tensor_copy(split->inputs[i], split_backend_id, sched->cur_copy)) { |
1591 | 0 | ids_tensor = split->inputs[i]; |
1592 | 0 | ids_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[i]); |
1593 | 0 | break; |
1594 | 0 | } |
1595 | 0 | } |
1596 | |
|
1597 | 0 | if (ids_tensor != prev_ids_tensor) { |
1598 | 0 | ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t)); |
1599 | 0 | ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor)); |
1600 | 0 | ggml_backend_synchronize(ids_backend); |
1601 | | |
1602 | | // find the used experts |
1603 | 0 | used_ids.clear(); |
1604 | 0 | used_ids.resize(ggml_bitset_size(n_expert)); |
1605 | 0 | for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) { |
1606 | 0 | for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) { |
1607 | 0 | int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)]; |
1608 | 0 | GGML_ASSERT(id >= 0 && id < n_expert); |
1609 | 0 | ggml_bitset_set(used_ids.data(), id); |
1610 | 0 | } |
1611 | 0 | } |
1612 | |
|
1613 | 0 | prev_ids_tensor = ids_tensor; |
1614 | 0 | } |
1615 | | |
1616 | | // group consecutive experts and copy them together |
1617 | 0 | auto copy_experts = [&](int32_t first_id, int32_t last_id) { |
1618 | 0 | const size_t expert_offset = first_id * expert_size; |
1619 | 0 | const size_t expert_size_copy = (last_id - first_id + 1) * expert_size; |
1620 | 0 | const size_t padding = std::min<size_t>(expert_size, 512); |
1621 | 0 | const size_t padding_end = last_id < n_expert - 1 ? padding : 0; |
1622 | |
|
1623 | 0 | ggml_backend_tensor_set_async(split_backend, |
1624 | 0 | input_cpy, |
1625 | 0 | (const uint8_t *)input->data + expert_offset, expert_offset, |
1626 | | // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert
1627 | | // this is necessary for MMQ in the CUDA backend |
1628 | 0 | expert_size_copy + padding_end); |
1629 | 0 | }; |
1630 | |
|
1631 | 0 | int id = 0; |
1632 | 0 | while (!ggml_bitset_get(used_ids.data(), id)) { |
1633 | 0 | id++; |
1634 | 0 | } |
1635 | 0 | int32_t first_id = id; |
1636 | 0 | int32_t last_id = first_id; |
1637 | |
|
1638 | 0 | for (++id; id < n_expert; ++id) { |
1639 | 0 | if (!ggml_bitset_get(used_ids.data(), id)) { |
1640 | 0 | continue; |
1641 | 0 | } |
1642 | | |
1643 | 0 | if (id == last_id + 1) { |
1644 | 0 | last_id = id; |
1645 | 0 | continue; |
1646 | 0 | } |
1647 | | |
1648 | 0 | copy_experts(first_id, last_id); |
1649 | |
|
1650 | 0 | first_id = id; |
1651 | 0 | last_id = id; |
1652 | 0 | } |
1653 | 0 | copy_experts(first_id, last_id); |
1654 | 0 | } else { |
1655 | | // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events |
1656 | | // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface |
1657 | 0 | if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) { |
1658 | 0 | ggml_backend_synchronize(input_backend); |
1659 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1660 | 0 | ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]); |
1661 | 0 | } else { |
1662 | 0 | ggml_backend_synchronize(split_backend); |
1663 | 0 | } |
1664 | 0 | ggml_backend_tensor_copy(input, input_cpy); |
1665 | 0 | } |
1666 | 0 | } |
1667 | 0 | } |
1668 | 0 | } |
1669 | |
|
1670 | 0 | if (!sched->callback_eval) { |
1671 | 0 | enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph); |
1672 | 0 | if (ec != GGML_STATUS_SUCCESS) { |
1673 | 0 | return ec; |
1674 | 0 | } |
1675 | 0 | } else { |
1676 | | // similar to ggml_backend_compare_graph_backend |
1677 | 0 | for (int j0 = 0; j0 < split->graph.n_nodes; j0++) { |
1678 | 0 | struct ggml_tensor * t = split->graph.nodes[j0]; |
1679 | | |
1680 | | // check if the user needs data from this node |
1681 | 0 | bool need = sched->callback_eval(t, true, sched->callback_eval_user_data); |
1682 | |
|
1683 | 0 | int j1 = j0; |
1684 | | |
1685 | | // determine the range [j0, j1] of nodes that can be computed together |
1686 | 0 | while (!need && j1 < split->graph.n_nodes - 1) { |
1687 | 0 | t = split->graph.nodes[++j1]; |
1688 | 0 | need = sched->callback_eval(t, true, sched->callback_eval_user_data); |
1689 | 0 | } |
1690 | |
|
1691 | 0 | struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1); |
1692 | |
|
1693 | 0 | enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv); |
1694 | 0 | if (ec != GGML_STATUS_SUCCESS) { |
1695 | 0 | return ec; |
1696 | 0 | } |
1697 | | |
1698 | | // TODO: pass backend to the callback, then the user can decide if they want to synchronize |
1699 | 0 | ggml_backend_synchronize(split_backend); |
1700 | |
|
1701 | 0 | if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) { |
1702 | 0 | break; |
1703 | 0 | } |
1704 | | |
1705 | 0 | j0 = j1; |
1706 | 0 | } |
1707 | 0 | } |
1708 | | |
1709 | | // record the event of this copy |
1710 | 0 | if (split->n_inputs > 0) { |
1711 | 0 | if (sched->events[split_backend_id][sched->cur_copy] != NULL) { |
1712 | 0 | ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend); |
1713 | 0 | } |
1714 | 0 | } |
1715 | 0 | } |
1716 | | |
1717 | 0 | return GGML_STATUS_SUCCESS; |
1718 | 0 | } |
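The callback_eval path above lets the host application inspect intermediate tensors between partial evaluations of a split. Below is a minimal, editor-added sketch of such a callback (not part of the upstream file); it assumes the public ggml_backend_sched_eval_callback signature bool (ggml_tensor * t, bool ask, void * user_data), and the name filter is an arbitrary example.

// called twice per tensor: first with ask == true ("does the user want this node's data?"),
// then, if requested, with ask == false after the node has been computed and the backend synchronized
static bool example_eval_callback(struct ggml_tensor * t, bool ask, void * user_data) {
    (void) user_data;
    if (ask) {
        return strstr(t->name, "attn") != NULL; // arbitrary filter: only observe tensors named *attn*
    }
    // at this point the data can be read back, e.g. with ggml_backend_tensor_get
    GGML_LOG_DEBUG("observed %s (op = %s)\n", t->name, ggml_op_name(t->op));
    return true; // returning false stops evaluation of the remaining nodes in the current split
}

// registered with: ggml_backend_sched_set_eval_callback(sched, example_eval_callback, NULL);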
1719 | | |
1720 | | ggml_backend_sched_t ggml_backend_sched_new( |
1721 | | ggml_backend_t * backends, |
1722 | | ggml_backend_buffer_type_t * bufts, |
1723 | | int n_backends, |
1724 | | size_t graph_size, |
1725 | | bool parallel, |
1726 | 0 | bool op_offload) { |
1727 | 0 | GGML_ASSERT(n_backends > 0); |
1728 | 0 | GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS); |
1729 | 0 | GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU); |
1730 | |
|
1731 | 0 | struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched)); |
1732 | |
|
1733 | 0 | const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG"); |
1734 | 0 | sched->debug = GGML_SCHED_DEBUG ? atoi(GGML_SCHED_DEBUG) : 0; |
1735 | |
|
1736 | 0 | sched->debug_realloc = 0; |
1737 | | #ifdef GGML_SCHED_NO_REALLOC |
1738 | | sched->debug_realloc = 1; |
1739 | | #endif |
1740 | 0 | const char * GGML_SCHED_DEBUG_REALLOC = getenv("GGML_SCHED_DEBUG_REALLOC"); |
1741 | 0 | sched->debug_realloc = GGML_SCHED_DEBUG_REALLOC ? atoi(GGML_SCHED_DEBUG_REALLOC) : sched->debug_realloc; |
1742 | |
|
1743 | 0 | sched->n_backends = n_backends; |
1744 | 0 | sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1; |
1745 | | |
1746 | | // initialize hash table |
1747 | | // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead) |
1748 | 0 | sched->hash_set = ggml_hash_set_new(graph_size); |
1749 | 0 | sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0])); |
1750 | 0 | sched->hv_tensor_copies = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *)); |
1751 | |
|
1752 | 0 | const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph |
1753 | 0 | const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2; |
1754 | 0 | sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0])); |
1755 | 0 | sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0])); |
1756 | 0 | sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0])); |
1757 | 0 | sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0])); |
1758 | |
|
1759 | 0 | sched->debug_graph_size = 0; |
1760 | 0 | sched->debug_prev_graph_size = 0; |
1761 | |
|
1762 | 0 | sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false); |
1763 | 0 | sched->context_buffer = (char *) malloc(sched->context_buffer_size); |
1764 | |
|
1765 | 0 | const int initial_splits_capacity = 16; |
1766 | 0 | sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0])); |
1767 | 0 | sched->splits_capacity = initial_splits_capacity; |
1768 | |
|
1769 | 0 | for (int b = 0; b < n_backends; b++) { |
1770 | 0 | sched->backends[b] = backends[b]; |
1771 | 0 | sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]); |
1772 | 0 | GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b])); |
1773 | |
|
1774 | 0 | if (sched->n_copies > 1) { |
1775 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1776 | 0 | sched->events[b][c] = ggml_backend_event_new(backends[b]->device); |
1777 | 0 | } |
1778 | 0 | } |
1779 | 0 | } |
1780 | |
|
1781 | 0 | sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends); |
1782 | 0 | sched->op_offload = op_offload; |
1783 | |
|
1784 | 0 | ggml_backend_sched_reset(sched); |
1785 | |
|
1786 | 0 | return sched; |
1787 | 0 | } |
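A minimal, editor-added usage sketch for the constructor above (not part of the upstream file): building a scheduler from an already-initialized backend list and letting every backend use its default buffer type. The graph size is an arbitrary example value; it only needs to bound n_nodes + n_leafs of the graphs that will be scheduled (see the asserts in the reserve/alloc functions below).

// backends[] must end with a CPU backend, as asserted above; bufts == NULL selects
// ggml_backend_get_default_buffer_type() for every backend
static ggml_backend_sched_t example_sched_new(ggml_backend_t * backends, int n_backends) {
    const size_t graph_size = 2048;  // example upper bound on nodes + leafs per graph
    const bool   parallel   = false; // single copy, no per-backend events
    const bool   op_offload = true;  // forwarded to the backend assignment pass earlier in this file
    return ggml_backend_sched_new(backends, /*bufts=*/NULL, n_backends, graph_size, parallel, op_offload);
}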
1788 | | |
1789 | 0 | void ggml_backend_sched_free(ggml_backend_sched_t sched) { |
1790 | 0 | if (sched == NULL) { |
1791 | 0 | return; |
1792 | 0 | } |
1793 | 0 | for (int b = 0; b < sched->n_backends; b++) { |
1794 | 0 | for (int c = 0; c < sched->n_copies; c++) { |
1795 | 0 | ggml_backend_event_free(sched->events[b][c]); |
1796 | 0 | } |
1797 | 0 | } |
1798 | 0 | ggml_gallocr_free(sched->galloc); |
1799 | 0 | ggml_free(sched->ctx); |
1800 | 0 | ggml_hash_set_free(&sched->hash_set); |
1801 | 0 | free(sched->splits); |
1802 | 0 | free(sched->hv_tensor_backend_ids); |
1803 | 0 | free(sched->hv_tensor_copies); |
1804 | 0 | free(sched->node_backend_ids); |
1805 | 0 | free(sched->leaf_backend_ids); |
1806 | 0 | free(sched->prev_node_backend_ids); |
1807 | 0 | free(sched->prev_leaf_backend_ids); |
1808 | 0 | free(sched->context_buffer); |
1809 | 0 | free(sched->graph.nodes); |
1810 | 0 | free(sched->graph.leafs); |
1811 | 0 | free(sched); |
1812 | 0 | } |
1813 | | |
1814 | 0 | void ggml_backend_sched_reset(ggml_backend_sched_t sched) { |
1815 | 0 | GGML_ASSERT(sched); |
1816 | | // reset state for the next run |
1817 | 0 | if (!sched->is_reset) { |
1818 | 0 | ggml_hash_set_reset(&sched->hash_set); |
1819 | 0 | memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0])); |
1820 | 0 | memset(sched->hv_tensor_copies, 0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *)); |
1821 | 0 | sched->is_reset = true; |
1822 | 0 | } |
1823 | 0 | sched->is_alloc = false; |
1824 | 0 | } |
1825 | | |
1826 | 0 | void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes) { |
1827 | 0 | GGML_ASSERT(sched); |
1828 | 0 | GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); |
1829 | 0 | GGML_ASSERT(sizes); |
1830 | |
|
1831 | 0 | ggml_backend_sched_reset(sched); |
1832 | |
|
1833 | 0 | ggml_backend_sched_synchronize(sched); |
1834 | |
|
1835 | 0 | ggml_backend_sched_split_graph(sched, measure_graph); |
1836 | |
|
1837 | 0 | ggml_gallocr_reserve_n_size(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids, sizes); |
1838 | 0 | } |
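An editor-added sketch of using the size query above (not part of the upstream file). It assumes sizes[] holds one entry per backend, in the same order as the backends passed to ggml_backend_sched_new, matching the gallocr created there.

// report the compute buffer size each backend would need for measure_graph
static void example_report_compute_sizes(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
    const int n_backends = ggml_backend_sched_get_n_backends(sched);
    std::vector<size_t> sizes(n_backends);
    ggml_backend_sched_reserve_size(sched, measure_graph, sizes.data());
    for (int i = 0; i < n_backends; i++) {
        GGML_LOG_DEBUG("%s: compute buffer size = %zu bytes\n",
                       ggml_backend_name(ggml_backend_sched_get_backend(sched, i)), sizes[i]);
    }
}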
1839 | | |
1840 | 0 | bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { |
1841 | 0 | GGML_ASSERT(sched); |
1842 | 0 | GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); |
1843 | |
|
1844 | 0 | ggml_backend_sched_synchronize(sched); |
1845 | |
|
1846 | 0 | ggml_backend_sched_split_graph(sched, measure_graph); |
1847 | |
|
1848 | 0 | if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) { |
1849 | 0 | return false; |
1850 | 0 | } |
1851 | | |
1852 | 0 | ggml_backend_sched_reset(sched); |
1853 | |
|
1854 | 0 | return true; |
1855 | 0 | } |
1856 | | |
1857 | 0 | bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
1858 | 0 | GGML_ASSERT(sched); |
1859 | 0 | GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs); |
1860 | 0 | GGML_ASSERT(!sched->is_alloc); |
1861 | |
|
1862 | 0 | sched->cur_copy = sched->next_copy; |
1863 | 0 | sched->next_copy = (sched->next_copy + 1) % sched->n_copies; |
1864 | |
|
1865 | 0 | ggml_backend_sched_split_graph(sched, graph); |
1866 | |
|
1867 | 0 | if (!ggml_backend_sched_alloc_splits(sched)) { |
1868 | 0 | return false; |
1869 | 0 | } |
1870 | | |
1871 | 0 | sched->is_alloc = true; |
1872 | |
|
1873 | 0 | return true; |
1874 | 0 | } |
1875 | | |
1876 | 0 | enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
1877 | 0 | enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph); |
1878 | 0 | ggml_backend_sched_synchronize(sched); |
1879 | 0 | return err; |
1880 | 0 | } |
1881 | | |
1882 | 0 | enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { |
1883 | 0 | GGML_ASSERT(sched); |
1884 | 0 | if (!sched->is_reset && !sched->is_alloc) { |
1885 | 0 | ggml_backend_sched_reset(sched); |
1886 | 0 | } |
1887 | |
|
1888 | 0 | if (!sched->is_alloc) { |
1889 | 0 | if (!ggml_backend_sched_alloc_graph(sched, graph)) { |
1890 | 0 | return GGML_STATUS_ALLOC_FAILED; |
1891 | 0 | } |
1892 | 0 | } |
1893 | | |
1894 | 0 | return ggml_backend_sched_compute_splits(sched); |
1895 | 0 | } |
1896 | | |
1897 | 0 | void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) { |
1898 | 0 | GGML_ASSERT(sched); |
1899 | 0 | for (int i = 0; i < sched->n_backends; i++) { |
1900 | 0 | ggml_backend_synchronize(sched->backends[i]); |
1901 | 0 | } |
1902 | 0 | if (!sched->is_alloc) { |
1903 | | // if the graph is not already allocated, always use copy 0 after a synchronization |
1904 | | // this ensures that during generation the same copy is used every time, |
1905 | | // which avoids changes in the graph that could cause CUDA or other graphs to be disabled |
1906 | 0 | sched->next_copy = 0; |
1907 | 0 | } |
1908 | 0 | } |
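Putting the public entry points together, a minimal editor-added sketch (not part of the upstream file) of the per-step flow after an initial ggml_backend_sched_reserve: compute the current graph, then reset the assignments for the next one. The graph-building step is application-specific and not shown.

static enum ggml_status example_run_step(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    // splits the graph, allocates it, computes all splits and synchronizes the backends
    enum ggml_status status = ggml_backend_sched_graph_compute(sched, graph);
    if (status != GGML_STATUS_SUCCESS) {
        return status;
    }
    // clear the tensor/backend assignments so the next (possibly different) graph starts clean
    ggml_backend_sched_reset(sched);
    return GGML_STATUS_SUCCESS;
}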
1909 | | |
1910 | 0 | void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { |
1911 | 0 | GGML_ASSERT(sched); |
1912 | 0 | sched->callback_eval = callback; |
1913 | 0 | sched->callback_eval_user_data = user_data; |
1914 | 0 | } |
1915 | | |
1916 | 0 | int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) { |
1917 | 0 | GGML_ASSERT(sched); |
1918 | 0 | return sched->n_splits; |
1919 | 0 | } |
1920 | | |
1921 | 0 | int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) { |
1922 | 0 | GGML_ASSERT(sched); |
1923 | 0 | return sched->n_copies; |
1924 | 0 | } |
1925 | | |
1926 | 0 | int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) { |
1927 | 0 | GGML_ASSERT(sched); |
1928 | 0 | return sched->n_backends; |
1929 | 0 | } |
1930 | | |
1931 | 0 | ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) { |
1932 | 0 | GGML_ASSERT(sched); |
1933 | 0 | GGML_ASSERT(i >= 0 && i < sched->n_backends); |
1934 | 0 | return sched->backends[i]; |
1935 | 0 | } |
1936 | | |
1937 | 0 | ggml_backend_buffer_type_t ggml_backend_sched_get_buffer_type(ggml_backend_sched_t sched, ggml_backend_t backend) { |
1938 | 0 | GGML_ASSERT(sched); |
1939 | 0 | int backend_index = ggml_backend_sched_backend_id(sched, backend); |
1940 | 0 | GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
1941 | |
|
1942 | 0 | return sched->bufts[backend_index]; |
1943 | 0 | } |
1944 | | |
1945 | 0 | size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) { |
1946 | 0 | GGML_ASSERT(sched); |
1947 | 0 | int backend_index = ggml_backend_sched_backend_id(sched, backend); |
1948 | 0 | GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
1949 | |
|
1950 | 0 | return ggml_gallocr_get_buffer_size(sched->galloc, backend_index); |
1951 | 0 | } |
1952 | | |
1953 | 0 | void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) { |
1954 | 0 | GGML_ASSERT(sched); |
1955 | 0 | int backend_index = ggml_backend_sched_backend_id(sched, backend); |
1956 | 0 | GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); |
1957 | 0 | tensor_backend_id(node) = backend_index; |
1958 | 0 | SET_CAUSE(node, "usr"); |
1959 | 0 | sched->is_reset = false; |
1960 | 0 | } |
1961 | | |
1962 | 0 | ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) { |
1963 | 0 | GGML_ASSERT(sched); |
1964 | 0 | int backend_index = tensor_backend_id(node); |
1965 | 0 | if (backend_index == -1) { |
1966 | 0 | return NULL; |
1967 | 0 | } |
1968 | 0 | return sched->backends[backend_index]; |
1969 | 0 | } |
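A small editor-added sketch of the pinning API above (not part of the upstream file): forcing a particular tensor, e.g. a graph output, onto a chosen backend before the scheduler assigns backends, so the result ends up where the application wants to read it. The names are placeholders.

// call after ggml_backend_sched_reset and before ggml_backend_sched_alloc_graph / graph_compute,
// i.e. before the scheduler performs its backend assignment for the current graph
static void example_pin_tensor(ggml_backend_sched_t sched, struct ggml_tensor * output, ggml_backend_t backend) {
    ggml_backend_sched_set_tensor_backend(sched, output, backend);
    GGML_ASSERT(ggml_backend_sched_get_tensor_backend(sched, output) == backend);
}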
1970 | | |
1971 | | // utils |
1972 | | |
1973 | 0 | enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor) { |
1974 | 0 | GGML_ASSERT(tensor); |
1975 | 0 | GGML_ASSERT(tensor->buffer == NULL); |
1976 | 0 | GGML_ASSERT(tensor->view_src != NULL); |
1977 | 0 | GGML_ASSERT(tensor->view_src->buffer != NULL); |
1978 | 0 | GGML_ASSERT(tensor->view_src->data != NULL); |
1979 | |
|
1980 | 0 | tensor->buffer = tensor->view_src->buffer; |
1981 | 0 | tensor->data = (char *)tensor->view_src->data + tensor->view_offs; |
1982 | 0 | return ggml_backend_buffer_init_tensor(tensor->buffer, tensor); |
1983 | 0 | } |
1984 | | |
1985 | 0 | enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) { |
1986 | 0 | GGML_ASSERT(tensor); |
1987 | 0 | GGML_ASSERT(tensor->buffer == NULL); |
1988 | 0 | GGML_ASSERT(tensor->data == NULL); |
1989 | 0 | GGML_ASSERT(tensor->view_src == NULL); |
1990 | 0 | GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer)); |
1991 | 0 | GGML_ASSERT(ggml_backend_buffer_is_meta(buffer) || |
1992 | 0 | (char *) addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <= |
1993 | 0 | (char *) ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer)); |
1994 | |
|
1995 | 0 | tensor->buffer = buffer; |
1996 | 0 | tensor->data = addr; |
1997 | 0 | return ggml_backend_buffer_init_tensor(buffer, tensor); |
1998 | 0 | } |
1999 | | |
2000 | | static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies, |
2001 | 0 | struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) { |
2002 | |
|
2003 | 0 | GGML_ASSERT(src != NULL); |
2004 | 0 | GGML_ASSERT(src->data && "graph must be allocated"); |
2005 | |
|
2006 | 0 | size_t id = ggml_hash_insert(&hash_set, src); |
2007 | 0 | if (id == GGML_HASHSET_ALREADY_EXISTS) { |
2008 | 0 | return node_copies[ggml_hash_find(&hash_set, src)]; |
2009 | 0 | } |
2010 | | |
2011 | 0 | struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src); |
2012 | 0 | if (src->view_src != NULL) { |
2013 | 0 | dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src); |
2014 | 0 | dst->view_offs = src->view_offs; |
2015 | 0 | } |
2016 | 0 | dst->op = src->op; |
2017 | 0 | dst->flags = src->flags; |
2018 | 0 | memcpy(dst->op_params, src->op_params, sizeof(dst->op_params)); |
2019 | 0 | ggml_set_name(dst, src->name); |
2020 | | |
2021 | | // copy src |
2022 | 0 | for (int i = 0; i < GGML_MAX_SRC; i++) { |
2023 | 0 | struct ggml_tensor * s = src->src[i]; |
2024 | 0 | if (s == NULL) { |
2025 | 0 | continue; |
2026 | 0 | } |
2027 | 0 | dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s); |
2028 | 0 | } |
2029 | |
|
2030 | 0 | node_copies[id] = dst; |
2031 | 0 | return dst; |
2032 | 0 | } |
2033 | | |
2034 | 0 | static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) { |
2035 | 0 | size_t id = ggml_hash_find(hash_set, src); |
2036 | 0 | if (node_init[id]) { |
2037 | 0 | return; |
2038 | 0 | } |
2039 | 0 | node_init[id] = true; |
2040 | |
|
2041 | 0 | struct ggml_tensor * dst = node_copies[id]; |
2042 | 0 | if (dst->view_src != NULL) { |
2043 | 0 | graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src); |
2044 | 0 | enum ggml_status status = ggml_backend_view_init(dst); |
2045 | 0 | GGML_ASSERT(status == GGML_STATUS_SUCCESS); |
2046 | 0 | } |
2047 | 0 | else { |
2048 | 0 | ggml_backend_tensor_copy(src, dst); |
2049 | 0 | } |
2050 | | |
2051 | | // init src |
2052 | 0 | for (int i = 0; i < GGML_MAX_SRC; i++) { |
2053 | 0 | struct ggml_tensor * s = src->src[i]; |
2054 | 0 | if (s == NULL) { |
2055 | 0 | continue; |
2056 | 0 | } |
2057 | 0 | graph_copy_init_tensor(hash_set, node_copies, node_init, s); |
2058 | 0 | } |
2059 | 0 | } |
2060 | | |
2061 | 0 | struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) { |
2062 | 0 | GGML_ASSERT(graph); |
2063 | 0 | struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size); |
2064 | 0 | struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT |
2065 | 0 | bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0])); |
2066 | |
|
2067 | 0 | struct ggml_init_params params = { |
2068 | 0 | /* .mem_size = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false), |
2069 | 0 | /* .mem_buffer = */ NULL, |
2070 | 0 | /* .no_alloc = */ true |
2071 | 0 | }; |
2072 | |
|
2073 | 0 | struct ggml_context * ctx_allocated = ggml_init(params); |
2074 | 0 | struct ggml_context * ctx_unallocated = ggml_init(params); |
2075 | |
|
2076 | 0 | if (ctx_allocated == NULL || ctx_unallocated == NULL) { |
2077 | 0 | GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__); |
2078 | 0 | ggml_hash_set_free(&hash_set); |
2079 | 0 | free(node_copies); |
2080 | 0 | free(node_init); |
2081 | 0 | ggml_free(ctx_allocated); |
2082 | 0 | ggml_free(ctx_unallocated); |
2083 | 0 | return { |
2084 | 0 | /* .buffer = */ NULL, |
2085 | 0 | /* .ctx_allocated = */ NULL, |
2086 | 0 | /* .ctx_unallocated = */ NULL, |
2087 | 0 | /* .graph = */ NULL, |
2088 | 0 | }; |
2089 | 0 | } |
2090 | | |
2091 | | // dup nodes |
2092 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
2093 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
2094 | 0 | graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node); |
2095 | 0 | } |
2096 | | |
2097 | | // allocate nodes |
2098 | 0 | ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend); |
2099 | 0 | if (buffer == NULL) { |
2100 | 0 | GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__); |
2101 | 0 | ggml_hash_set_free(&hash_set); |
2102 | 0 | free(node_copies); |
2103 | 0 | free(node_init); |
2104 | 0 | ggml_free(ctx_allocated); |
2105 | 0 | ggml_free(ctx_unallocated); |
2106 | 0 | return { |
2107 | 0 | /* .buffer = */ NULL, |
2108 | 0 | /* .ctx_allocated = */ NULL, |
2109 | 0 | /* .ctx_unallocated = */ NULL, |
2110 | 0 | /* .graph = */ NULL, |
2111 | 0 | }; |
2112 | 0 | } |
2113 | | |
2114 | | //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024); |
2115 | | |
2116 | | // copy data and init views |
2117 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
2118 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
2119 | 0 | graph_copy_init_tensor(&hash_set, node_copies, node_init, node); |
2120 | 0 | } |
2121 | | |
2122 | | // build graph copy |
2123 | 0 | struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false); |
2124 | 0 | for (int i = 0; i < graph->n_nodes; i++) { |
2125 | 0 | struct ggml_tensor * node = graph->nodes[i]; |
2126 | 0 | struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)]; |
2127 | 0 | graph_copy->nodes[i] = node_copy; |
2128 | 0 | } |
2129 | 0 | graph_copy->n_nodes = graph->n_nodes; |
2130 | |
|
2131 | 0 | ggml_hash_set_free(&hash_set); |
2132 | 0 | free(node_copies); |
2133 | 0 | free(node_init); |
2134 | |
|
2135 | 0 | return { |
2136 | 0 | /* .buffer = */ buffer, |
2137 | 0 | /* .ctx_allocated = */ ctx_allocated, |
2138 | 0 | /* .ctx_unallocated = */ ctx_unallocated, |
2139 | 0 | /* .graph = */ graph_copy, |
2140 | 0 | }; |
2141 | 0 | } |
2142 | | |
2143 | 0 | void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) { |
2144 | 0 | ggml_backend_buffer_free(copy.buffer); |
2145 | 0 | ggml_free(copy.ctx_allocated); |
2146 | 0 | ggml_free(copy.ctx_unallocated); |
2147 | 0 | } |
2148 | | |
2149 | 0 | bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor const * const * test_nodes, size_t num_test_nodes) { |
2150 | 0 | struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph); |
2151 | 0 | if (copy.buffer == NULL) { |
2152 | 0 | return false; |
2153 | 0 | } |
2154 | | |
2155 | 0 | struct ggml_cgraph * g1 = graph; |
2156 | 0 | struct ggml_cgraph * g2 = copy.graph; |
2157 | |
|
2158 | 0 | assert(g1->n_nodes == g2->n_nodes); |
2159 | |
|
2160 | 0 | if (num_test_nodes != 0) { |
2161 | 0 | GGML_ASSERT(test_nodes); |
2162 | | // Compute the whole graph and only test the output for specific tensors |
2163 | 0 | ggml_backend_graph_compute(backend1, g1); |
2164 | 0 | ggml_backend_graph_compute(backend2, g2); |
2165 | |
|
2166 | 0 | bool verified = false; |
2167 | 0 | for (int i = 0; i < g1->n_nodes; i++) { |
2168 | 0 | for (size_t j = 0; j < num_test_nodes; ++j) { |
2169 | 0 | if (g1->nodes[i] == test_nodes[j]) { |
2170 | 0 | callback(i, g1->nodes[i], g2->nodes[i], user_data); |
2171 | 0 | verified = true; |
2172 | 0 | } |
2173 | 0 | } |
2174 | 0 | } |
2175 | 0 | GGML_ASSERT(verified); |
2176 | 0 | } else { |
2177 | 0 | for (int i = 0; i < g1->n_nodes; i++) { |
2178 | 0 | struct ggml_tensor * t1 = g1->nodes[i]; |
2179 | 0 | struct ggml_tensor * t2 = g2->nodes[i]; |
2180 | |
|
2181 | 0 | assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); |
2182 | |
|
2183 | 0 | struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); |
2184 | 0 | struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); |
2185 | |
|
2186 | 0 | ggml_backend_graph_compute(backend1, &g1v); |
2187 | 0 | ggml_backend_graph_compute(backend2, &g2v); |
2188 | |
|
2189 | 0 | if (ggml_is_view_op(t1->op)) { |
2190 | 0 | continue; |
2191 | 0 | } |
2192 | | |
2193 | | // compare results, calculate rms etc |
2194 | 0 | if (!callback(i, t1, t2, user_data)) { |
2195 | 0 | break; |
2196 | 0 | } |
2197 | 0 | } |
2198 | 0 | } |
2199 | 0 | ggml_backend_graph_copy_free(copy); |
2200 | |
|
2201 | 0 | return true; |
2202 | 0 | } |
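An editor-added sketch of a comparison callback for the function above (not part of the upstream file). It assumes the public ggml_backend_eval_callback signature bool (int node_index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) and, for brevity, only checks contiguous F32 tensors.

static bool example_compare_nodes(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
    (void) user_data;
    if (t1->type != GGML_TYPE_F32 || !ggml_is_contiguous(t1)) {
        return true; // skip everything else in this simplified sketch
    }
    const int64_t n = ggml_nelements(t1);
    std::vector<float> a(n), b(n);
    ggml_backend_tensor_get(t1, a.data(), 0, n * sizeof(float));
    ggml_backend_tensor_get(t2, b.data(), 0, n * sizeof(float));
    double max_err = 0.0;
    for (int64_t i = 0; i < n; i++) {
        double d = (double) a[i] - (double) b[i];
        max_err = std::max(max_err, d < 0 ? -d : d);
    }
    GGML_LOG_DEBUG("node %d (%s): max abs err = %g\n", node_index, t1->name, max_err);
    return true; // return false to stop comparing after this node
}

// compare all nodes of `graph` on two backends:
//   ggml_backend_compare_graph_backend(backend1, backend2, graph, example_compare_nodes, NULL, NULL, 0);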
2203 | | |
2204 | | // CPU backend - buffer |
2205 | | |
2206 | 0 | static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { |
2207 | 0 | GGML_ASSERT(buffer); |
2208 | 0 | uintptr_t data = (uintptr_t)buffer->context; |
2209 | | |
2210 | | // align the buffer |
2211 | 0 | if (data % TENSOR_ALIGNMENT != 0) { |
2212 | 0 | data = GGML_PAD(data, TENSOR_ALIGNMENT); |
2213 | 0 | } |
2214 | |
|
2215 | 0 | return (void *)data; |
2216 | 0 | } |
2217 | | |
2218 | 0 | static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { |
2219 | 0 | GGML_ASSERT(buffer); |
2220 | 0 | ggml_aligned_free(buffer->context, buffer->size); |
2221 | 0 | } |
2222 | | |
2223 | 0 | static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { |
2224 | 0 | GGML_ASSERT(tensor); |
2225 | 0 | memset((char *)tensor->data + offset, value, size); |
2226 | |
|
2227 | 0 | GGML_UNUSED(buffer); |
2228 | 0 | } |
2229 | | |
2230 | 0 | static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { |
2231 | 0 | GGML_ASSERT(tensor); |
2232 | 0 | memcpy((char *)tensor->data + offset, data, size); |
2233 | |
|
2234 | 0 | GGML_UNUSED(buffer); |
2235 | 0 | } |
2236 | | |
2237 | 0 | static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { |
2238 | 0 | GGML_ASSERT(tensor); |
2239 | 0 | memcpy(data, (const char *)tensor->data + offset, size); |
2240 | |
|
2241 | 0 | GGML_UNUSED(buffer); |
2242 | 0 | } |
2243 | | |
2244 | 0 | static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { |
2245 | 0 | GGML_ASSERT(src); |
2246 | 0 | if (ggml_backend_buffer_is_host(src->buffer)) { |
2247 | 0 | memcpy(dst->data, src->data, ggml_nbytes(src)); |
2248 | 0 | return true; |
2249 | 0 | } |
2250 | 0 | return false; |
2251 | | |
2252 | 0 | GGML_UNUSED(buffer); |
2253 | 0 | } |
2254 | | |
2255 | 0 | static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { |
2256 | 0 | GGML_ASSERT(buffer); |
2257 | 0 | memset(buffer->context, value, buffer->size); |
2258 | 0 | } |
2259 | | |
2260 | | static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = { |
2261 | | /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer, |
2262 | | /* .get_base = */ ggml_backend_cpu_buffer_get_base, |
2263 | | /* .init_tensor = */ NULL, // no initialization required |
2264 | | /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor, |
2265 | | /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, |
2266 | | /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, |
2267 | | /* .set_tensor_2d = */ NULL, |
2268 | | /* .get_tensor_2d = */ NULL, |
2269 | | /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, |
2270 | | /* .clear = */ ggml_backend_cpu_buffer_clear, |
2271 | | /* .reset = */ NULL, |
2272 | | }; |
2273 | | |
2274 | | static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = { |
2275 | | /* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed |
2276 | | /* .get_base = */ ggml_backend_cpu_buffer_get_base, |
2277 | | /* .init_tensor = */ NULL, // no initialization required |
2278 | | /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor, |
2279 | | /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, |
2280 | | /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, |
2281 | | /* .set_tensor_2d = */ NULL, |
2282 | | /* .get_tensor_2d = */ NULL, |
2283 | | /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, |
2284 | | /* .clear = */ ggml_backend_cpu_buffer_clear, |
2285 | | /* .reset = */ NULL, |
2286 | | }; |
2287 | | |
2288 | | // CPU backend buffer type |
2289 | | |
2290 | | // this buffer type is defined here to make it available to all backends |
2291 | | |
2292 | 0 | static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) { |
2293 | 0 | return "CPU"; |
2294 | | |
2295 | 0 | GGML_UNUSED(buft); |
2296 | 0 | } |
2297 | | |
2298 | 0 | static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { |
2299 | 0 | void * data = ggml_aligned_malloc(size); |
2300 | |
|
2301 | 0 | if (data == NULL) { |
2302 | 0 | GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size); |
2303 | 0 | return NULL; |
2304 | 0 | } |
2305 | | |
2306 | 0 | return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size); |
2307 | 0 | } |
2308 | | |
2309 | 0 | static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { |
2310 | 0 | return TENSOR_ALIGNMENT; |
2311 | | |
2312 | 0 | GGML_UNUSED(buft); |
2313 | 0 | } |
2314 | | |
2315 | 0 | static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) { |
2316 | 0 | return true; |
2317 | | |
2318 | 0 | GGML_UNUSED(buft); |
2319 | 0 | } |
2320 | | |
2321 | 0 | ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) { |
2322 | 0 | static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { |
2323 | 0 | /* .iface = */ { |
2324 | 0 | /* .get_name = */ ggml_backend_cpu_buffer_type_get_name, |
2325 | 0 | /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, |
2326 | 0 | /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, |
2327 | 0 | /* .get_max_size = */ NULL, // defaults to SIZE_MAX |
2328 | 0 | /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes |
2329 | 0 | /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, |
2330 | 0 | }, |
2331 | 0 | /* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), |
2332 | 0 | /* .context = */ NULL, |
2333 | 0 | }; |
2334 | |
|
2335 | 0 | return &ggml_backend_cpu_buffer_type; |
2336 | 0 | } |
2337 | | |
2338 | 0 | static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) { |
2339 | 0 | return "CPU_Mapped"; |
2340 | | |
2341 | 0 | GGML_UNUSED(buft); |
2342 | 0 | } |
2343 | | |
2344 | 0 | static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) { |
2345 | 0 | static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { |
2346 | 0 | /* .iface = */ { |
2347 | 0 | /* .get_name = */ ggml_backend_cpu_buffer_from_ptr_type_get_name, |
2348 | 0 | /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, |
2349 | 0 | /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, |
2350 | 0 | /* .get_max_size = */ NULL, // defaults to SIZE_MAX |
2351 | 0 | /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes |
2352 | 0 | /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, |
2353 | 0 | }, |
2354 | 0 | /* .device = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), |
2355 | 0 | /* .context = */ NULL, |
2356 | 0 | }; |
2357 | |
|
2358 | 0 | return &ggml_backend_cpu_buffer_type; |
2359 | 0 | } |
2360 | | |
2361 | 0 | ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) { |
2362 | 0 | GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned"); |
2363 | 0 | return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size); |
2364 | 0 | } |
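Closing with an editor-added sketch (not part of the upstream file) that combines ggml_backend_cpu_buffer_from_ptr with ggml_backend_tensor_alloc from earlier in this file: placing a tensor from a no_alloc context into caller-owned host memory. The 64-byte alignment and the fixed element count are example assumptions chosen to satisfy the TENSOR_ALIGNMENT assert above.

// ctx must have been created with no_alloc = true, so the tensor has no data yet
static struct ggml_tensor * example_tensor_in_user_memory(struct ggml_context * ctx) {
    alignas(64) static float storage[1024]; // caller-owned storage, 64-byte aligned

    // wrap the user pointer; the buffer does not take ownership (free_buffer is NULL)
    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(storage, sizeof(storage));

    struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    enum ggml_status status = ggml_backend_tensor_alloc(buffer, t, ggml_backend_buffer_get_base(buffer));
    GGML_ASSERT(status == GGML_STATUS_SUCCESS);

    // note: `buffer` is leaked here for brevity; release it with ggml_backend_buffer_free when done
    return t; // t->data now points into storage[]
}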