Coverage Report

Created: 2026-03-03 06:12

/src/llama.cpp/ggml/src/ggml-backend.cpp
Line  Count  Source
1
// Note: porting this file to C++ is a work in progress
2
3
#ifdef _WIN32
4
#define WIN32_LEAN_AND_MEAN
5
#ifndef NOMINMAX
6
#   define NOMINMAX
7
#endif
8
#include <windows.h>
9
#endif
10
11
#include "ggml-backend.h"
12
#include "ggml-backend-impl.h"
13
#include "ggml-alloc.h"
14
#include "ggml-impl.h"
15
16
#include <assert.h>
17
#include <limits.h>
18
#include <stdarg.h>
19
#include <stdio.h>
20
#include <stdlib.h>
21
#include <string.h>
22
#include <algorithm>
23
#include <vector>
24
25
#ifdef __APPLE__
26
#include <sys/types.h>
27
#include <sys/sysctl.h>
28
#endif
29
30
31
// backend buffer type
32
33
0
const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
34
0
    GGML_ASSERT(buft);
35
0
    return buft->iface.get_name(buft);
36
0
}
37
38
0
ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
39
0
    GGML_ASSERT(buft);
40
0
    if (size == 0) {
41
        // return a dummy buffer for zero-sized allocations
42
0
        return ggml_backend_buffer_init(buft, {}, NULL, 0);
43
0
    }
44
0
    return buft->iface.alloc_buffer(buft, size);
45
0
}
46
47
0
size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
48
0
    GGML_ASSERT(buft);
49
0
    return buft->iface.get_alignment(buft);
50
0
}
51
52
0
size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
53
0
    GGML_ASSERT(buft);
54
    // get_max_size is optional, defaults to SIZE_MAX
55
0
    if (buft->iface.get_max_size) {
56
0
        return buft->iface.get_max_size(buft);
57
0
    }
58
0
    return SIZE_MAX;
59
0
}
60
61
0
size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
62
0
    GGML_ASSERT(buft);
63
    // get_alloc_size is optional, defaults to ggml_nbytes
64
0
    if (buft->iface.get_alloc_size) {
65
0
        size_t size = buft->iface.get_alloc_size(buft, tensor);
66
0
        assert(size >= ggml_nbytes(tensor));
67
0
        return size;
68
0
    }
69
0
    return ggml_nbytes(tensor);
70
0
}
71
72
0
bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
73
0
    GGML_ASSERT(buft);
74
0
    if (buft->iface.is_host) {
75
0
        return buft->iface.is_host(buft);
76
0
    }
77
0
    return false;
78
0
}
79
80
0
ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) {
81
0
    GGML_ASSERT(buft);
82
0
    return buft->device;
83
0
}
84
85
// backend buffer
86
87
ggml_backend_buffer_t ggml_backend_buffer_init(
88
               ggml_backend_buffer_type_t buft,
89
        struct ggml_backend_buffer_i      iface,
90
               void *                     context,
91
0
               size_t                     size) {
92
0
    ggml_backend_buffer_t buffer = new ggml_backend_buffer {
93
0
        /* .interface = */ iface,
94
0
        /* .buft      = */ buft,
95
0
        /* .context   = */ context,
96
0
        /* .size      = */ size,
97
0
        /* .usage     = */ GGML_BACKEND_BUFFER_USAGE_ANY
98
0
    };
99
100
0
    return buffer;
101
0
}
102
103
0
const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
104
0
    return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer));
105
0
}
106
107
0
void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
108
0
    if (buffer == NULL) {
109
0
        return;
110
0
    }
111
112
0
    if (buffer->iface.free_buffer != NULL) {
113
0
        buffer->iface.free_buffer(buffer);
114
0
    }
115
0
    delete buffer;
116
0
}
117
118
0
size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
119
0
    GGML_ASSERT(buffer);
120
0
    return buffer->size;
121
0
}
122
123
0
void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
124
0
    GGML_ASSERT(buffer);
125
    // get_base is optional if the buffer is zero-sized
126
0
    if (buffer->size == 0) {
127
0
        return NULL;
128
0
    }
129
130
    // FIXME JG: a multi_buffer has a non-zero size, according to the above comment get_base is not optional,
131
    //     I don't know whether the above comment is correct
132
0
    if (!buffer->iface.get_base) {
133
0
        return NULL;
134
0
    }
135
136
0
    void * base = buffer->iface.get_base(buffer);
137
138
0
    GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");
139
140
0
    return base;
141
0
}
142
143
0
enum ggml_status ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
144
0
    GGML_ASSERT(buffer);
145
    // init_tensor is optional
146
0
    if (buffer->iface.init_tensor) {
147
0
        return buffer->iface.init_tensor(buffer, tensor);
148
0
    }
149
0
    return GGML_STATUS_SUCCESS;
150
0
}
151
152
0
void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
153
0
    GGML_ASSERT(buffer);
154
    // clear is optional if the buffer is zero-sized
155
0
    if (buffer->size == 0) {
156
0
        return;
157
0
    }
158
159
0
    buffer->iface.clear(buffer, value);
160
0
}
161
162
0
size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
163
0
    return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
164
0
}
165
166
0
size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) {
167
0
    return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer));
168
0
}
169
170
0
size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor) {
171
0
    return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
172
0
}
173
174
0
bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
175
0
    return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
176
0
}
177
178
0
void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
179
0
    GGML_ASSERT(buffer);
180
0
    buffer->usage = usage;
181
182
    // FIXME: add a generic callback to the buffer interface
183
0
    if (ggml_backend_buffer_is_multi_buffer(buffer)) {
184
0
        ggml_backend_multi_buffer_set_usage(buffer, usage);
185
0
    }
186
0
}
187
188
0
enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) {
189
0
    GGML_ASSERT(buffer);
190
0
    return buffer->usage;
191
0
}
192
193
0
ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
194
0
    GGML_ASSERT(buffer);
195
0
    return buffer->buft;
196
0
}
197
198
0
void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
199
0
    GGML_ASSERT(buffer);
200
0
    if (buffer->iface.reset) {
201
0
        buffer->iface.reset(buffer);
202
0
    }
203
0
}
204
205
0
bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) {
206
0
    ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
207
0
    if (dst_buf->iface.cpy_tensor) {
208
0
        return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
209
0
    }
210
0
    return false;
211
0
}
212
213
// backend
214
215
0
ggml_guid_t ggml_backend_guid(ggml_backend_t backend) {
216
0
    if (backend == NULL) {
217
0
        return NULL;
218
0
    }
219
0
    return backend->guid;
220
0
}
221
222
0
const char * ggml_backend_name(ggml_backend_t backend) {
223
0
    if (backend == NULL) {
224
0
        return "NULL";
225
0
    }
226
0
    return backend->iface.get_name(backend);
227
0
}
228
229
0
void ggml_backend_free(ggml_backend_t backend) {
230
0
    if (backend == NULL) {
231
0
        return;
232
0
    }
233
234
0
    backend->iface.free(backend);
235
0
}
236
237
0
ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) {
238
0
    GGML_ASSERT(backend);
239
0
    return ggml_backend_dev_buffer_type(backend->device);
240
0
}
241
242
0
ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
243
0
    return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size);
244
0
}
245
246
0
size_t ggml_backend_get_alignment(ggml_backend_t backend) {
247
0
    return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend));
248
0
}
249
250
0
size_t ggml_backend_get_max_size(ggml_backend_t backend) {
251
0
    return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend));
252
0
}
253
254
0
void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
255
0
    GGML_ASSERT(backend);
256
0
    GGML_ASSERT(tensor);
257
0
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
258
0
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
259
260
0
    if (backend->iface.set_tensor_async == NULL) {
261
0
        ggml_backend_synchronize(backend);
262
0
        ggml_backend_tensor_set(tensor, data, offset, size);
263
0
    } else {
264
0
        backend->iface.set_tensor_async(backend, tensor, data, offset, size);
265
0
    }
266
0
}
267
268
0
void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
269
0
    GGML_ASSERT(backend);
270
0
    GGML_ASSERT(tensor);
271
0
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
272
0
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
273
274
0
    if (backend->iface.get_tensor_async == NULL) {
275
0
        ggml_backend_synchronize(backend);
276
0
        ggml_backend_tensor_get(tensor, data, offset, size);
277
0
    } else {
278
0
        backend->iface.get_tensor_async(backend, tensor, data, offset, size);
279
0
    }
280
0
}
281
282
0
void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
283
0
    GGML_ASSERT(tensor);
284
0
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
285
286
0
    if (size == 0) {
287
0
        return;
288
0
    }
289
290
0
    GGML_ASSERT(buf != NULL && "tensor buffer not set");
291
0
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
292
0
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
293
294
0
    buf->iface.set_tensor(buf, tensor, data, offset, size);
295
0
}
296
297
0
void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
298
0
    GGML_ASSERT(tensor);
299
0
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
300
301
0
    if (size == 0) {
302
0
        return;
303
0
    }
304
305
0
    GGML_ASSERT(buf != NULL && "tensor buffer not set");
306
0
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
307
0
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
308
309
0
    buf->iface.get_tensor(buf, tensor, data, offset, size);
310
0
}
311
312
0
void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
313
0
    GGML_ASSERT(tensor);
314
0
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
315
316
0
    if (size == 0) {
317
0
        return;
318
0
    }
319
320
0
    GGML_ASSERT(buf != NULL && "tensor buffer not set");
321
0
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
322
0
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
323
0
    GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer");
324
325
0
    buf->iface.memset_tensor(buf, tensor, value, offset, size);
326
0
}
327
328
0
void ggml_backend_synchronize(ggml_backend_t backend) {
329
0
    GGML_ASSERT(backend);
330
0
    if (backend->iface.synchronize == NULL) {
331
0
        return;
332
0
    }
333
334
0
    backend->iface.synchronize(backend);
335
0
}
336
337
0
ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
338
0
    GGML_ASSERT(backend);
339
0
    GGML_ASSERT(backend->iface.graph_plan_create != NULL);
340
341
0
    return backend->iface.graph_plan_create(backend, cgraph);
342
0
}
343
344
0
void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
345
0
    GGML_ASSERT(backend);
346
0
    GGML_ASSERT(backend->iface.graph_plan_free != NULL);
347
348
0
    backend->iface.graph_plan_free(backend, plan);
349
0
}
350
351
0
enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
352
0
    GGML_ASSERT(backend);
353
0
    GGML_ASSERT(backend->iface.graph_plan_compute != NULL);
354
355
0
    return backend->iface.graph_plan_compute(backend, plan);
356
0
}
357
358
0
enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
359
0
    enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph);
360
0
    ggml_backend_synchronize(backend);
361
0
    return err;
362
0
}
363
364
0
enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
365
0
    GGML_ASSERT(backend);
366
0
    return backend->iface.graph_compute(backend, cgraph);
367
0
}
368
369
0
bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
370
0
    GGML_ASSERT(backend);
371
0
    return ggml_backend_dev_supports_op(backend->device, op);
372
0
}
373
374
0
bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
375
0
    GGML_ASSERT(backend);
376
0
    return ggml_backend_dev_supports_buft(backend->device, buft);
377
0
}
378
379
0
bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) {
380
0
    GGML_ASSERT(backend);
381
0
    return ggml_backend_dev_offload_op(backend->device, op);
382
0
}
383
384
0
ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) {
385
0
    GGML_ASSERT(backend);
386
0
    return backend->device;
387
0
}
388
389
// backend copy
390
391
0
void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
392
0
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
393
394
0
    if (src == dst) {
395
0
        return;
396
0
    }
397
398
0
    if (ggml_backend_buffer_is_host(src->buffer)) {
399
0
        ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
400
0
    } else if (ggml_backend_buffer_is_host(dst->buffer)) {
401
0
        ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
402
0
    } else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
403
#ifndef NDEBUG
404
        GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
405
#endif
406
0
        size_t nbytes = ggml_nbytes(src);
407
0
        void * data = malloc(nbytes);
408
0
        ggml_backend_tensor_get(src, data, 0, nbytes);
409
0
        ggml_backend_tensor_set(dst, data, 0, nbytes);
410
0
        free(data);
411
0
    }
412
0
}
413
414
0
void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) {
415
0
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
416
417
0
    if (src == dst) {
418
0
        return;
419
0
    }
420
421
0
    GGML_ASSERT(backend_dst);
422
0
    if (backend_dst->iface.cpy_tensor_async != NULL) {
423
0
        if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) {
424
0
            return;
425
0
        }
426
0
    }
427
428
    // an async copy would normally happen after all the queued operations on both backends are completed
429
    // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy
430
0
    ggml_backend_synchronize(backend_src);
431
0
    ggml_backend_synchronize(backend_dst);
432
0
    ggml_backend_tensor_copy(src, dst);
433
0
}
434
435
// events
436
437
0
ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) {
438
    // null device is allowed for the transition period to the device interface
439
0
    if (device == NULL || device->iface.event_new == NULL) {
440
0
        return NULL;
441
0
    }
442
0
    return device->iface.event_new(device);
443
0
}
444
445
0
void ggml_backend_event_free(ggml_backend_event_t event) {
446
0
    if (event == NULL) {
447
0
        return;
448
0
    }
449
0
    event->device->iface.event_free(event->device, event);
450
0
}
451
452
0
void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) {
453
0
    GGML_ASSERT(backend);
454
0
    GGML_ASSERT(backend->iface.event_record != NULL);
455
456
0
    backend->iface.event_record(backend, event);
457
0
}
458
459
0
void ggml_backend_event_synchronize(ggml_backend_event_t event) {
460
0
    GGML_ASSERT(event);
461
0
    GGML_ASSERT(event->device->iface.event_synchronize);
462
463
0
    event->device->iface.event_synchronize(event->device, event);
464
0
}
465
466
0
void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) {
467
0
    GGML_ASSERT(backend);
468
0
    GGML_ASSERT(backend->iface.event_wait != NULL);
469
470
0
    backend->iface.event_wait(backend, event);
471
0
}
472
473
0
static void ggml_backend_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
474
0
    GGML_ASSERT(backend);
475
0
    if (backend->iface.graph_optimize != NULL) {
476
0
        backend->iface.graph_optimize(backend, cgraph);
477
0
    }
478
0
}
479
480
// Backend device
481
482
0
const char * ggml_backend_dev_name(ggml_backend_dev_t device) {
483
0
    GGML_ASSERT(device);
484
0
    return device->iface.get_name(device);
485
0
}
486
487
0
const char * ggml_backend_dev_description(ggml_backend_dev_t device) {
488
0
    GGML_ASSERT(device);
489
0
    return device->iface.get_description(device);
490
0
}
491
492
0
void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
493
0
    GGML_ASSERT(device);
494
0
    device->iface.get_memory(device, free, total);
495
0
}
496
497
8
enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) {
498
8
    GGML_ASSERT(device);
499
8
    return device->iface.get_type(device);
500
8
}
501
502
0
void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) {
503
0
    memset(props, 0, sizeof(*props));
504
0
    device->iface.get_props(device, props);
505
0
}
506
507
0
ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) {
508
0
    GGML_ASSERT(device);
509
0
    return device->reg;
510
0
}
511
512
0
ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) {
513
0
    GGML_ASSERT(device);
514
0
    return device->iface.init_backend(device, params);
515
0
}
516
517
0
ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) {
518
0
    GGML_ASSERT(device);
519
0
    return device->iface.get_buffer_type(device);
520
0
}
521
522
0
ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) {
523
0
    GGML_ASSERT(device);
524
0
    if (device->iface.get_host_buffer_type == NULL) {
525
0
        return NULL;
526
0
    }
527
528
0
    return device->iface.get_host_buffer_type(device);
529
0
}
530
531
0
ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) {
532
0
    GGML_ASSERT(device);
533
0
    return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size);
534
0
}
535
536
0
bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
537
0
    GGML_ASSERT(device);
538
0
    return device->iface.supports_op(device, op);
539
0
}
540
541
0
bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) {
542
0
    GGML_ASSERT(device);
543
0
    return device->iface.supports_buft(device, buft);
544
0
}
545
546
0
bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
547
0
    GGML_ASSERT(device);
548
0
    if (device->iface.offload_op != NULL) {
549
0
        return device->iface.offload_op(device, op);
550
0
    }
551
552
0
    return false;
553
0
}
554
555
// Backend (reg)
556
557
0
const char * ggml_backend_reg_name(ggml_backend_reg_t reg) {
558
0
    GGML_ASSERT(reg);
559
0
    return reg->iface.get_name(reg);
560
0
}
561
562
2
size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) {
563
2
    GGML_ASSERT(reg);
564
2
    return reg->iface.get_device_count(reg);
565
2
}
566
567
1
ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) {
568
1
    GGML_ASSERT(reg);
569
1
    return reg->iface.get_device(reg, index);
570
1
}
571
572
0
void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
573
0
    GGML_ASSERT(reg);
574
0
    if (!reg->iface.get_proc_address) {
575
0
        return NULL;
576
0
    }
577
0
    return reg->iface.get_proc_address(reg, name);
578
0
}
579
580
// multi-buffer buffer
581
582
struct ggml_backend_multi_buffer_context {
583
    ggml_backend_buffer_t * buffers;
584
    size_t n_buffers;
585
};
586
587
0
static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) {
588
0
    GGML_ASSERT(buffer);
589
0
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
590
0
    for (size_t i = 0; i < ctx->n_buffers; i++) {
591
0
        ggml_backend_buffer_free(ctx->buffers[i]);
592
0
    }
593
594
0
    free(ctx->buffers);
595
0
    free(ctx);
596
0
}
597
598
0
static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
599
0
    GGML_ASSERT(buffer);
600
0
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
601
0
    for (size_t i = 0; i < ctx->n_buffers; i++) {
602
0
        ggml_backend_buffer_clear(ctx->buffers[i], value);
603
0
    }
604
0
}
605
606
static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = {
607
    /* .free_buffer     = */ ggml_backend_multi_buffer_free_buffer,
608
    /* .get_base        = */ NULL,
609
    /* .init_tensor     = */ NULL,
610
    /* .memset_tensor   = */ NULL,
611
    /* .set_tensor      = */ NULL,
612
    /* .get_tensor      = */ NULL,
613
    /* .cpy_tensor      = */ NULL,
614
    /* .clear           = */ ggml_backend_multi_buffer_clear,
615
    /* .reset           = */ NULL,
616
};
617
618
0
ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) {
619
0
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context));
620
0
    ctx->n_buffers = n_buffers;
621
0
    ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));
622
623
0
    GGML_ASSERT(ctx->buffers != NULL);
624
625
0
    size_t total_size = 0;
626
0
    for (size_t i = 0; i < n_buffers; i++) {
627
0
        ctx->buffers[i] = buffers[i];
628
0
        total_size += ggml_backend_buffer_get_size(buffers[i]);
629
0
    }
630
631
0
    return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size);
632
0
}
633
634
0
bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) {
635
0
    GGML_ASSERT(buffer);
636
0
    return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer;
637
0
}
638
639
0
void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
640
0
    GGML_ASSERT(buffer);
641
0
    GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer));
642
0
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
643
0
    for (size_t i = 0; i < ctx->n_buffers; i++) {
644
0
        ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
645
0
    }
646
0
}
647
648
// creates a copy of the tensor with the same memory layout
649
0
static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
650
0
    struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
651
0
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
652
0
        dup->nb[i] = tensor->nb[i];
653
0
    }
654
0
    return dup;
655
0
}
656
657
0
static bool ggml_is_view_op(enum ggml_op op) {
658
0
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
659
0
}
660
661
// scheduler
662
663
#ifndef GGML_SCHED_MAX_BACKENDS
664
#define GGML_SCHED_MAX_BACKENDS 16
665
#endif
666
667
#ifndef GGML_SCHED_MAX_SPLIT_INPUTS
668
0
#define GGML_SCHED_MAX_SPLIT_INPUTS 30
669
#endif
670
671
#ifndef GGML_SCHED_MAX_COPIES
672
#define GGML_SCHED_MAX_COPIES 4
673
#endif
674
675
struct ggml_backend_sched_split {
676
    int backend_id;
677
    int i_start;
678
    int i_end;
679
    struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
680
    int n_inputs;
681
    // graph view of this split
682
    struct ggml_cgraph graph;
683
};
684
685
struct ggml_backend_sched {
686
    bool is_reset; // true if the scheduler has been reset since the last graph split
687
    bool is_alloc;
688
689
    int n_backends;
690
691
    ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS];
692
    ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS];
693
    ggml_gallocr_t galloc;
694
695
    // hash map of the nodes in the graph
696
    struct ggml_hash_set  hash_set;
697
    int                 * hv_tensor_backend_ids; // [hash_set.size]
698
    struct ggml_tensor ** hv_tensor_copies;      // [hash_set.size][n_backends][n_copies]
699
700
    int * node_backend_ids; // [graph_size]
701
    int * leaf_backend_ids; // [graph_size]
702
703
    int * prev_node_backend_ids; // [graph_size]
704
    int * prev_leaf_backend_ids; // [graph_size]
705
706
    // copy of the graph with modified inputs
707
    struct ggml_cgraph graph;
708
709
    // graph splits
710
    struct ggml_backend_sched_split * splits;
711
    int n_splits;
712
    int splits_capacity;
713
714
    // pipeline parallelism support
715
    int n_copies;
716
    int cur_copy;
717
    int next_copy;
718
    ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES];
719
    struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
720
    int n_graph_inputs;
721
722
    struct ggml_context * ctx;
723
724
    ggml_backend_sched_eval_callback callback_eval;
725
    void * callback_eval_user_data;
726
727
    char * context_buffer;
728
    size_t context_buffer_size;
729
730
    bool op_offload;
731
732
    int debug;
733
734
    // used for debugging graph reallocations [GGML_SCHED_DEBUG_REALLOC]
735
    // ref: https://github.com/ggml-org/llama.cpp/pull/17617
736
    int debug_realloc;
737
    int debug_graph_size;
738
    int debug_prev_graph_size;
739
};
740
741
0
#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
742
0
#define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
743
0
#define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
744
0
#define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id)
745
746
// returns the priority of the backend, lower id is higher priority
747
0
static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) {
748
0
    for (int i = 0; i < sched->n_backends; i++) {
749
0
        if (sched->backends[i] == backend) {
750
0
            return i;
751
0
        }
752
0
    }
753
0
    return -1;
754
0
}
755
756
0
static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) {
757
0
    ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
758
0
    if (buffer == NULL) {
759
0
        return -1;
760
0
    }
761
762
    // find highest prio backend that supports the buffer type and the op
763
0
    for (int i = 0; i < sched->n_backends; i++) {
764
0
        if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) &&
765
0
            ggml_backend_supports_op(sched->backends[i], op)) {
766
0
            return i;
767
0
        }
768
0
    }
769
770
#ifndef NDEBUG
771
    GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
772
        __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name);
773
#endif
774
775
0
    return -1;
776
0
}
777
778
#if 0
779
#define GGML_SCHED_MAX_SPLITS_DEBUG 4096
780
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only
781
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
782
#define GET_CAUSE(node) causes[hash_id(node)]
783
#else
784
#define SET_CAUSE(node, ...)
785
#define GET_CAUSE(node) ""
786
#endif
787
788
// returns the backend that should be used for the node based on the current locations
789
0
static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) {
790
    // assign pre-allocated nodes to their backend
791
0
    int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor);
792
0
    if (cur_backend_id != -1) {
793
0
        SET_CAUSE(tensor, "1.dst");
794
0
        return cur_backend_id;
795
0
    }
796
797
    // view_src
798
0
    if (tensor->view_src != NULL) {
799
0
        cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor);
800
0
        if (cur_backend_id != -1) {
801
0
            SET_CAUSE(tensor, "1.vsrc");
802
0
            return cur_backend_id;
803
0
        }
804
0
    }
805
806
0
    if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) {
807
        // since the tensor is pre-allocated, it cannot be moved to another backend
808
0
        ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
809
0
        GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op));
810
0
    }
811
812
    // graph input
813
0
    if (tensor->flags & GGML_TENSOR_FLAG_INPUT) {
814
0
        cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU)
815
0
        SET_CAUSE(tensor, "1.inp");
816
0
        return cur_backend_id;
817
0
    }
818
819
    // operations with weights are preferably run on the same backend as the weights
820
0
    for (int i = 0; i < GGML_MAX_SRC; i++) {
821
0
        const struct ggml_tensor * src = tensor->src[i];
822
0
        if (src == NULL) {
823
0
            continue;
824
0
        }
825
        // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
826
        // not an ideal solution
827
0
        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
828
0
            int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
829
            // check if a backend with higher prio wants to offload the op
830
0
            if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
831
0
                for (int b = 0; b < src_backend_id; b++) {
832
0
                    if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
833
0
                        SET_CAUSE(tensor, "1.off");
834
0
                        return b;
835
0
                    }
836
0
                }
837
0
            }
838
0
            SET_CAUSE(tensor, "1.wgt%d", i);
839
0
            return src_backend_id;
840
0
        }
841
0
    }
842
843
0
    return -1;
844
0
}
845
846
0
static char * fmt_size(size_t size) {
847
0
    static char buffer[128];
848
0
    if (size >= 1024*1024) {
849
0
        snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024);
850
0
    } else {
851
0
        snprintf(buffer, sizeof(buffer), "%zuK", size/1024);
852
0
    }
853
0
    return buffer;
854
0
}
855
856
0
static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
857
0
    int cur_split = 0;
858
0
    for (int i = 0; i < graph->n_nodes; i++) {
859
0
        if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
860
0
            ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
861
0
            GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend),
862
0
                sched->splits[cur_split].n_inputs);
863
0
            for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
864
0
                if (j == 0) {
865
0
                    GGML_LOG_DEBUG(": ");
866
0
                }
867
0
                GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
868
0
                    fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
869
0
            }
870
0
            GGML_LOG_DEBUG("\n");
871
0
            cur_split++;
872
0
        }
873
0
        struct ggml_tensor * node = graph->nodes[i];
874
0
        if (ggml_is_view_op(node->op)) {
875
0
            continue;
876
0
        }
877
0
        if (sched->debug > 1) {
878
0
            ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node);
879
0
            GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d,c=%d:", i, ggml_op_name(node->op), node->name,
880
0
                fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node),
881
0
                graph->use_counts[ggml_hash_find(&graph->visited_hash_set, node)], node->flags & GGML_TENSOR_FLAG_COMPUTE ? 1 : 0);
882
0
            for (int j = 0; j < GGML_MAX_SRC; j++) {
883
0
                struct ggml_tensor * src = node->src[j];
884
0
                if (src == NULL) {
885
0
                    continue;
886
0
                }
887
0
                ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src);
888
0
                GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
889
0
                    fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
890
0
            }
891
0
            GGML_LOG_DEBUG("\n");
892
0
        }
893
0
    }
894
0
}
895
896
0
static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) {
897
0
    ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer;
898
0
    ggml_backend_buffer_type_t buft = NULL;
899
900
0
    if (buf) {
901
        // the tensor is already allocated
902
0
        buft = buf->buft;
903
0
    } else {
904
        // see if the tensor already has a backend assigned, and use the buffer type of that backend
905
0
        int tensor_backend_id = tensor_backend_id(t);
906
0
        if (tensor_backend_id == -1 && t->view_src) {
907
0
            tensor_backend_id = tensor_backend_id(t->view_src);
908
0
        }
909
0
        if (tensor_backend_id != -1) {
910
0
            buft = sched->bufts[tensor_backend_id];
911
0
        }
912
0
    }
913
914
0
    return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft);
915
0
}
916
917
0
static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) {
918
0
    if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) {
919
0
        *node_backend_id = cur_backend_id;
920
0
        SET_CAUSE(node, "2.sup");
921
0
    }
922
0
}
923
924
// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
925
0
void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
926
    // reset splits
927
0
    sched->n_splits = 0;
928
0
    sched->n_graph_inputs = 0;
929
0
    sched->is_reset = false;
930
931
0
    struct ggml_init_params params = {
932
0
        /* .mem_size =   */ sched->context_buffer_size,
933
0
        /* .mem_buffer = */ sched->context_buffer,
934
0
        /* .no_alloc =   */ true
935
0
    };
936
937
0
    ggml_free(sched->ctx);
938
939
0
    sched->ctx = ggml_init(params);
940
0
    if (sched->ctx == NULL) {
941
0
        GGML_ABORT("%s: failed to initialize context\n", __func__);
942
0
    }
943
944
    // pass 1: assign backends to ops with pre-allocated inputs
945
0
    for (int i = 0; i < graph->n_leafs; i++) {
946
0
        struct ggml_tensor * leaf = graph->leafs[i];
947
0
        int * leaf_backend_id = &tensor_backend_id(leaf);
948
        // do not overwrite user assignments
949
0
        if (*leaf_backend_id == -1) {
950
0
            *leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf);
951
0
        }
952
0
    }
953
954
0
    for (int i = 0; i < graph->n_nodes; i++) {
955
0
        struct ggml_tensor * node = graph->nodes[i];
956
0
        int * node_backend_id = &tensor_backend_id(node);
957
        // do not overwrite user assignments
958
0
        if (*node_backend_id == -1) {
959
0
            *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node);
960
961
#if 0
962
            // src
963
            if (node->op == GGML_OP_NONE) {
964
                continue;
965
            }
966
967
            for (int j = 0; j < GGML_MAX_SRC; j++) {
968
                struct ggml_tensor * src = node->src[j];
969
                if (src == NULL) {
970
                    continue;
971
                }
972
                int * src_backend_id = &tensor_backend_id(src);
973
                if (*src_backend_id == -1) {
974
                    *src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src);
975
                }
976
            }
977
#endif
978
0
        }
979
0
    }
980
981
    // pass 2: expand current backend assignments
982
    // assign the same backend to adjacent nodes
983
    // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend)
984
    // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
985
    // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later when the locations of its inputs are known
986
    // expand gpu down
987
0
    {
988
0
        int cur_backend_id = -1;
989
0
        for (int i = 0; i < graph->n_nodes; i++) {
990
0
            struct ggml_tensor * node = graph->nodes[i];
991
0
            if (ggml_is_view_op(node->op)) {
992
0
                continue;
993
0
            }
994
0
            int * node_backend_id = &tensor_backend_id(node);
995
0
            if (*node_backend_id != -1) {
996
0
                if (*node_backend_id == sched->n_backends - 1) {
997
                    // skip cpu (lowest prio backend)
998
0
                    cur_backend_id = -1;
999
0
                } else {
1000
0
                    cur_backend_id = *node_backend_id;
1001
0
                }
1002
0
            } else if (cur_backend_id != -1) {
1003
0
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
1004
0
            }
1005
0
        }
1006
0
    }
1007
    // expand gpu up
1008
0
    {
1009
0
        int cur_backend_id = -1;
1010
0
        for (int i = graph->n_nodes - 1; i >= 0; i--) {
1011
0
            struct ggml_tensor * node = graph->nodes[i];
1012
0
            if (ggml_is_view_op(node->op)) {
1013
0
                continue;
1014
0
            }
1015
0
            int * node_backend_id = &tensor_backend_id(node);
1016
0
            if (*node_backend_id != -1) {
1017
0
                if (*node_backend_id == sched->n_backends - 1) {
1018
                    // skip cpu (lowest prio backend)
1019
0
                    cur_backend_id = -1;
1020
0
                } else {
1021
0
                    cur_backend_id = *node_backend_id;
1022
0
                }
1023
0
            } else if (cur_backend_id != -1) {
1024
0
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
1025
0
            }
1026
0
        }
1027
0
    }
1028
    // expand rest down
1029
0
    {
1030
0
        int cur_backend_id = -1;
1031
0
        for (int i = 0; i < graph->n_nodes; i++) {
1032
0
            struct ggml_tensor * node = graph->nodes[i];
1033
0
            if (ggml_is_view_op(node->op)) {
1034
0
                continue;
1035
0
            }
1036
0
            int * node_backend_id = &tensor_backend_id(node);
1037
0
            if (*node_backend_id != -1) {
1038
0
                cur_backend_id = *node_backend_id;
1039
0
            } else if (cur_backend_id != -1) {
1040
0
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
1041
0
            }
1042
0
        }
1043
0
    }
1044
    // expand rest up
1045
0
    {
1046
0
        int cur_backend_id = -1;
1047
0
        for (int i = graph->n_nodes - 1; i >= 0; i--) {
1048
0
            struct ggml_tensor * node = graph->nodes[i];
1049
0
            if (ggml_is_view_op(node->op)) {
1050
0
                continue;
1051
0
            }
1052
0
            int * node_backend_id = &tensor_backend_id(node);
1053
0
            if (*node_backend_id != -1) {
1054
0
                cur_backend_id = *node_backend_id;
1055
0
            } else if (cur_backend_id != -1) {
1056
0
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
1057
0
            }
1058
0
        }
1059
0
    }
1060
1061
    // pass 3: upgrade nodes to higher prio backends with compatible buffer types
1062
    // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there
1063
    // however, we also need to verify that the sources are in compatible buffer types
1064
    // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph
1065
    // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same
1066
    // this is not uncommon since multiple backends can use host memory, with the same buffer type (eg. BLAS and CPU)
1067
    // additionally, set remaining unassigned nodes to the backend with the most supported inputs
1068
    // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point
1069
0
    for (int i = 0; i < graph->n_nodes; i++) {
1070
0
        struct ggml_tensor * node = graph->nodes[i];
1071
0
        if (ggml_is_view_op(node->op)) {
1072
0
            continue;
1073
0
        }
1074
0
        int * node_backend_id = &tensor_backend_id(node);
1075
0
        if (*node_backend_id == -1) {
1076
            // unassigned node: find the backend with the most supported inputs
1077
0
            int n_supported_best = -1;
1078
0
            for (int b = 0; b < sched->n_backends; b++) {
1079
0
                if (ggml_backend_supports_op(sched->backends[b], node)) {
1080
0
                    int n_supported = 0;
1081
0
                    for (int j = 0; j < GGML_MAX_SRC; j++) {
1082
0
                        struct ggml_tensor * src = node->src[j];
1083
0
                        if (src == NULL) {
1084
0
                            continue;
1085
0
                        }
1086
0
                        if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) {
1087
0
                            n_supported++;
1088
0
                        }
1089
0
                    }
1090
0
                    if (n_supported > n_supported_best) {
1091
0
                        n_supported_best = n_supported;
1092
0
                        *node_backend_id = b;
1093
0
                        SET_CAUSE(node, "3.best");
1094
0
                    }
1095
0
                }
1096
0
            }
1097
0
        } else {
1098
            // assigned node: upgrade to higher prio backend if possible
1099
0
            for (int b = 0; b < *node_backend_id; b++) {
1100
0
                if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) {
1101
0
                    bool supported = true;
1102
0
                    for (int j = 0; j < GGML_MAX_SRC; j++) {
1103
0
                        struct ggml_tensor * src = node->src[j];
1104
0
                        if (src == NULL) {
1105
0
                            continue;
1106
0
                        }
1107
0
                        if (!ggml_backend_sched_buffer_supported(sched, src, b)) {
1108
0
                            supported = false;
1109
0
                            break;
1110
0
                        }
1111
0
                    }
1112
0
                    if (supported) {
1113
0
                        *node_backend_id = b;
1114
0
                        SET_CAUSE(node, "3.upg");
1115
0
                        break;
1116
0
                    }
1117
0
                }
1118
0
            }
1119
0
        }
1120
0
    }
1121
1122
    // pass 4: assign backends to remaining src from dst and view_src
1123
0
    for (int i = 0; i < graph->n_nodes; i++) {
1124
0
        struct ggml_tensor * node = graph->nodes[i];
1125
0
        int * cur_backend_id = &tensor_backend_id(node);
1126
0
        if (node->view_src != NULL && *cur_backend_id == -1) {
1127
0
            *cur_backend_id = tensor_backend_id(node->view_src);
1128
0
            SET_CAUSE(node, "4.vsrc");
1129
0
        }
1130
0
        for (int j = 0; j < GGML_MAX_SRC; j++) {
1131
0
            struct ggml_tensor * src = node->src[j];
1132
0
            if (src == NULL) {
1133
0
                continue;
1134
0
            }
1135
0
            int * src_backend_id = &tensor_backend_id(src);
1136
0
            if (*src_backend_id == -1) {
1137
0
                if (src->view_src != NULL) {
1138
                    // views are always on the same backend as the source
1139
0
                    *src_backend_id = tensor_backend_id(src->view_src);
1140
0
                    SET_CAUSE(src, "4.vsrc");
1141
0
                } else {
1142
0
                    *src_backend_id = *cur_backend_id;
1143
0
                    SET_CAUSE(src, "4.cur");
1144
0
                }
1145
0
            }
1146
0
        }
1147
        // if the node is still unassigned, assign it to the first backend that supports it
1148
0
        for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) {
1149
0
            ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id);
1150
0
        }
1151
0
        GGML_ASSERT(*cur_backend_id != -1);
1152
0
    }
1153
1154
    // pass 5: split graph, find tensors that need to be copied
1155
0
    {
1156
0
        int i_split = 0;
1157
0
        struct ggml_backend_sched_split * split = &sched->splits[0];
1158
        // find the backend of the first split, skipping view ops
1159
0
        int i = 0;
1160
0
        for (; i < graph->n_nodes; i++) {
1161
0
            struct ggml_tensor * node = graph->nodes[i];
1162
0
            if (!ggml_is_view_op(node->op)) {
1163
0
                split->backend_id = tensor_backend_id(node);
1164
0
                break;
1165
0
            }
1166
0
        }
1167
0
        split->i_start = 0;
1168
0
        split->n_inputs = 0;
1169
0
        int cur_backend_id = split->backend_id;
1170
0
        for (; i < graph->n_nodes; i++) {
1171
0
            struct ggml_tensor * node = graph->nodes[i];
1172
1173
0
            if (ggml_is_view_op(node->op)) {
1174
0
                continue;
1175
0
            }
1176
1177
0
            const int node_backend_id = tensor_backend_id(node);
1178
1179
0
            GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback
1180
1181
            // check if we should start a new split based on the sources of the current node
1182
0
            bool need_new_split = false;
1183
0
            if (node_backend_id == cur_backend_id && split->n_inputs > 0) {
1184
0
                for (int j = 0; j < GGML_MAX_SRC; j++) {
1185
0
                    struct ggml_tensor * src = node->src[j];
1186
0
                    if (src == NULL) {
1187
0
                        continue;
1188
0
                    }
1189
                    // check if a weight is on a different and incompatible backend
1190
                    // by starting a new split, the memory of the previously offloaded weights can be reused
1191
0
                    if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
1192
0
                        int src_backend_id = tensor_backend_id(src);
1193
0
                        if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
1194
0
                            need_new_split = true;
1195
0
                            break;
1196
0
                        }
1197
0
                    }
1198
                    // check if the split has too many inputs
1199
                    // FIXME: count the number of inputs instead of only checking when full
1200
0
                    if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) {
1201
0
                        const size_t id = hash_id(src);
1202
0
                        int src_backend_id = sched->hv_tensor_backend_ids[id];
1203
0
                        bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id);
1204
0
                        if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) {
1205
0
                            need_new_split = true;
1206
0
                            break;
1207
0
                        }
1208
0
                    }
1209
0
                }
1210
0
            }
1211
1212
0
            if (node_backend_id != cur_backend_id || need_new_split) {
1213
0
                split->i_end = i;
1214
0
                i_split++;
1215
0
                if (i_split >= sched->splits_capacity) {
1216
0
                    sched->splits_capacity *= 2;
1217
0
                    sched->splits = (ggml_backend_sched_split *)
1218
0
                        realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split));
1219
0
                    GGML_ASSERT(sched->splits != NULL);
1220
0
                }
1221
0
                split = &sched->splits[i_split];
1222
0
                split->backend_id = node_backend_id;
1223
0
                split->i_start = i;
1224
0
                split->n_inputs = 0;
1225
0
                cur_backend_id = node_backend_id;
1226
0
            }
1227
1228
            // find inputs that are not on the same backend
1229
0
            for (int j = 0; j < GGML_MAX_SRC; j++) {
1230
0
                struct ggml_tensor * src = node->src[j];
1231
0
                if (src == NULL) {
1232
0
                    continue;
1233
0
                }
1234
1235
0
                size_t src_id = hash_id(src);
1236
0
                const int src_backend_id = sched->hv_tensor_backend_ids[src_id];
1237
0
                GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now
1238
1239
0
                if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
1240
0
                    if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) {
1241
0
                        ggml_backend_t backend = sched->backends[src_backend_id];
1242
0
                        for (int c = 0; c < sched->n_copies; c++) {
1243
0
                            struct ggml_tensor * tensor_copy;
1244
0
                            if (c == sched->cur_copy) {
1245
0
                                tensor_copy = src; // use the original tensor as the current copy
1246
0
                            } else {
1247
0
                                tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
1248
0
                                ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
1249
0
                            }
1250
0
                            ggml_set_input(tensor_copy);
1251
0
                            ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
1252
0
                            tensor_id_copy(src_id, src_backend_id, c) = tensor_copy;
1253
0
                            SET_CAUSE(tensor_copy, "4.cpy");
1254
0
                        }
1255
0
                        int n_graph_inputs = sched->n_graph_inputs++;
1256
0
                        GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
1257
0
                        sched->graph_inputs[n_graph_inputs] = src;
1258
0
                    }
1259
0
                }
1260
1261
0
                if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
1262
                    // create a copy of the input in the split's backend
1263
0
                    if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) {
1264
0
                        ggml_backend_t backend = sched->backends[cur_backend_id];
1265
0
                        for (int c = 0; c < sched->n_copies; c++) {
1266
0
                            struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
1267
0
                            ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
1268
0
                            if (sched->n_copies > 1) {
1269
0
                                ggml_set_input(tensor_copy);
1270
0
                                ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
1271
0
                            }
1272
0
                            tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy;
1273
0
                            SET_CAUSE(tensor_copy, "4.cpy");
1274
0
                        }
1275
0
                        int n_inputs = split->n_inputs++;
1276
0
                        GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
1277
0
                        split->inputs[n_inputs] = src;
1278
0
                    }
1279
0
                    node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy);
1280
0
                }
1281
0
            }
1282
0
        }
1283
0
        split->i_end = graph->n_nodes;
1284
0
        sched->n_splits = i_split + 1;
1285
0
    }
1286
1287
0
    if (sched->debug) {
1288
0
        ggml_backend_sched_print_assignments(sched, graph);
1289
0
    }
1290
1291
    // swap node_backend_ids and leaf_backend_ids with prevs
1292
0
    {
1293
0
        int * tmp = sched->node_backend_ids;
1294
0
        sched->node_backend_ids = sched->prev_node_backend_ids;
1295
0
        sched->prev_node_backend_ids = tmp;
1296
1297
0
        tmp = sched->leaf_backend_ids;
1298
0
        sched->leaf_backend_ids = sched->prev_leaf_backend_ids;
1299
0
        sched->prev_leaf_backend_ids = tmp;
1300
0
    }
1301
1302
0
    int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies;
1303
1304
    // remember the actual graph_size for performing reallocation checks later [GGML_SCHED_DEBUG_REALLOC]
1305
0
    sched->debug_prev_graph_size = sched->debug_graph_size;
1306
0
    sched->debug_graph_size = graph_size;
1307
1308
0
    if (sched->graph.size < graph_size) {
1309
0
        sched->graph.size = graph_size;
1310
0
        sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *));
1311
0
        sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *));
1312
0
        GGML_ASSERT(sched->graph.nodes != NULL);
1313
0
        GGML_ASSERT(sched->graph.leafs != NULL);
1314
0
    }
1315
0
    sched->graph.n_nodes = 0;
1316
0
    sched->graph.n_leafs = 0;
1317
1318
0
    struct ggml_cgraph * graph_copy = &sched->graph;
1319
1320
0
    for (int i = 0; i < sched->n_splits; i++) {
1321
0
        struct ggml_backend_sched_split * split = &sched->splits[i];
1322
0
        split->graph = ggml_graph_view(graph, split->i_start, split->i_end);
1323
1324
        // Optimize this split of the graph. This needs to happen before we make graph_copy,
1325
        // so they are in sync.
1326
0
        ggml_backend_graph_optimize(sched->backends[split->backend_id], &split->graph);
1327
1328
        // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
1329
0
        for (int j = 0; j < split->n_inputs; j++) {
1330
0
            assert(graph_copy->size > (graph_copy->n_nodes + 1));
1331
1332
0
            struct ggml_tensor * input = split->inputs[j];
1333
0
            const size_t input_id = hash_id(input);
1334
0
            struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy);
1335
1336
            // add a dependency to the input source so that it is not freed before the copy is done
1337
0
            struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input);
1338
0
            input_dep->src[0] = input;
1339
0
            sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id];
1340
0
            graph_copy->nodes[graph_copy->n_nodes++] = input_dep;
1341
1342
            // add a dependency to the input copy so that it is allocated at the start of the split
1343
0
            sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id;
1344
0
            graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
1345
0
        }
1346
1347
0
        for (int j = split->i_start; j < split->i_end; j++) {
1348
0
            assert(graph_copy->size > graph_copy->n_nodes);
1349
0
            sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]);
1350
0
            graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
1351
0
        }
1352
0
    }
1353
1354
0
    if (sched->n_copies > 1) {
1355
        // add input copies as leafs so that they are allocated first
1356
0
        for (int i = 0; i < sched->n_graph_inputs; i++) {
1357
0
            struct ggml_tensor * input = sched->graph_inputs[i];
1358
0
            size_t id = hash_id(input);
1359
0
            int backend_id = tensor_backend_id(input);
1360
0
            for (int c = 0; c < sched->n_copies; c++) {
1361
0
                struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
1362
0
                sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
1363
0
                assert(graph_copy->size > graph_copy->n_leafs);
1364
0
                graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
1365
0
            }
1366
0
        }
1367
1368
0
        for (int i = 0; i < sched->n_splits; i++) {
1369
0
            struct ggml_backend_sched_split * split = &sched->splits[i];
1370
0
            int backend_id = split->backend_id;
1371
0
            for (int j = 0; j < split->n_inputs; j++) {
1372
0
                struct ggml_tensor * input = split->inputs[j];
1373
0
                size_t id = hash_id(input);
1374
0
                for (int c = 0; c < sched->n_copies; c++) {
1375
0
                    struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
1376
0
                    sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
1377
0
                    assert(graph_copy->size > graph_copy->n_leafs);
1378
0
                    graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
1379
0
                }
1380
0
            }
1381
0
        }
1382
0
    }
1383
1384
    // add leafs from the original graph
1385
0
    for (int i = 0; i < graph->n_leafs; i++) {
1386
0
        struct ggml_tensor * leaf = graph->leafs[i];
1387
0
        sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf);
1388
0
        assert(graph_copy->size > graph_copy->n_leafs);
1389
0
        graph_copy->leafs[graph_copy->n_leafs++] = leaf;
1390
0
    }
1391
0
}
1392
1393
0
static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
1394
0
    bool backend_ids_changed = false;
1395
0
    for (int i = 0; i < sched->graph.n_nodes; i++) {
1396
0
        if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] &&
1397
0
            sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) {
1398
0
            backend_ids_changed = true;
1399
0
            break;
1400
0
        }
1401
0
    }
1402
0
    if (!backend_ids_changed) {
1403
0
        for (int i = 0; i < sched->graph.n_leafs; i++) {
1404
0
            if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] &&
1405
0
                sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) {
1406
0
                backend_ids_changed = true;
1407
0
                break;
1408
0
            }
1409
0
        }
1410
0
    }
1411
1412
    // allocate graph
1413
0
    if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
1414
#ifndef NDEBUG
1415
        GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
1416
#endif
1417
1418
0
        if (sched->debug_realloc > 0) {
1419
            // we are interested only in situations where the graph was reallocated even though its size remained the same [GGML_SCHED_DEBUG_REALLOC]
1420
            // example: https://github.com/ggml-org/llama.cpp/pull/17143
1421
0
            const bool unexpected = !backend_ids_changed && sched->debug_prev_graph_size == sched->debug_graph_size;
1422
1423
0
            if (unexpected || sched->debug_realloc > 1) {
1424
0
                GGML_ABORT("%s: unexpected graph reallocation (graph size = %d, nodes = %d, leafs = %d), debug_realloc = %d\n", __func__,
1425
0
                        sched->debug_graph_size, sched->graph.n_nodes, sched->graph.n_leafs, sched->debug_realloc);
1426
0
            }
1427
0
        }
1428
1429
        // the re-allocation may cause the split inputs to be moved to a different address
1430
        // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy
1431
0
        for (int i = 0; i < sched->n_backends; i++) {
1432
0
            ggml_backend_synchronize(sched->backends[i]);
1433
0
        }
1434
1435
0
        ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids);
1436
0
        if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
1437
0
            GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__);
1438
0
            return false;
1439
0
        }
1440
0
    }
1441
1442
0
    return true;
1443
0
}
1444
1445
0
static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
1446
0
    GGML_ASSERT(sched);
1447
0
    struct ggml_backend_sched_split * splits = sched->splits;
1448
1449
0
    ggml_tensor * prev_ids_tensor = nullptr;
1450
0
    std::vector<int32_t> ids;
1451
0
    std::vector<ggml_bitset_t> used_ids;
1452
1453
0
    for (int split_id = 0; split_id < sched->n_splits; split_id++) {
1454
0
        struct ggml_backend_sched_split * split = &splits[split_id];
1455
0
        int split_backend_id = split->backend_id;
1456
0
        ggml_backend_t split_backend = sched->backends[split_backend_id];
1457
1458
        // copy the input tensors to the split backend
1459
0
        for (int input_id = 0; input_id < split->n_inputs; input_id++) {
1460
0
            ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]);
1461
0
            struct ggml_tensor * input = split->inputs[input_id];
1462
0
            struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);
1463
1464
0
            if (input->flags & GGML_TENSOR_FLAG_INPUT) {
1465
                // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
1466
0
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1467
0
                    ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
1468
0
                } else {
1469
0
                    ggml_backend_synchronize(split_backend);
1470
0
                }
1471
0
                ggml_backend_tensor_copy(input, input_cpy);
1472
0
            } else {
1473
                // wait for the split backend to finish using the input before overwriting it
1474
0
                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1475
0
                    ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
1476
0
                } else {
1477
0
                    ggml_backend_synchronize(split_backend);
1478
0
                }
1479
1480
                // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
1481
0
                ggml_tensor * node = split->graph.nodes[0];
1482
0
                if (split->graph.n_nodes > 0 &&
1483
0
                    ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
1484
0
                    ggml_backend_buffer_is_host(input->buffer) && (
1485
0
                    (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID)
1486
                    //|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) /* GGML_OP_ADD_ID weights are small and not worth splitting */
1487
0
                    )) {
1488
1489
0
                    const int64_t n_expert   = node->op == GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1];
1490
0
                    const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
1491
1492
0
                    ggml_backend_synchronize(input_backend);
1493
1494
                    // get the ids
1495
0
                    ggml_tensor * ids_tensor = node->src[2];
1496
0
                    ggml_backend_t ids_backend = split_backend;
1497
1498
                    // if the ids tensor is also an input of the split, it may not have been copied yet to the split backend
1499
                    // in that case, we use the original ids tensor
1500
0
                    for (int i = input_id + 1; i < split->n_inputs; i++) {
1501
0
                        if (ids_tensor == tensor_copy(split->inputs[i], split_backend_id, sched->cur_copy)) {
1502
0
                            ids_tensor = split->inputs[i];
1503
0
                            ids_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[i]);
1504
0
                            break;
1505
0
                        }
1506
0
                    }
1507
1508
0
                    if (ids_tensor != prev_ids_tensor) {
1509
0
                        ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t));
1510
0
                        ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor));
1511
0
                        ggml_backend_synchronize(ids_backend);
1512
1513
                        // find the used experts
1514
0
                        used_ids.clear();
1515
0
                        used_ids.resize(ggml_bitset_size(n_expert));
1516
0
                        for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
1517
0
                            for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
1518
0
                                int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
1519
0
                                GGML_ASSERT(id >= 0 && id < n_expert);
1520
0
                                ggml_bitset_set(used_ids.data(), id);
1521
0
                            }
1522
0
                        }
1523
1524
0
                        prev_ids_tensor = ids_tensor;
1525
0
                    }
1526
1527
                    // group consecutive experts and copy them together
1528
0
                    auto copy_experts = [&](int32_t first_id, int32_t last_id) {
1529
0
                        const size_t expert_offset = first_id * expert_size;
1530
0
                        const size_t expert_size_copy =  (last_id - first_id + 1) * expert_size;
1531
0
                        const size_t padding = std::min<size_t>(expert_size, 512);
1532
0
                        const size_t padding_end = last_id < n_expert - 1 ? padding : 0;
1533
1534
0
                        ggml_backend_tensor_set_async(split_backend,
1535
0
                            input_cpy,
1536
0
                            (const uint8_t *)input->data + expert_offset, expert_offset,
1537
                            // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert
1538
                            // this is necessary for MMQ in the CUDA backend
1539
0
                            expert_size_copy + padding_end);
1540
0
                    };
1541
1542
0
                    int id = 0;
1543
0
                    while (!ggml_bitset_get(used_ids.data(), id)) {
1544
0
                        id++;
1545
0
                    }
1546
0
                    int32_t first_id = id;
1547
0
                    int32_t last_id = first_id;
1548
1549
0
                    for (++id; id < n_expert; ++id) {
1550
0
                        if (!ggml_bitset_get(used_ids.data(), id)) {
1551
0
                            continue;
1552
0
                        }
1553
1554
0
                        if (id == last_id + 1) {
1555
0
                            last_id = id;
1556
0
                            continue;
1557
0
                        }
1558
1559
0
                        copy_experts(first_id, last_id);
1560
1561
0
                        first_id = id;
1562
0
                        last_id = id;
1563
0
                    }
1564
0
                    copy_experts(first_id, last_id);
1565
0
                } else {
1566
                    // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
1567
                    // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
1568
0
                    if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
1569
0
                        ggml_backend_synchronize(input_backend);
1570
0
                        if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1571
0
                            ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
1572
0
                        } else {
1573
0
                            ggml_backend_synchronize(split_backend);
1574
0
                        }
1575
0
                        ggml_backend_tensor_copy(input, input_cpy);
1576
0
                    }
1577
0
                }
1578
0
            }
1579
0
        }
1580
1581
0
        if (!sched->callback_eval) {
1582
0
            enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph);
1583
0
            if (ec != GGML_STATUS_SUCCESS) {
1584
0
                return ec;
1585
0
            }
1586
0
        } else {
1587
            // similar to ggml_backend_compare_graph_backend
1588
0
            for (int j0 = 0; j0 < split->graph.n_nodes; j0++) {
1589
0
                struct ggml_tensor * t = split->graph.nodes[j0];
1590
1591
                // check if the user needs data from this node
1592
0
                bool need = sched->callback_eval(t, true, sched->callback_eval_user_data);
1593
1594
0
                int j1 = j0;
1595
1596
                // determine the range [j0, j1] of nodes that can be computed together
1597
0
                while (!need && j1 < split->graph.n_nodes - 1) {
1598
0
                    t = split->graph.nodes[++j1];
1599
0
                    need = sched->callback_eval(t, true, sched->callback_eval_user_data);
1600
0
                }
1601
1602
0
                struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);
1603
1604
0
                enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv);
1605
0
                if (ec != GGML_STATUS_SUCCESS) {
1606
0
                    return ec;
1607
0
                }
1608
1609
                // TODO: pass backend to the callback, then the user can decide if they want to synchronize
1610
0
                ggml_backend_synchronize(split_backend);
1611
1612
0
                if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
1613
0
                    break;
1614
0
                }
1615
1616
0
                j0 = j1;
1617
0
            }
1618
0
        }
1619
1620
        // record the event of this copy
1621
0
        if (split->n_inputs > 0) {
1622
0
            if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1623
0
                ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend);
1624
0
            }
1625
0
        }
1626
0
    }
1627
1628
0
    return GGML_STATUS_SUCCESS;
1629
0
}
1630
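
// The MoE path in ggml_backend_sched_compute_splits above copies only the experts whose ids
// appear in node->src[2], batching maximal runs of consecutive ids into a single
// ggml_backend_tensor_set_async call. Below is a minimal, self-contained sketch of that same
// run-grouping idea; the names (copy_used_ranges, used) are illustrative and not part of this file.

#include <cstdio>
#include <vector>

// emit one "copy" per maximal run of consecutive used ids
static void copy_used_ranges(const std::vector<bool> & used) {
    const int n = (int) used.size();
    int id = 0;
    while (id < n && !used[id]) {
        id++;                                               // skip to the first used id
    }
    if (id == n) {
        return;                                             // nothing used at all
    }
    int first = id;
    int last  = id;
    for (++id; id < n; ++id) {
        if (!used[id]) {
            continue;
        }
        if (id == last + 1) {
            last = id;                                      // still consecutive: extend the run
            continue;
        }
        std::printf("copy ids [%d, %d]\n", first, last);    // gap found: flush the current run
        first = id;
        last  = id;
    }
    std::printf("copy ids [%d, %d]\n", first, last);        // flush the final run
}

int main() {
    // expected output: [1, 2], [4, 4], [7, 9]
    copy_used_ranges({false, true, true, false, true, false, false, true, true, true});
}
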
1631
ggml_backend_sched_t ggml_backend_sched_new(
1632
        ggml_backend_t * backends,
1633
        ggml_backend_buffer_type_t * bufts,
1634
        int n_backends,
1635
        size_t graph_size,
1636
        bool parallel,
1637
0
        bool op_offload) {
1638
0
    GGML_ASSERT(n_backends > 0);
1639
0
    GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
1640
0
    GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);
1641
1642
0
    struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched));
1643
1644
0
    const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG");
1645
0
    sched->debug = GGML_SCHED_DEBUG ? atoi(GGML_SCHED_DEBUG) : 0;
1646
1647
0
    sched->debug_realloc = 0;
1648
#ifdef GGML_SCHED_NO_REALLOC
1649
    sched->debug_realloc = 1;
1650
#endif
1651
0
    const char * GGML_SCHED_DEBUG_REALLOC = getenv("GGML_SCHED_DEBUG_REALLOC");
1652
0
    sched->debug_realloc = GGML_SCHED_DEBUG_REALLOC ? atoi(GGML_SCHED_DEBUG_REALLOC) : sched->debug_realloc;
1653
1654
0
    sched->n_backends = n_backends;
1655
0
    sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1;
1656
1657
    // initialize hash table
1658
    // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead)
1659
0
    sched->hash_set    = ggml_hash_set_new(graph_size);
1660
0
    sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
1661
0
    sched->hv_tensor_copies      = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));
1662
1663
0
    const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph
1664
0
    const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2;
1665
0
    sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
1666
0
    sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
1667
0
    sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
1668
0
    sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));
1669
1670
0
    sched->debug_graph_size = 0;
1671
0
    sched->debug_prev_graph_size = 0;
1672
1673
0
    sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false);
1674
0
    sched->context_buffer = (char *) malloc(sched->context_buffer_size);
1675
1676
0
    const int initial_splits_capacity = 16;
1677
0
    sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0]));
1678
0
    sched->splits_capacity = initial_splits_capacity;
1679
1680
0
    for (int b = 0; b < n_backends; b++) {
1681
0
        sched->backends[b] = backends[b];
1682
0
        sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]);
1683
0
        GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b]));
1684
1685
0
        if (sched->n_copies > 1) {
1686
0
            for (int c = 0; c < sched->n_copies; c++) {
1687
0
                sched->events[b][c] = ggml_backend_event_new(backends[b]->device);
1688
0
            }
1689
0
        }
1690
0
    }
1691
1692
0
    sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);
1693
0
    sched->op_offload = op_offload;
1694
1695
0
    ggml_backend_sched_reset(sched);
1696
1697
0
    return sched;
1698
0
}
1699
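
// A hedged usage sketch of the scheduler API defined in this file: build a tiny graph in a
// no_alloc context, create a scheduler over a single CPU backend, reserve buffers from a measure
// graph, then compute. The include of "ggml-cpu.h" for ggml_backend_cpu_init() and the context
// sizing are assumptions; input data is omitted for brevity.

#include "ggml.h"
#include "ggml-backend.h"
#include "ggml-cpu.h"   // assumed location of ggml_backend_cpu_init()

int main() {
    // tiny graph c = a*b with no data allocated yet (the scheduler allocates it)
    struct ggml_init_params ip = { ggml_tensor_overhead()*8 + ggml_graph_overhead(), NULL, /*no_alloc=*/true };
    struct ggml_context * ctx = ggml_init(ip);
    struct ggml_tensor  * a   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
    struct ggml_tensor  * b   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
    struct ggml_cgraph  * gf  = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, ggml_mul(ctx, a, b));

    ggml_backend_t backends[1] = { ggml_backend_cpu_init() };
    ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 1,
                                                        GGML_DEFAULT_GRAPH_SIZE,
                                                        /*parallel=*/false, /*op_offload=*/true);

    ggml_backend_sched_reserve(sched, gf);        // size the backend buffers from a measure graph
    ggml_backend_sched_graph_compute(sched, gf);  // split, allocate and run the graph

    ggml_backend_sched_free(sched);
    ggml_backend_free(backends[0]);
    ggml_free(ctx);
}
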
1700
0
void ggml_backend_sched_free(ggml_backend_sched_t sched) {
1701
0
    if (sched == NULL) {
1702
0
        return;
1703
0
    }
1704
0
    for (int b = 0; b < sched->n_backends; b++) {
1705
0
        for (int c = 0; c < sched->n_copies; c++) {
1706
0
            ggml_backend_event_free(sched->events[b][c]);
1707
0
        }
1708
0
    }
1709
0
    ggml_gallocr_free(sched->galloc);
1710
0
    ggml_free(sched->ctx);
1711
0
    ggml_hash_set_free(&sched->hash_set);
1712
0
    free(sched->splits);
1713
0
    free(sched->hv_tensor_backend_ids);
1714
0
    free(sched->hv_tensor_copies);
1715
0
    free(sched->node_backend_ids);
1716
0
    free(sched->leaf_backend_ids);
1717
0
    free(sched->prev_node_backend_ids);
1718
0
    free(sched->prev_leaf_backend_ids);
1719
0
    free(sched->context_buffer);
1720
0
    free(sched->graph.nodes);
1721
0
    free(sched->graph.leafs);
1722
0
    free(sched);
1723
0
}
1724
1725
0
void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
1726
0
    GGML_ASSERT(sched);
1727
    // reset state for the next run
1728
0
    if (!sched->is_reset) {
1729
0
        ggml_hash_set_reset(&sched->hash_set);
1730
0
        memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
1731
0
        memset(sched->hv_tensor_copies,       0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));
1732
0
        sched->is_reset = true;
1733
0
    }
1734
0
    sched->is_alloc = false;
1735
0
}
1736
1737
0
void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes) {
1738
0
    GGML_ASSERT(sched);
1739
0
    GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);
1740
0
    GGML_ASSERT(sizes);
1741
1742
0
    ggml_backend_sched_reset(sched);
1743
1744
0
    ggml_backend_sched_synchronize(sched);
1745
1746
0
    ggml_backend_sched_split_graph(sched, measure_graph);
1747
1748
0
    ggml_gallocr_reserve_n_size(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids, sizes);
1749
0
}
1750
1751
0
bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
1752
0
    GGML_ASSERT(sched);
1753
0
    GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);
1754
1755
0
    ggml_backend_sched_synchronize(sched);
1756
1757
0
    ggml_backend_sched_split_graph(sched, measure_graph);
1758
1759
0
    if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) {
1760
0
        return false;
1761
0
    }
1762
1763
0
    ggml_backend_sched_reset(sched);
1764
1765
0
    return true;
1766
0
}
1767
1768
0
bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
1769
0
    GGML_ASSERT(sched);
1770
0
    GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs);
1771
0
    GGML_ASSERT(!sched->is_alloc);
1772
1773
0
    sched->cur_copy = sched->next_copy;
1774
0
    sched->next_copy = (sched->next_copy + 1) % sched->n_copies;
1775
1776
0
    ggml_backend_sched_split_graph(sched, graph);
1777
1778
0
    if (!ggml_backend_sched_alloc_splits(sched)) {
1779
0
        return false;
1780
0
    }
1781
1782
0
    sched->is_alloc = true;
1783
1784
0
    return true;
1785
0
}
1786
1787
0
enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
1788
0
    enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph);
1789
0
    ggml_backend_sched_synchronize(sched);
1790
0
    return err;
1791
0
}
1792
1793
0
enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
1794
0
    GGML_ASSERT(sched);
1795
0
    if (!sched->is_reset && !sched->is_alloc) {
1796
0
        ggml_backend_sched_reset(sched);
1797
0
    }
1798
1799
0
    if (!sched->is_alloc) {
1800
0
        if (!ggml_backend_sched_alloc_graph(sched, graph)) {
1801
0
            return GGML_STATUS_ALLOC_FAILED;
1802
0
        }
1803
0
    }
1804
1805
0
    return ggml_backend_sched_compute_splits(sched);
1806
0
}
1807
1808
0
void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) {
1809
0
    GGML_ASSERT(sched);
1810
0
    for (int i = 0; i < sched->n_backends; i++) {
1811
0
        ggml_backend_synchronize(sched->backends[i]);
1812
0
    }
1813
0
    if (!sched->is_alloc) {
1814
        // if the graph is not already allocated, always use copy 0 after a synchronization
1815
        // this ensures that during generation the same copy is used every time,
1816
        // which avoids changes in the graph that could cause CUDA or other graphs to be disabled
1817
0
        sched->next_copy = 0;
1818
0
    }
1819
0
}
1820
1821
0
void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
1822
0
    GGML_ASSERT(sched);
1823
0
    sched->callback_eval = callback;
1824
0
    sched->callback_eval_user_data = user_data;
1825
0
}
1826
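
// A hedged sketch of an eval callback matching how ggml_backend_sched_compute_splits calls it
// above: first with ask == true to decide whether the node's data is needed (which forces a
// synchronization), then with ask == false once the data is available; returning false from the
// ask == false call stops the current split. The callback name and filter are illustrative.

#include <cstdio>
#include "ggml.h"
#include "ggml-backend.h"

static bool observe_soft_max(struct ggml_tensor * t, bool ask, void * user_data) {
    (void) user_data;
    if (ask) {
        return t->op == GGML_OP_SOFT_MAX;   // only look at soft-max outputs in this sketch
    }
    std::printf("observed %s (%s)\n", t->name, ggml_op_name(t->op));
    return true;                            // keep computing
}

// usage (sched assumed to exist): ggml_backend_sched_set_eval_callback(sched, observe_soft_max, NULL);
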
1827
0
int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
1828
0
    GGML_ASSERT(sched);
1829
0
    return sched->n_splits;
1830
0
}
1831
1832
0
int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) {
1833
0
    GGML_ASSERT(sched);
1834
0
    return sched->n_copies;
1835
0
}
1836
1837
0
int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) {
1838
0
    GGML_ASSERT(sched);
1839
0
    return sched->n_backends;
1840
0
}
1841
1842
0
ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) {
1843
0
    GGML_ASSERT(sched);
1844
0
    GGML_ASSERT(i >= 0 && i < sched->n_backends);
1845
0
    return sched->backends[i];
1846
0
}
1847
1848
0
ggml_backend_buffer_type_t ggml_backend_sched_get_buffer_type(ggml_backend_sched_t sched, ggml_backend_t backend) {
1849
0
    GGML_ASSERT(sched);
1850
0
    int backend_index = ggml_backend_sched_backend_id(sched, backend);
1851
0
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
1852
1853
0
    return sched->bufts[backend_index];
1854
0
}
1855
1856
0
size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) {
1857
0
    GGML_ASSERT(sched);
1858
0
    int backend_index = ggml_backend_sched_backend_id(sched, backend);
1859
0
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
1860
1861
0
    return ggml_gallocr_get_buffer_size(sched->galloc, backend_index);
1862
0
}
1863
1864
0
void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
1865
0
    GGML_ASSERT(sched);
1866
0
    int backend_index = ggml_backend_sched_backend_id(sched, backend);
1867
0
    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
1868
0
    tensor_backend_id(node) = backend_index;
1869
0
    SET_CAUSE(node, "usr");
1870
0
    sched->is_reset = false;
1871
0
}
1872
1873
0
ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
1874
0
    GGML_ASSERT(sched);
1875
0
    int backend_index = tensor_backend_id(node);
1876
0
    if (backend_index == -1) {
1877
0
        return NULL;
1878
0
    }
1879
0
    return sched->backends[backend_index];
1880
0
}
1881
1882
// utils
1883
1884
0
enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor) {
1885
0
    GGML_ASSERT(tensor);
1886
0
    GGML_ASSERT(tensor->buffer == NULL);
1887
0
    GGML_ASSERT(tensor->view_src != NULL);
1888
0
    GGML_ASSERT(tensor->view_src->buffer != NULL);
1889
0
    GGML_ASSERT(tensor->view_src->data != NULL);
1890
1891
0
    tensor->buffer = tensor->view_src->buffer;
1892
0
    tensor->data = (char *)tensor->view_src->data + tensor->view_offs;
1893
0
    return ggml_backend_buffer_init_tensor(tensor->buffer, tensor);
1894
0
}
1895
1896
0
enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) {
1897
0
    GGML_ASSERT(tensor);
1898
0
    GGML_ASSERT(tensor->buffer == NULL);
1899
0
    GGML_ASSERT(tensor->data == NULL);
1900
0
    GGML_ASSERT(tensor->view_src == NULL);
1901
0
    GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer));
1902
0
    GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <=
1903
0
                (char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer));
1904
1905
0
    tensor->buffer = buffer;
1906
0
    tensor->data = addr;
1907
0
    return ggml_backend_buffer_init_tensor(buffer, tensor);
1908
0
}
1909
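
// A hedged sketch of manually placing a tensor with ggml_backend_tensor_alloc above, respecting
// the asserted preconditions (no buffer/data/view yet, address inside the buffer).
// ggml_backend_cpu_buffer_type() and ggml_backend_buft_alloc_buffer() come from this file; the
// sizes and names are illustrative.

#include "ggml.h"
#include "ggml-backend.h"

int main() {
    // create a tensor with no data (no_alloc context), then place it manually in a raw buffer
    struct ggml_init_params ip = { ggml_tensor_overhead()*2, NULL, /*no_alloc=*/true };
    struct ggml_context * ctx = ggml_init(ip);
    struct ggml_tensor  * t   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 16);

    ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(),
                                                               ggml_nbytes(t) + 64);
    void * base = ggml_backend_buffer_get_base(buf);      // aligned base address of the buffer
    ggml_backend_tensor_alloc(buf, t, base);               // satisfies the asserts above

    const float v[16] = {0};
    ggml_backend_tensor_set(t, v, 0, sizeof(v));            // tensor is now usable like any other

    ggml_backend_buffer_free(buf);
    ggml_free(ctx);
}
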
1910
static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies,
1911
0
    struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) {
1912
1913
0
    GGML_ASSERT(src != NULL);
1914
0
    GGML_ASSERT(src->data && "graph must be allocated");
1915
1916
0
    size_t id = ggml_hash_insert(&hash_set, src);
1917
0
    if (id == GGML_HASHSET_ALREADY_EXISTS) {
1918
0
        return node_copies[ggml_hash_find(&hash_set, src)];
1919
0
    }
1920
1921
0
    struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src);
1922
0
    if (src->view_src != NULL) {
1923
0
        dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
1924
0
        dst->view_offs = src->view_offs;
1925
0
    }
1926
0
    dst->op = src->op;
1927
0
    dst->flags = src->flags;
1928
0
    memcpy(dst->op_params, src->op_params, sizeof(dst->op_params));
1929
0
    ggml_set_name(dst, src->name);
1930
1931
    // copy src
1932
0
    for (int i = 0; i < GGML_MAX_SRC; i++) {
1933
0
        struct ggml_tensor * s = src->src[i];
1934
0
        if (s == NULL) {
1935
0
            continue;
1936
0
        }
1937
0
        dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
1938
0
    }
1939
1940
0
    node_copies[id] = dst;
1941
0
    return dst;
1942
0
}
1943
1944
0
static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) {
1945
0
    size_t id = ggml_hash_find(hash_set, src);
1946
0
    if (node_init[id]) {
1947
0
        return;
1948
0
    }
1949
0
    node_init[id] = true;
1950
1951
0
    struct ggml_tensor * dst = node_copies[id];
1952
0
    if (dst->view_src != NULL) {
1953
0
        graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src);
1954
0
        enum ggml_status status = ggml_backend_view_init(dst);
1955
0
        GGML_ASSERT(status == GGML_STATUS_SUCCESS);
1956
0
    }
1957
0
    else {
1958
0
        ggml_backend_tensor_copy(src, dst);
1959
0
    }
1960
1961
    // init src
1962
0
    for (int i = 0; i < GGML_MAX_SRC; i++) {
1963
0
        struct ggml_tensor * s = src->src[i];
1964
0
        if (s == NULL) {
1965
0
            continue;
1966
0
        }
1967
0
        graph_copy_init_tensor(hash_set, node_copies, node_init, s);
1968
0
    }
1969
0
}
1970
1971
0
struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) {
1972
0
    GGML_ASSERT(graph);
1973
0
    struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size);
1974
0
    struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT
1975
0
    bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0]));
1976
1977
0
    struct ggml_init_params params = {
1978
0
        /* .mem_size   = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
1979
0
        /* .mem_buffer = */ NULL,
1980
0
        /* .no_alloc   = */ true
1981
0
    };
1982
1983
0
    struct ggml_context * ctx_allocated = ggml_init(params);
1984
0
    struct ggml_context * ctx_unallocated = ggml_init(params);
1985
1986
0
    if (ctx_allocated == NULL || ctx_unallocated == NULL) {
1987
0
        GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__);
1988
0
        ggml_hash_set_free(&hash_set);
1989
0
        free(node_copies);
1990
0
        free(node_init);
1991
0
        ggml_free(ctx_allocated);
1992
0
        ggml_free(ctx_unallocated);
1993
0
        return {
1994
0
            /* .buffer           = */ NULL,
1995
0
            /* .ctx_allocated    = */ NULL,
1996
0
            /* .ctx_unallocated  = */ NULL,
1997
0
            /* .graph            = */ NULL,
1998
0
        };
1999
0
    }
2000
2001
    // dup nodes
2002
0
    for (int i = 0; i < graph->n_nodes; i++) {
2003
0
        struct ggml_tensor * node = graph->nodes[i];
2004
0
        graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
2005
0
    }
2006
2007
    // allocate nodes
2008
0
    ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
2009
0
    if (buffer == NULL) {
2010
0
        GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__);
2011
0
        ggml_hash_set_free(&hash_set);
2012
0
        free(node_copies);
2013
0
        free(node_init);
2014
0
        ggml_free(ctx_allocated);
2015
0
        ggml_free(ctx_unallocated);
2016
0
        return {
2017
0
            /* .buffer           = */ NULL,
2018
0
            /* .ctx_allocated    = */ NULL,
2019
0
            /* .ctx_unallocated  = */ NULL,
2020
0
            /* .graph            = */ NULL,
2021
0
        };
2022
0
    }
2023
2024
    //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);
2025
2026
    // copy data and init views
2027
0
    for (int i = 0; i < graph->n_nodes; i++) {
2028
0
        struct ggml_tensor * node = graph->nodes[i];
2029
0
        graph_copy_init_tensor(&hash_set, node_copies, node_init, node);
2030
0
    }
2031
2032
    // build graph copy
2033
0
    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false);
2034
0
    for (int i = 0; i < graph->n_nodes; i++) {
2035
0
        struct ggml_tensor * node = graph->nodes[i];
2036
0
        struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)];
2037
0
        graph_copy->nodes[i] = node_copy;
2038
0
    }
2039
0
    graph_copy->n_nodes = graph->n_nodes;
2040
2041
0
    ggml_hash_set_free(&hash_set);
2042
0
    free(node_copies);
2043
0
    free(node_init);
2044
2045
0
    return {
2046
0
        /* .buffer           = */ buffer,
2047
0
        /* .ctx_allocated    = */ ctx_allocated,
2048
0
        /* .ctx_unallocated  = */ ctx_unallocated,
2049
0
        /* .graph            = */ graph_copy,
2050
0
    };
2051
0
}
2052
2053
0
void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) {
2054
0
    ggml_backend_buffer_free(copy.buffer);
2055
0
    ggml_free(copy.ctx_allocated);
2056
0
    ggml_free(copy.ctx_unallocated);
2057
0
}
2058
2059
0
bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor const * const * test_nodes, size_t num_test_nodes) {
2060
0
    struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
2061
0
    if (copy.buffer == NULL) {
2062
0
        return false;
2063
0
    }
2064
2065
0
    struct ggml_cgraph * g1 = graph;
2066
0
    struct ggml_cgraph * g2 = copy.graph;
2067
2068
0
    assert(g1->n_nodes == g2->n_nodes);
2069
2070
0
    if (num_test_nodes != 0) {
2071
0
        GGML_ASSERT(test_nodes);
2072
        // Compute the whole graph and only test the output for specific tensors
2073
0
        ggml_backend_graph_compute(backend1, g1);
2074
0
        ggml_backend_graph_compute(backend2, g2);
2075
2076
0
        bool verified = false;
2077
0
        for (int i = 0; i < g1->n_nodes; i++) {
2078
0
            for (size_t j = 0; j < num_test_nodes; ++j) {
2079
0
                if (g1->nodes[i] == test_nodes[j]) {
2080
0
                    callback(i, g1->nodes[i], g2->nodes[i], user_data);
2081
0
                    verified = true;
2082
0
                }
2083
0
            }
2084
0
        }
2085
0
        GGML_ASSERT(verified);
2086
0
    } else {
2087
0
        for (int i = 0; i < g1->n_nodes; i++) {
2088
0
            struct ggml_tensor * t1 = g1->nodes[i];
2089
0
            struct ggml_tensor * t2 = g2->nodes[i];
2090
2091
0
            assert(t1->op == t2->op && ggml_are_same_layout(t1, t2));
2092
2093
0
            struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1);
2094
0
            struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1);
2095
2096
0
            ggml_backend_graph_compute(backend1, &g1v);
2097
0
            ggml_backend_graph_compute(backend2, &g2v);
2098
2099
0
            if (ggml_is_view_op(t1->op)) {
2100
0
                continue;
2101
0
            }
2102
2103
            // compare results, calculate rms etc
2104
0
            if (!callback(i, t1, t2, user_data)) {
2105
0
                break;
2106
0
            }
2107
0
        }
2108
0
    }
2109
0
    ggml_backend_graph_copy_free(copy);
2110
2111
0
    return true;
2112
0
}
2113
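
// A hedged sketch of a comparison callback for ggml_backend_compare_graph_backend above: it
// copies both results to host memory and reports the max absolute difference for contiguous F32
// nodes, returning true so the comparison continues. The callback name and tolerance handling
// are illustrative, not part of this file.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>
#include "ggml.h"
#include "ggml-backend.h"

static bool compare_f32(int i, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
    (void) user_data;
    if (t1->type != GGML_TYPE_F32 || !ggml_is_contiguous(t1)) {
        return true;                             // only check contiguous F32 nodes in this sketch
    }
    const int64_t n = ggml_nelements(t1);
    std::vector<float> a(n), b(n);
    ggml_backend_tensor_get(t1, a.data(), 0, n*sizeof(float));
    ggml_backend_tensor_get(t2, b.data(), 0, n*sizeof(float));
    float max_diff = 0.0f;
    for (int64_t k = 0; k < n; k++) {
        max_diff = std::max(max_diff, std::fabs(a[k] - b[k]));
    }
    std::printf("node %d (%s): max abs diff = %g\n", i, t1->name, max_diff);
    return true;                                 // keep going through the remaining nodes
}

// usage (backends and gf assumed to exist):
// ggml_backend_compare_graph_backend(backend1, backend2, gf, compare_f32, NULL, NULL, 0);
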
2114
// CPU backend - buffer
2115
2116
0
static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
2117
0
    GGML_ASSERT(buffer);
2118
0
    uintptr_t data = (uintptr_t)buffer->context;
2119
2120
    // align the buffer
2121
0
    if (data % TENSOR_ALIGNMENT != 0) {
2122
0
        data = GGML_PAD(data, TENSOR_ALIGNMENT);
2123
0
    }
2124
2125
0
    return (void *)data;
2126
0
}
2127
2128
0
static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
2129
0
    GGML_ASSERT(buffer);
2130
0
    ggml_aligned_free(buffer->context, buffer->size);
2131
0
}
2132
2133
0
static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
2134
0
    GGML_ASSERT(tensor);
2135
0
    memset((char *)tensor->data + offset, value, size);
2136
2137
0
    GGML_UNUSED(buffer);
2138
0
}
2139
2140
0
static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
2141
0
    GGML_ASSERT(tensor);
2142
0
    memcpy((char *)tensor->data + offset, data, size);
2143
2144
0
    GGML_UNUSED(buffer);
2145
0
}
2146
2147
0
static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
2148
0
    GGML_ASSERT(tensor);
2149
0
    memcpy(data, (const char *)tensor->data + offset, size);
2150
2151
0
    GGML_UNUSED(buffer);
2152
0
}
2153
2154
0
static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
2155
0
    GGML_ASSERT(src);
2156
0
    if (ggml_backend_buffer_is_host(src->buffer)) {
2157
0
        memcpy(dst->data, src->data, ggml_nbytes(src));
2158
0
        return true;
2159
0
    }
2160
0
    return false;
2161
2162
0
    GGML_UNUSED(buffer);
2163
0
}
2164
2165
0
static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
2166
0
    GGML_ASSERT(buffer);
2167
0
    memset(buffer->context, value, buffer->size);
2168
0
}
2169
2170
static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = {
2171
    /* .free_buffer     = */ ggml_backend_cpu_buffer_free_buffer,
2172
    /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
2173
    /* .init_tensor     = */ NULL, // no initialization required
2174
    /* .memset_tensor   = */ ggml_backend_cpu_buffer_memset_tensor,
2175
    /* .set_tensor      = */ ggml_backend_cpu_buffer_set_tensor,
2176
    /* .get_tensor      = */ ggml_backend_cpu_buffer_get_tensor,
2177
    /* .cpy_tensor      = */ ggml_backend_cpu_buffer_cpy_tensor,
2178
    /* .clear           = */ ggml_backend_cpu_buffer_clear,
2179
    /* .reset           = */ NULL,
2180
};
2181
2182
static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = {
2183
    /* .free_buffer     = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
2184
    /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
2185
    /* .init_tensor     = */ NULL, // no initialization required
2186
    /* .memset_tensor   = */ ggml_backend_cpu_buffer_memset_tensor,
2187
    /* .set_tensor      = */ ggml_backend_cpu_buffer_set_tensor,
2188
    /* .get_tensor      = */ ggml_backend_cpu_buffer_get_tensor,
2189
    /* .cpy_tensor      = */ ggml_backend_cpu_buffer_cpy_tensor,
2190
    /* .clear           = */ ggml_backend_cpu_buffer_clear,
2191
    /* .reset           = */ NULL,
2192
};
2193
2194
// CPU backend buffer type
2195
2196
// this buffer type is defined here to make it available to all backends
2197
2198
0
static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
2199
0
    return "CPU";
2200
2201
0
    GGML_UNUSED(buft);
2202
0
}
2203
2204
0
static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
2205
0
    void * data = ggml_aligned_malloc(size);
2206
2207
0
    if (data == NULL) {
2208
0
        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size);
2209
0
        return NULL;
2210
0
    }
2211
2212
0
    return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size);
2213
0
}
2214
2215
0
static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
2216
0
    return TENSOR_ALIGNMENT;
2217
2218
0
    GGML_UNUSED(buft);
2219
0
}
2220
2221
0
static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
2222
0
    return true;
2223
2224
0
    GGML_UNUSED(buft);
2225
0
}
2226
2227
0
ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
2228
0
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
2229
0
        /* .iface   = */ {
2230
0
            /* .get_name         = */ ggml_backend_cpu_buffer_type_get_name,
2231
0
            /* .alloc_buffer     = */ ggml_backend_cpu_buffer_type_alloc_buffer,
2232
0
            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
2233
0
            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
2234
0
            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
2235
0
            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
2236
0
        },
2237
0
        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
2238
0
        /* .context = */ NULL,
2239
0
    };
2240
2241
0
    return &ggml_backend_cpu_buffer_type;
2242
0
}
2243
2244
0
static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) {
2245
0
    return "CPU_Mapped";
2246
2247
0
    GGML_UNUSED(buft);
2248
0
}
2249
2250
0
static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) {
2251
0
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
2252
0
        /* .iface   = */ {
2253
0
            /* .get_name         = */ ggml_backend_cpu_buffer_from_ptr_type_get_name,
2254
0
            /* .alloc_buffer     = */ ggml_backend_cpu_buffer_type_alloc_buffer,
2255
0
            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
2256
0
            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
2257
0
            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
2258
0
            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
2259
0
        },
2260
0
        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
2261
0
        /* .context = */ NULL,
2262
0
    };
2263
2264
0
    return &ggml_backend_cpu_buffer_type;
2265
0
}
2266
2267
0
ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
2268
0
    GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned");
2269
0
    return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size);
2270
0
}
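
// A hedged usage sketch of ggml_backend_cpu_buffer_from_ptr above: wrap an existing, suitably
// aligned host allocation so tensors can be placed in it without the buffer owning (or ever
// freeing) the memory. The 64-byte alignment is an assumption chosen to satisfy the alignment
// assert; names and sizes are illustrative.

#include <cstdlib>
#include "ggml.h"
#include "ggml-backend.h"

int main() {
    const size_t size = 1u << 20;                       // 1 MiB, a multiple of the alignment
    void * mem = std::aligned_alloc(64, size);          // caller-owned host memory

    ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(mem, size);
    ggml_backend_buffer_clear(buf, 0);                  // e.g. zero the wrapped region

    ggml_backend_buffer_free(buf);                      // frees only the wrapper, never `mem`
    std::free(mem);
}
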