Coverage Report

Created: 2026-02-01 07:21

/src/llama.cpp/src/llama.cpp
Line | Count | Source
1
#include "llama.h"
2
3
#include "llama-impl.h"
4
5
#include "llama-chat.h"
6
#include "llama-context.h"
7
#include "llama-mmap.h"
8
#include "llama-vocab.h"
9
#include "llama-model-loader.h"
10
#include "llama-model-saver.h"
11
#include "llama-model.h"
12
13
#include "ggml.h"
14
#include "ggml-backend.h"
15
16
#include <algorithm>
17
#include <cassert>
18
#include <cinttypes>
19
#include <cstddef>
20
#include <cstdint>
21
#include <cstdio>
22
#include <cstring>
23
#include <ctime>
24
#include <stdexcept>
25
26
#if defined(_MSC_VER)
27
#pragma warning(disable: 4244 4267) // possible loss of data
28
#endif
29
30
//
31
// interface implementation
32
//
33
34
0
const char * llama_flash_attn_type_name(enum llama_flash_attn_type flash_attn_type) {
35
0
    switch (flash_attn_type) {
36
0
        case LLAMA_FLASH_ATTN_TYPE_AUTO:
37
0
            return "auto";
38
0
        case LLAMA_FLASH_ATTN_TYPE_DISABLED:
39
0
            return "disabled";
40
0
        case LLAMA_FLASH_ATTN_TYPE_ENABLED:
41
0
            return "enabled";
42
0
    }
43
0
    GGML_ABORT("fatal error");
44
0
}
45
46
struct llama_device_memory_data {
47
    int64_t total;
48
    int64_t free;
49
    llama_memory_breakdown_data mb;
50
};
51
52
static std::vector<llama_device_memory_data> llama_get_device_memory_data(
53
        const char * path_model, const llama_model_params * mparams, const llama_context_params * cparams,
54
        std::vector<ggml_backend_dev_t> & devs, uint32_t & hp_ngl, uint32_t & hp_n_ctx_train, uint32_t & hp_n_expert,
55
0
        const ggml_log_level log_level) {
56
0
    struct user_data_t {
57
0
        struct {
58
0
            ggml_log_callback callback;
59
0
            void * user_data;
60
0
        } original_logger;
61
0
        ggml_log_level min_level; // prints below this log level go to debug log
62
0
    };
63
0
    user_data_t ud;
64
0
    llama_log_get(&ud.original_logger.callback, &ud.original_logger.user_data);
65
0
    ud.min_level = log_level;
66
67
0
    llama_log_set([](ggml_log_level level, const char * text, void * user_data) {
68
0
        const user_data_t * ud = (const user_data_t *) user_data;
69
0
        const ggml_log_level level_eff = level >= ud->min_level ? level : GGML_LOG_LEVEL_DEBUG;
70
0
        ud->original_logger.callback(level_eff, text, ud->original_logger.user_data);
71
0
    }, &ud);
72
73
0
    llama_model_params mparams_copy = *mparams;
74
0
    mparams_copy.no_alloc  = true;
75
0
    mparams_copy.use_mmap  = false;
76
0
    mparams_copy.use_mlock = false;
77
78
0
    llama_model * model = llama_model_load_from_file(path_model, mparams_copy);
79
0
    if (model == nullptr) {
80
0
        llama_log_set(ud.original_logger.callback, ud.original_logger.user_data);
81
0
        throw std::runtime_error("failed to load model");
82
0
    }
83
84
0
    llama_context * ctx = llama_init_from_model(model, *cparams);
85
0
    if (ctx == nullptr) {
86
0
        llama_model_free(model);
87
0
        llama_log_set(ud.original_logger.callback, ud.original_logger.user_data);
88
0
        throw std::runtime_error("failed to create llama_context from model");
89
0
    }
90
91
0
    std::vector<llama_device_memory_data> ret(model->devices.size());
92
93
0
    std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> memory_breakdown = ctx->memory_breakdown();
94
95
0
    for (const auto & [buft, mb] : memory_breakdown) {
96
0
        if (ggml_backend_buft_is_host(buft)) {
97
0
            continue;
98
0
        }
99
100
0
        ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
101
0
        if (!dev) {
102
0
            continue;
103
0
        }
104
0
        for (size_t i = 0; i < ret.size(); i++) {
105
0
            if (model->devices[i] == dev) {
106
0
                ret[i].mb.model   += mb.model;
107
0
                ret[i].mb.context += mb.context;
108
0
                ret[i].mb.compute += mb.compute;
109
0
                break;
110
0
            }
111
0
        }
112
0
    }
113
0
    for (size_t i = 0; i < ret.size(); i++) {
114
0
        size_t free;
115
0
        size_t total;
116
0
        ggml_backend_dev_memory(model->devices[i], &free, &total);
117
118
        // devices can return 0 bytes for free and total memory if they do not
119
        // have any to report. in this case, we will use the host memory as a fallback
120
        // fixes: https://github.com/ggml-org/llama.cpp/issues/18577
121
0
        if (free == 0 && total == 0) {
122
0
            ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
123
0
            if (cpu_dev == nullptr) {
124
0
                throw std::runtime_error(format("%s: no CPU backend found", __func__));
125
0
            }
126
0
            ggml_backend_dev_memory(cpu_dev, &free, &total);
127
0
        }
128
0
        ret[i].free  = free;
129
0
        ret[i].total = total;
130
0
    }
131
132
0
    devs           = model->devices;
133
0
    hp_ngl         = model->hparams.n_layer;
134
0
    hp_n_ctx_train = model->hparams.n_ctx_train;
135
0
    hp_n_expert    = model->hparams.n_expert;
136
137
0
    llama_memory_breakdown_print(ctx); // goes to debug log
138
139
0
    llama_free(ctx);
140
0
    llama_model_free(model);
141
0
    llama_log_set(ud.original_logger.callback, ud.original_logger.user_data);
142
0
    return ret;
143
0
}
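The logger swap above (save the current callback with llama_log_get, demote everything below a chosen level to GGML_LOG_LEVEL_DEBUG, restore it afterwards) can be wrapped in a small RAII guard. A minimal sketch using only the llama_log_get/llama_log_set calls seen above; the scoped_quiet_logger class itself is hypothetical and not part of llama.cpp:

#include "llama.h"

// scoped logger that forwards messages below `min_level` as debug output
// and restores the previous logger on destruction (illustrative sketch)
struct scoped_quiet_logger {
    ggml_log_callback prev_cb   = nullptr;
    void *            prev_ud   = nullptr;
    ggml_log_level    min_level = GGML_LOG_LEVEL_WARN;

    explicit scoped_quiet_logger(ggml_log_level level) : min_level(level) {
        llama_log_get(&prev_cb, &prev_ud);
        llama_log_set([](ggml_log_level lvl, const char * text, void * ud) {
            auto * self = (scoped_quiet_logger *) ud;
            const ggml_log_level eff = lvl >= self->min_level ? lvl : GGML_LOG_LEVEL_DEBUG;
            self->prev_cb(eff, text, self->prev_ud);
        }, this);
    }

    scoped_quiet_logger(const scoped_quiet_logger &) = delete; // `this` is registered as user data

    ~scoped_quiet_logger() {
        llama_log_set(prev_cb, prev_ud); // restore the original logger
    }
};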
144
145
// enum to identify part of a layer for distributing its tensors:
146
enum layer_fraction_t {
147
    LAYER_FRACTION_NONE = 0, // nothing
148
    LAYER_FRACTION_ATTN = 1, // attention
149
    LAYER_FRACTION_UP   = 2, // attention + up
150
    LAYER_FRACTION_GATE = 3, // attention + up + gate
151
    LAYER_FRACTION_MOE  = 4, // everything but sparse MoE weights
152
};
153
// this enum is only used in llama_params_fit_impl but needs to be defined outside of it to fix a Windows compilation issue
154
155
class llama_params_fit_exception : public std::runtime_error {
156
    using std::runtime_error::runtime_error;
157
};
158
159
static void llama_params_fit_impl(
160
        const char * path_model, struct llama_model_params * mparams, struct llama_context_params * cparams,
161
        float * tensor_split, struct llama_model_tensor_buft_override * tensor_buft_overrides,
162
0
        size_t * margins_s, uint32_t n_ctx_min, enum ggml_log_level log_level) {
163
0
    constexpr int64_t MiB = 1024*1024;
164
0
    typedef std::vector<llama_device_memory_data> dmds_t;
165
0
    const llama_model_params default_mparams = llama_model_default_params();
166
167
0
    std::vector<ggml_backend_dev_t> devs;
168
0
    uint32_t hp_ngl = 0; // hparams.n_gpu_layers
169
0
    uint32_t hp_nct = 0; // hparams.n_ctx_train
170
0
    uint32_t hp_nex = 0; // hparams.n_expert
171
172
    // step 1: get data for default parameters and check whether any changes are necessary in the first place
173
174
0
    LLAMA_LOG_DEBUG("%s: getting device memory data for initial parameters:\n", __func__);
175
0
    const dmds_t dmds_full = llama_get_device_memory_data(path_model, mparams, cparams, devs, hp_ngl, hp_nct, hp_nex, log_level);
176
0
    const size_t nd = devs.size(); // number of devices
177
0
    if (nd == 0) {
178
0
        LLAMA_LOG_INFO("%s: no devices with dedicated memory found\n", __func__);
179
0
        return;
180
0
    }
181
182
0
    std::vector<int64_t> margins; // this function uses int64_t rather than size_t for memory sizes to more conveniently handle deficits
183
0
    margins.reserve(nd);
184
0
    for (size_t id = 0; id < nd; id++) {
185
0
        margins.push_back(margins_s[id]);
186
0
    }
187
188
0
    std::vector<std::string> dev_names;
189
0
    {
190
0
        dev_names.reserve(nd);
191
0
        size_t max_length = 0;
192
0
        for (ggml_backend_dev_t dev : devs) {
193
0
            std::string name = ggml_backend_dev_name(dev);
194
0
            name += " (";
195
0
            name += ggml_backend_dev_description(dev);
196
0
            name += ")";
197
0
            dev_names.push_back(name);
198
0
            max_length = std::max(max_length, name.length());
199
0
        }
200
0
        for (std::string & dn : dev_names) {
201
0
            dn.insert(dn.end(), max_length - dn.length(), ' ');
202
0
        }
203
0
    }
204
205
0
    int64_t sum_free            = 0;
206
0
    int64_t sum_projected_free  = 0;
207
0
    int64_t sum_projected_used  = 0;
208
0
    int64_t sum_projected_model = 0;
209
0
    std::vector<int64_t> projected_free_per_device;
210
0
    projected_free_per_device.reserve(nd);
211
212
0
    if (nd > 1) {
213
0
        LLAMA_LOG_INFO("%s: projected memory use with initial parameters [MiB]:\n", __func__);
214
0
    }
215
0
    for (size_t id = 0; id < nd; id++) {
216
0
        const llama_device_memory_data & dmd = dmds_full[id];
217
218
0
        const int64_t projected_used = dmd.mb.total();
219
0
        const int64_t projected_free = dmd.free - projected_used;
220
0
        projected_free_per_device.push_back(projected_free);
221
222
0
        sum_free            += dmd.free;
223
0
        sum_projected_used  += projected_used;
224
0
        sum_projected_free  += projected_free;
225
0
        sum_projected_model += dmd.mb.model;
226
227
0
        if (nd > 1) {
228
0
            LLAMA_LOG_INFO("%s:   - %s: %6" PRId64 " total, %6" PRId64 " used, %6" PRId64 " free vs. target of %6" PRId64 "\n",
229
0
                __func__, dev_names[id].c_str(), dmd.total/MiB, projected_used/MiB, projected_free/MiB, margins[id]/MiB);
230
0
        }
231
0
    }
232
0
    assert(sum_free >= 0 && sum_projected_used >= 0);
233
0
    LLAMA_LOG_INFO("%s: projected to use %" PRId64 " MiB of device memory vs. %" PRId64 " MiB of free device memory\n",
234
0
        __func__, sum_projected_used/MiB, sum_free/MiB);
235
0
    if (nd == 1) {
236
0
        if (projected_free_per_device[0] >= margins[0]) {
237
0
            LLAMA_LOG_INFO("%s: will leave %" PRId64 " >= %" PRId64 " MiB of free device memory, no changes needed\n",
238
0
                __func__, projected_free_per_device[0]/MiB, margins[0]/MiB);
239
0
            return;
240
0
        }
241
0
    } else {
242
0
        bool changes_needed = false;
243
0
        for (size_t id = 0; id < nd; id++) {
244
0
            if (projected_free_per_device[id] < margins[id]) {
245
0
                changes_needed = true;
246
0
                break;
247
0
            }
248
0
        }
249
0
        if (!changes_needed) {
250
0
            LLAMA_LOG_INFO("%s: targets for free memory can be met on all devices, no changes needed\n", __func__);
251
0
            return;
252
0
        }
253
0
    }
254
255
    // step 2: try reducing memory use by reducing the context size
256
257
0
    {
258
0
        int64_t global_surplus = sum_projected_free;
259
0
        for (size_t id = 0; id < nd; id++) {
260
0
            global_surplus -= margins[id];
261
0
        }
262
0
        if (global_surplus < 0) {
263
0
            if (nd == 1) {
264
0
                LLAMA_LOG_INFO("%s: cannot meet free memory target of %" PRId64 " MiB, need to reduce device memory by %" PRId64 " MiB\n",
265
0
                    __func__, margins[0]/MiB, -global_surplus/MiB);
266
0
            } else {
267
0
                LLAMA_LOG_INFO(
268
0
                    "%s: cannot meet free memory targets on all devices, need to use %" PRId64 " MiB less in total\n",
269
0
                    __func__, -global_surplus/MiB);
270
0
            }
271
0
            if (cparams->n_ctx == 0) {
272
0
                if (hp_nct > n_ctx_min) {
273
0
                    int64_t sum_used_target = sum_free;
274
0
                    for (size_t id = 0; id < nd; id++) {
275
0
                        sum_used_target -= margins[id];
276
0
                    }
277
0
                    if (nd > 1) {
278
                        // for multiple devices we need to be more conservative in terms of how much context we think can fit:
279
                        //   - for dense models only whole layers can be assigned to devices
280
                        //   - for MoE models only whole tensors can be assigned to devices, which we estimate to be <= 1/3 of a layer
281
                        //   - on average we expect a waste of 0.5 layers/tensors per device
282
                        //   - use slightly more than the expected average for nd devices to be safe
283
0
                        const int64_t model_per_layer = sum_projected_model / std::min(uint32_t(mparams->n_gpu_layers), hp_ngl);
284
0
                        sum_used_target -= (nd + 1) * model_per_layer / (hp_nex == 0 ? 2 : 6);
285
0
                    }
286
287
0
                    int64_t sum_projected_used_min_ctx = 0;
288
0
                    cparams->n_ctx = n_ctx_min;
289
0
                    const dmds_t dmds_min_ctx = llama_get_device_memory_data(path_model, mparams, cparams, devs, hp_ngl, hp_nct, hp_nex, log_level);
290
0
                    for (const auto & dmd : dmds_min_ctx) {
291
0
                        sum_projected_used_min_ctx += dmd.mb.total();
292
0
                    }
293
0
                    if (sum_used_target > sum_projected_used_min_ctx) {
294
                        // linear interpolation between minimum and maximum context size:
295
0
                        cparams->n_ctx += (hp_nct - n_ctx_min) * (sum_used_target - sum_projected_used_min_ctx)
296
0
                            / (sum_projected_used - sum_projected_used_min_ctx);
297
0
                        cparams->n_ctx = std::max(cparams->n_ctx - cparams->n_ctx % 256, n_ctx_min); // round down context for CUDA backend
298
299
0
                        const int64_t bytes_per_ctx = (sum_projected_used - sum_projected_used_min_ctx) / (hp_nct - n_ctx_min);
300
0
                        const int64_t memory_reduction = (hp_nct - cparams->n_ctx) * bytes_per_ctx;
301
0
                        LLAMA_LOG_INFO("%s: context size reduced from %" PRIu32 " to %" PRIu32 " -> need %" PRId64 " MiB less memory in total\n",
302
0
                            __func__, hp_nct, cparams->n_ctx, memory_reduction/MiB);
303
0
                        if (nd == 1) {
304
0
                            LLAMA_LOG_INFO("%s: entire model can be fit by reducing context\n", __func__);
305
0
                            return;
306
0
                        }
307
0
                        LLAMA_LOG_INFO("%s: entire model should be fit across devices by reducing context\n", __func__);
308
0
                    } else {
309
0
                        const int64_t memory_reduction = sum_projected_used - sum_projected_used_min_ctx;
310
0
                        LLAMA_LOG_INFO("%s: context size reduced from %" PRIu32 " to %" PRIu32 " -> need %" PRId64 " MiB less memory in total\n",
311
0
                            __func__, hp_nct, cparams->n_ctx, memory_reduction/MiB);
312
0
                    }
313
0
                } else {
314
0
                    if (n_ctx_min == UINT32_MAX) {
315
0
                        LLAMA_LOG_INFO("%s: user has requested full context size of %" PRIu32 " -> no change\n", __func__, hp_nct);
316
0
                    } else {
317
0
                        LLAMA_LOG_INFO("%s: default model context size is %" PRIu32 " which is <= the min. context size of %" PRIu32 " -> no change\n",
318
0
                            __func__, hp_nct, n_ctx_min);
319
0
                    }
320
0
                }
321
0
            } else {
322
0
                LLAMA_LOG_INFO("%s: context size set by user to %" PRIu32 " -> no change\n", __func__, cparams->n_ctx);
323
0
            }
324
0
        }
325
0
    }
326
327
0
    if (mparams->n_gpu_layers != default_mparams.n_gpu_layers) {
328
0
        throw llama_params_fit_exception("n_gpu_layers already set by user to " + std::to_string(mparams->n_gpu_layers) + ", abort");
329
0
    }
330
0
    if (nd > 1) {
331
0
        if (!tensor_split) {
332
0
            throw llama_params_fit_exception("did not provide a buffer to write the tensor_split to, abort");
333
0
        }
334
0
        if (mparams->tensor_split) {
335
0
            for (size_t id = 0; id < nd; id++) {
336
0
                if (mparams->tensor_split[id] != 0.0f) {
337
0
                    throw llama_params_fit_exception("model_params::tensor_split already set by user, abort");
338
0
                }
339
0
            }
340
0
        }
341
0
        if (mparams->split_mode == LLAMA_SPLIT_MODE_ROW) {
342
0
            throw llama_params_fit_exception("changing weight allocation for LLAMA_SPLIT_MODE_ROW not implemented, abort");
343
0
        }
344
0
    }
345
0
    if (!tensor_buft_overrides) {
346
0
        throw llama_params_fit_exception("did not provide buffer to set tensor_buft_overrides, abort");
347
0
    }
348
0
    if (mparams->tensor_buft_overrides && (mparams->tensor_buft_overrides->pattern || mparams->tensor_buft_overrides->buft)) {
349
0
        throw llama_params_fit_exception("model_params::tensor_buft_overrides already set by user, abort");
350
0
    }
351
352
    // step 3: iteratively fill the devices back to front with "dense" layers
353
    //   - for a dense model simply fill full layers, giving each device a contiguous slice of the model
354
    //   - for a MoE model, same as dense model but with all MoE tensors in system memory
355
356
    // utility function that returns a static C string matching the tensors for a specific layer index and layer fraction:
357
0
    auto get_overflow_pattern = [&](const size_t il, const layer_fraction_t lf) -> const char * {
358
0
        constexpr size_t n_strings = 1000;
359
0
        if (il >= n_strings) {
360
0
            throw std::runtime_error("at most " + std::to_string(n_strings) + " model layers are supported");
361
0
        }
362
0
        switch (lf) {
363
0
            case LAYER_FRACTION_ATTN: {
364
0
                static std::array<std::string, n_strings> patterns;
365
0
                if (patterns[il].empty()) {
366
0
                    patterns[il] = "blk\\." + std::to_string(il) + "\\.ffn_(up|gate|down).*";
367
0
                }
368
0
                return patterns[il].c_str();
369
0
            }
370
0
            case LAYER_FRACTION_UP: {
371
0
                static std::array<std::string, n_strings> patterns;
372
0
                if (patterns[il].empty()) {
373
0
                    patterns[il] = "blk\\." + std::to_string(il) + "\\.ffn_(gate|down).*";
374
0
                }
375
0
                return patterns[il].c_str();
376
0
            }
377
0
            case LAYER_FRACTION_GATE: {
378
0
                static std::array<std::string, n_strings> patterns;
379
0
                if (patterns[il].empty()) {
380
0
                    patterns[il] = "blk\\." + std::to_string(il) + "\\.ffn_down.*";
381
0
                }
382
0
                return patterns[il].c_str();
383
0
            }
384
0
            case LAYER_FRACTION_MOE: {
385
0
                static std::array<std::string, n_strings> patterns;
386
0
                if (patterns[il].empty()) {
387
0
                    patterns[il] = "blk\\." + std::to_string(il) + "\\.ffn_(up|down|gate)_(ch|)exps";
388
0
                }
389
0
                return patterns[il].c_str();
390
0
            }
391
0
            default:
392
0
                GGML_ABORT("fatal error");
393
0
        }
394
0
    };
395
396
0
    struct ngl_t {
397
0
        uint32_t n_layer = 0; // number of total layers
398
0
        uint32_t n_part  = 0; // number of partial layers, <= n_layer
399
400
        // for the first partial layer varying parts can overflow, all further layers use LAYER_FRACTION_MOE:
401
0
        layer_fraction_t overflow_type = LAYER_FRACTION_MOE;
402
403
0
        uint32_t n_full() const {
404
0
            assert(n_layer >= n_part);
405
0
            return n_layer - n_part;
406
0
        }
407
0
    };
408
409
0
    const size_t ntbo = llama_max_tensor_buft_overrides();
410
411
    // utility function to set n_gpu_layers, tensor_split, and tensor_buft_overrides
412
0
    auto set_ngl_tensor_split_tbo = [&](
413
0
            const std::vector<ngl_t> & ngl_per_device,
414
0
            const std::vector<ggml_backend_buffer_type_t> & overflow_bufts,
415
0
            llama_model_params & mparams) {
416
0
        mparams.n_gpu_layers = 0;
417
0
        for (size_t id = 0; id < nd; id++) {
418
0
            mparams.n_gpu_layers += ngl_per_device[id].n_layer;
419
0
            if (nd > 1) {
420
0
                tensor_split[id] = ngl_per_device[id].n_layer;
421
0
            }
422
0
        }
423
0
        assert(uint32_t(mparams.n_gpu_layers) <= hp_ngl + 1);
424
0
        uint32_t il0 = hp_ngl + 1 - mparams.n_gpu_layers; // start index for tensor buft overrides
425
426
0
        mparams.tensor_split = tensor_split;
427
428
0
        size_t itbo = 0;
429
0
        for (size_t id = 0; id < nd; id++) {
430
0
            il0 += ngl_per_device[id].n_full();
431
0
            for (uint32_t il = il0; il < il0 + ngl_per_device[id].n_part; il++) {
432
0
                if (itbo + 1 >= ntbo) {
433
0
                    tensor_buft_overrides[itbo].pattern = nullptr;
434
0
                    tensor_buft_overrides[itbo].buft    = nullptr;
435
0
                    itbo++;
436
0
                    mparams.tensor_buft_overrides = tensor_buft_overrides;
437
0
                    throw llama_params_fit_exception("llama_max_tensor_buft_overrides() == "
438
0
                        + std::to_string(ntbo) + " is insufficient for model");
439
0
                }
440
0
                tensor_buft_overrides[itbo].pattern = get_overflow_pattern(il, il == il0 ? ngl_per_device[id].overflow_type : LAYER_FRACTION_MOE);
441
0
                tensor_buft_overrides[itbo].buft = il == il0 ? overflow_bufts[id] : ggml_backend_cpu_buffer_type();
442
0
                itbo++;
443
0
            }
444
0
            il0 += ngl_per_device[id].n_part;
445
0
        }
446
0
        tensor_buft_overrides[itbo].pattern = nullptr;
447
0
        tensor_buft_overrides[itbo].buft    = nullptr;
448
0
        itbo++;
449
0
        mparams.tensor_buft_overrides = tensor_buft_overrides;
450
0
    };
451
452
    // utility function that returns the memory use per device for given numbers of layers per device
453
0
    auto get_memory_for_layers = [&](
454
0
            const char * func_name,
455
0
            const std::vector<ngl_t> & ngl_per_device,
456
0
            const std::vector<ggml_backend_buffer_type_t> & overflow_bufts) -> std::vector<int64_t> {
457
0
        llama_model_params mparams_copy = *mparams;
458
0
        set_ngl_tensor_split_tbo(ngl_per_device, overflow_bufts, mparams_copy);
459
460
0
        const dmds_t dmd_nl = llama_get_device_memory_data(
461
0
            path_model, &mparams_copy, cparams, devs, hp_ngl, hp_nct, hp_nex, log_level);
462
463
0
        LLAMA_LOG_DEBUG("%s: memory for test allocation by device:\n", func_name);
464
0
        for (size_t id = 0; id < nd; id++) {
465
0
            const ngl_t & n = ngl_per_device[id];
466
0
            LLAMA_LOG_DEBUG(
467
0
                "%s: id=%zu, n_layer=%2" PRIu32 ", n_part=%2" PRIu32 ", overflow_type=%d, mem=%6" PRId64 " MiB\n",
468
0
                func_name, id, n.n_layer, n.n_part, int(n.overflow_type), dmd_nl[id].mb.total()/MiB);
469
0
        }
470
471
0
        std::vector<int64_t> ret;
472
0
        ret.reserve(nd);
473
0
        for (const llama_device_memory_data & dmd : dmd_nl) {
474
0
            ret.push_back(dmd.mb.total());
475
0
        }
476
0
        return ret;
477
0
    };
478
479
0
    int64_t global_surplus_cpu_moe = 0;
480
0
    if (hp_nex > 0) {
481
0
        const static std::string pattern_moe_all = "blk\\.\\d+\\.ffn_(up|down|gate)_(ch|)exps"; // matches all MoE tensors
482
0
        ggml_backend_buffer_type_t cpu_buft = ggml_backend_cpu_buffer_type();
483
0
        tensor_buft_overrides[0] = {pattern_moe_all.c_str(), cpu_buft};
484
0
        tensor_buft_overrides[1] = {nullptr, nullptr};
485
0
        mparams->tensor_buft_overrides = tensor_buft_overrides;
486
487
0
        LLAMA_LOG_DEBUG("%s: getting device memory data with all MoE tensors moved to system memory:\n", __func__);
488
0
        const dmds_t dmds_cpu_moe = llama_get_device_memory_data(
489
0
            path_model, mparams, cparams, devs, hp_ngl, hp_nct, hp_nex, log_level);
490
491
0
        for (size_t id = 0; id < nd; id++) {
492
0
            global_surplus_cpu_moe += dmds_cpu_moe[id].free;
493
0
            global_surplus_cpu_moe -= int64_t(dmds_cpu_moe[id].mb.total()) + margins[id];
494
0
        }
495
496
0
        if (global_surplus_cpu_moe > 0) {
497
0
            LLAMA_LOG_INFO("%s: with only dense weights in device memory there is a total surplus of %" PRId64 " MiB\n",
498
0
                __func__, global_surplus_cpu_moe/MiB);
499
0
        } else {
500
0
            LLAMA_LOG_INFO("%s: with only dense weights in device memory there is still a total deficit of %" PRId64 " MiB\n",
501
0
                __func__, -global_surplus_cpu_moe/MiB);
502
0
        }
503
504
        // reset
505
0
        tensor_buft_overrides[0] = {nullptr, nullptr};
506
0
        mparams->tensor_buft_overrides = tensor_buft_overrides;
507
0
    }
508
509
0
    std::vector<int64_t> targets; // maximum acceptable memory use per device
510
0
    targets.reserve(nd);
511
0
    for (size_t id = 0; id < nd; id++) {
512
0
        targets.push_back(dmds_full[id].free - margins[id]);
513
0
        LLAMA_LOG_DEBUG("%s: id=%zu, target=%" PRId64 " MiB\n", __func__, id, targets[id]/MiB);
514
0
    }
515
516
0
    std::vector<ggml_backend_buffer_type_t> overflow_bufts; // which bufts the first partial layer of a device overflows to:
517
0
    overflow_bufts.reserve(nd);
518
0
    for (size_t id = 0; id < nd; id++) {
519
0
        overflow_bufts.push_back(ggml_backend_cpu_buffer_type());
520
0
    }
521
522
0
    std::vector<ngl_t> ngl_per_device(nd);
523
0
    std::vector<int64_t> mem = get_memory_for_layers(__func__, ngl_per_device, overflow_bufts);
524
525
    // optimize the number of layers per device using the method of false position:
526
    //   - ngl_per_device has 0 layers for each device, lower bound
527
    //   - try a "high" configuration where a device is given all unassigned layers
528
    //   - interpolate the memory use / layer between low and high linearly to get a guess where it meets our target
529
    //   - check memory use of our guess, replace either the low or high bound
530
    //   - once we only have a difference of a single layer, stop and return the lower bound that just barely still fits
531
    //   - the last device has the output layer, which cannot be a partial layer
532
0
    if (hp_nex == 0) {
533
0
        LLAMA_LOG_INFO("%s: filling dense layers back-to-front:\n", __func__);
534
0
    } else {
535
0
        LLAMA_LOG_INFO("%s: filling dense-only layers back-to-front:\n", __func__);
536
0
    }
537
0
    for (int id = nd - 1; id >= 0; id--) {
538
0
        uint32_t n_unassigned = hp_ngl + 1;
539
0
        for (size_t jd = id + 1; jd < nd; ++jd) {
540
0
            assert(n_unassigned >= ngl_per_device[jd].n_layer);
541
0
            n_unassigned -= ngl_per_device[jd].n_layer;
542
0
        }
543
544
0
        std::vector<ngl_t> ngl_per_device_high = ngl_per_device;
545
0
        ngl_per_device_high[id].n_layer = n_unassigned;
546
0
        if (hp_nex > 0) {
547
0
            ngl_per_device_high[id].n_part = size_t(id) < nd - 1 ? ngl_per_device_high[id].n_layer : ngl_per_device_high[id].n_layer - 1;
548
0
        }
549
0
        if (ngl_per_device_high[id].n_layer > 0) {
550
0
            std::vector<int64_t> mem_high = get_memory_for_layers(__func__, ngl_per_device_high, overflow_bufts);
551
0
            if (mem_high[id] > targets[id]) {
552
0
                assert(ngl_per_device_high[id].n_layer > ngl_per_device[id].n_layer);
553
0
                uint32_t delta = ngl_per_device_high[id].n_layer - ngl_per_device[id].n_layer;
554
0
                LLAMA_LOG_DEBUG("%s: start filling device %" PRIu32 ", delta=%" PRIu32 "\n", __func__, id, delta);
555
0
                while (delta > 1) {
556
0
                    uint32_t step_size = int64_t(delta) * (targets[id] - mem[id]) / (mem_high[id] - mem[id]);
557
0
                    step_size = std::max(step_size, uint32_t(1));
558
0
                    step_size = std::min(step_size, delta - 1);
559
560
0
                    std::vector<ngl_t> ngl_per_device_test = ngl_per_device;
561
0
                    ngl_per_device_test[id].n_layer += step_size;
562
0
                    if (hp_nex) {
563
0
                        ngl_per_device_test[id].n_part += size_t(id) == nd - 1 && ngl_per_device_test[id].n_part == 0 ?
564
0
                            step_size - 1 : step_size; // the first layer is the output layer which must always be full
565
0
                    }
566
0
                    const std::vector<int64_t> mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts);
567
568
0
                    if (mem_test[id] <= targets[id]) {
569
0
                        ngl_per_device = ngl_per_device_test;
570
0
                        mem            = mem_test;
571
0
                        LLAMA_LOG_DEBUG("%s: set ngl_per_device[%d].n_layer=%" PRIu32 "\n", __func__, id, ngl_per_device[id].n_layer);
572
0
                    } else {
573
0
                        ngl_per_device_high = ngl_per_device_test;
574
0
                        mem_high            = mem_test;
575
0
                        LLAMA_LOG_DEBUG("%s: set ngl_per_device_high[%d].n_layer=%" PRIu32 "\n", __func__, id, ngl_per_device_high[id].n_layer);
576
0
                    }
577
0
                    delta = ngl_per_device_high[id].n_layer - ngl_per_device[id].n_layer;
578
0
                }
579
0
            } else {
580
0
                assert(ngl_per_device_high[id].n_layer == n_unassigned);
581
0
                ngl_per_device = ngl_per_device_high;
582
0
                mem            = mem_high;
583
0
                LLAMA_LOG_DEBUG("%s: set ngl_per_device[%d].n_layer=%" PRIu32 "\n", __func__, id, ngl_per_device[id].n_layer);
584
0
            }
585
0
        }
586
587
0
        const int64_t projected_margin = dmds_full[id].free - mem[id];
588
0
        LLAMA_LOG_INFO(
589
0
            "%s:   - %s: %2" PRIu32 " layers, %6" PRId64 " MiB used, %6" PRId64 " MiB free\n",
590
0
            __func__, dev_names[id].c_str(), ngl_per_device[id].n_layer, mem[id]/MiB, projected_margin/MiB);
591
0
    }
592
0
    if (hp_nex == 0 || global_surplus_cpu_moe <= 0) {
593
0
        set_ngl_tensor_split_tbo(ngl_per_device, overflow_bufts, *mparams);
594
0
        return;
595
0
    }
596
597
    // step 4: for a MoE model where all dense tensors fit,
598
    //     convert the dense-only layers in the back to full layers in the front until all devices are full
599
    // essentially the same procedure as for the dense-only layers except front-to-back
600
    // also, try fitting at least part of one more layer to reduce waste for "small" GPUs with e.g. 24 GiB VRAM
601
602
0
    size_t id_dense_start = nd;
603
0
    for (int id = nd - 1; id >= 0; id--) {
604
0
        if (ngl_per_device[id].n_layer > 0) {
605
0
            id_dense_start = id;
606
0
            continue;
607
0
        }
608
0
        break;
609
0
    }
610
0
    assert(id_dense_start < nd);
611
612
0
    LLAMA_LOG_INFO("%s: converting dense-only layers to full layers and filling them front-to-back with overflow to next device/system memory:\n", __func__);
613
0
    for (size_t id = 0; id <= id_dense_start && id_dense_start < nd; id++) {
614
0
        std::vector<ngl_t> ngl_per_device_high = ngl_per_device;
615
0
        for (size_t jd = id_dense_start; jd < nd; jd++) {
616
0
            const uint32_t n_layer_move = jd < nd - 1 ? ngl_per_device_high[jd].n_layer : ngl_per_device_high[jd].n_layer - 1;
617
0
            ngl_per_device_high[id].n_layer += n_layer_move;
618
0
            ngl_per_device_high[jd].n_layer -= n_layer_move;
619
0
            ngl_per_device_high[jd].n_part = 0;
620
0
        }
621
0
        size_t id_dense_start_high = nd - 1;
622
0
        std::vector<int64_t> mem_high = get_memory_for_layers(__func__, ngl_per_device_high, overflow_bufts);
623
624
0
        if (mem_high[id] > targets[id]) {
625
0
            assert(ngl_per_device_high[id].n_full() >= ngl_per_device[id].n_full());
626
0
            uint32_t delta = ngl_per_device_high[id].n_full() - ngl_per_device[id].n_full();
627
0
            while (delta > 1) {
628
0
                uint32_t step_size = int64_t(delta) * (targets[id] - mem[id]) / (mem_high[id] - mem[id]);
629
0
                step_size = std::max(step_size, uint32_t(1));
630
0
                step_size = std::min(step_size, delta - 1);
631
632
0
                std::vector<ngl_t> ngl_per_device_test = ngl_per_device;
633
0
                size_t id_dense_start_test = id_dense_start;
634
0
                uint32_t n_converted_test = 0;
635
0
                for (;id_dense_start_test < nd; id_dense_start_test++) {
636
0
                    const uint32_t n_convert_jd = std::min(step_size - n_converted_test, ngl_per_device_test[id_dense_start_test].n_part);
637
0
                    ngl_per_device_test[id_dense_start_test].n_layer -= n_convert_jd;
638
0
                    ngl_per_device_test[id_dense_start_test].n_part -= n_convert_jd;
639
0
                    ngl_per_device_test[id].n_layer += n_convert_jd;
640
0
                    n_converted_test += n_convert_jd;
641
642
0
                    if (ngl_per_device_test[id_dense_start_test].n_part > 0) {
643
0
                        break;
644
0
                    }
645
0
                }
646
0
                const std::vector<int64_t> mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts);
647
648
0
                if (mem_test[id] <= targets[id]) {
649
0
                    ngl_per_device = ngl_per_device_test;
650
0
                    mem            = mem_test;
651
0
                    id_dense_start = id_dense_start_test;
652
0
                    LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part)=(%" PRIu32 ", %" PRIu32 "), id_dense_start=%zu\n",
653
0
                        __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start);
654
0
                } else {
655
0
                    ngl_per_device_high = ngl_per_device_test;
656
0
                    mem_high            = mem_test;
657
0
                    id_dense_start_high = id_dense_start_test;
658
0
                    LLAMA_LOG_DEBUG("%s: set ngl_per_device_high[%zu].(n_layer, n_part)=(%" PRIu32 ", %" PRIu32 "), id_dense_start_high=%zu\n",
659
0
                        __func__, id, ngl_per_device_high[id].n_layer, ngl_per_device_high[id].n_part, id_dense_start_high);
660
0
                }
661
0
                assert(ngl_per_device_high[id].n_full() >= ngl_per_device[id].n_full());
662
0
                delta = ngl_per_device_high[id].n_full() - ngl_per_device[id].n_full();
663
0
            }
664
0
        } else {
665
0
            ngl_per_device = ngl_per_device_high;
666
0
            mem            = mem_high;
667
0
            id_dense_start = id_dense_start_high;
668
0
            LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part)=(%" PRIu32 ", %" PRIu32 "), id_dense_start=%zu\n",
669
0
                __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start);
670
0
        }
671
672
        // try to fit at least part of one more layer
673
0
        if (ngl_per_device[id_dense_start].n_layer > (id < nd - 1 ? 0 : 1)) {
674
0
            std::vector<ngl_t> ngl_per_device_test = ngl_per_device;
675
0
            size_t id_dense_start_test = id_dense_start;
676
0
            ngl_per_device_test[id_dense_start_test].n_layer--;
677
0
            ngl_per_device_test[id_dense_start_test].n_part--;
678
0
            ngl_per_device_test[id].n_layer++;
679
0
            ngl_per_device_test[id].n_part++;
680
0
            if (ngl_per_device_test[id_dense_start_test].n_part == 0) {
681
0
                id_dense_start_test++;
682
0
            }
683
0
            ngl_per_device_test[id].overflow_type = LAYER_FRACTION_UP;
684
0
            std::vector<ggml_backend_buffer_type_t> overflow_bufts_test = overflow_bufts;
685
0
            if (id < nd - 1) {
686
0
                overflow_bufts_test[id] = ggml_backend_dev_buffer_type(devs[id + 1]);
687
0
            }
688
0
            LLAMA_LOG_DEBUG("%s: trying to fit one extra layer with overflow_type=LAYER_FRACTION_UP\n", __func__);
689
0
            std::vector<int64_t> mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts_test);
690
0
            if (mem_test[id] < targets[id] && (id + 1 == nd || mem_test[id + 1] < targets[id + 1])) {
691
0
                ngl_per_device = ngl_per_device_test;
692
0
                overflow_bufts = overflow_bufts_test;
693
0
                mem            = mem_test;
694
0
                id_dense_start = id_dense_start_test;
695
0
                LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part, overflow_type)=(%" PRIu32 ", %" PRIu32 ", UP), id_dense_start=%zu\n",
696
0
                    __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start);
697
698
0
                ngl_per_device_test[id].overflow_type = LAYER_FRACTION_GATE;
699
0
                LLAMA_LOG_DEBUG("%s: trying to fit one extra layer with overflow_type=LAYER_FRACTION_GATE\n", __func__);
700
0
                mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts_test);
701
0
                if (mem_test[id] < targets[id] && (id + 1 == nd || mem_test[id + 1] < targets[id + 1])) {
702
0
                    ngl_per_device = ngl_per_device_test;
703
0
                    overflow_bufts = overflow_bufts_test;
704
0
                    mem            = mem_test;
705
0
                    id_dense_start = id_dense_start_test;
706
0
                    LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part, overflow_type)=(%" PRIu32 ", %" PRIu32 ", GATE), id_dense_start=%zu\n",
707
0
                        __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start);
708
0
                }
709
0
            } else {
710
0
                ngl_per_device_test[id].overflow_type = LAYER_FRACTION_ATTN;
711
0
                LLAMA_LOG_DEBUG("%s: trying to fit one extra layer with overflow_type=LAYER_FRACTION_ATTN\n", __func__);
712
0
                mem_test = get_memory_for_layers(__func__, ngl_per_device_test, overflow_bufts_test);
713
0
                if (mem_test[id] < targets[id] && (id + 1 == nd || mem_test[id + 1] < targets[id + 1])) {
714
0
                    ngl_per_device = ngl_per_device_test;
715
0
                    overflow_bufts = overflow_bufts_test;
716
0
                    mem            = mem_test;
717
0
                    id_dense_start = id_dense_start_test;
718
0
                    LLAMA_LOG_DEBUG("%s: set ngl_per_device[%zu].(n_layer, n_part, overflow_type)=(%" PRIu32 ", %" PRIu32 ", ATTN), id_dense_start=%zu\n",
719
0
                        __func__, id, ngl_per_device[id].n_layer, ngl_per_device[id].n_part, id_dense_start);
720
0
                }
721
0
            }
722
0
        }
723
724
0
        const int64_t projected_margin = dmds_full[id].free - mem[id];
725
0
        LLAMA_LOG_INFO(
726
0
            "%s:   - %s: %2" PRIu32 " layers (%2" PRIu32 " overflowing), %6" PRId64 " MiB used, %6" PRId64 " MiB free\n",
727
0
            __func__, dev_names[id].c_str(), ngl_per_device[id].n_layer, ngl_per_device[id].n_part, mem[id]/MiB, projected_margin/MiB);
728
0
    }
729
730
    // print info for devices that were not changed during the conversion from dense only to full layers:
731
0
    for (size_t id = id_dense_start + 1; id < nd; id++) {
732
0
        const int64_t projected_margin = dmds_full[id].free - mem[id];
733
0
        LLAMA_LOG_INFO(
734
0
            "%s:   - %s: %2" PRIu32 " layers (%2" PRIu32 " overflowing), %6" PRId64 " MiB used, %6" PRId64 " MiB free\n",
735
0
            __func__, dev_names[id].c_str(), ngl_per_device[id].n_layer, ngl_per_device[id].n_part, mem[id]/MiB, projected_margin/MiB);
736
0
    }
737
738
0
    set_ngl_tensor_split_tbo(ngl_per_device, overflow_bufts, *mparams);
739
0
}
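The per-device layer search above is the method of false position applied to an integer: keep a lower bound that fits and an upper bound that does not, interpolate linearly between their measured memory uses, re-measure, and stop once the bounds are one layer apart. A stripped-down sketch of that search, with a generic cost function standing in for get_memory_for_layers and assumed to be non-decreasing:

#include <cstdint>
#include <algorithm>
#include <functional>

// returns the largest n in [lo, hi] with cost(n) <= target, assuming cost(lo) <= target
static uint32_t fit_false_position(
        uint32_t lo, uint32_t hi, int64_t target,
        const std::function<int64_t(uint32_t)> & cost) {
    int64_t cost_lo = cost(lo);
    int64_t cost_hi = cost(hi);
    if (cost_hi <= target) {
        return hi; // everything fits, no search needed
    }
    while (hi - lo > 1) {
        // linear interpolation between the bounds, clamped so each step makes progress
        uint32_t step = uint32_t(int64_t(hi - lo) * (target - cost_lo) / (cost_hi - cost_lo));
        step = std::max(step, uint32_t(1));
        step = std::min(step, hi - lo - 1);

        const uint32_t mid      = lo + step;
        const int64_t  cost_mid = cost(mid);
        if (cost_mid <= target) {
            lo      = mid; // still fits: raise the lower bound
            cost_lo = cost_mid;
        } else {
            hi      = mid; // too big: lower the upper bound
            cost_hi = cost_mid;
        }
    }
    return lo;
}

In the function above the same update is applied once per device, back to front, with the cost of a candidate layer count obtained from a trial allocation via get_memory_for_layers.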
740
741
enum llama_params_fit_status llama_params_fit(
742
        const char * path_model, struct llama_model_params * mparams, struct llama_context_params * cparams,
743
        float * tensor_split, struct llama_model_tensor_buft_override * tensor_buft_overrides,
744
0
        size_t * margins, uint32_t n_ctx_min, enum ggml_log_level log_level) {
745
0
    const int64_t t0_us = llama_time_us();
746
0
    llama_params_fit_status status = LLAMA_PARAMS_FIT_STATUS_SUCCESS;
747
0
    try {
748
0
        llama_params_fit_impl(path_model, mparams, cparams, tensor_split, tensor_buft_overrides, margins, n_ctx_min, log_level);
749
0
        LLAMA_LOG_INFO("%s: successfully fit params to free device memory\n", __func__);
750
0
    } catch (const llama_params_fit_exception & e) {
751
0
        LLAMA_LOG_WARN("%s: failed to fit params to free device memory: %s\n", __func__, e.what());
752
0
        status = LLAMA_PARAMS_FIT_STATUS_FAILURE;
753
0
    } catch (const std::runtime_error & e) {
754
0
        LLAMA_LOG_ERROR("%s: encountered an error while trying to fit params to free device memory: %s\n", __func__, e.what());
755
0
        status = LLAMA_PARAMS_FIT_STATUS_ERROR;
756
0
    }
757
0
    const int64_t t1_us = llama_time_us();
758
0
    LLAMA_LOG_INFO("%s: fitting params to free memory took %.2f seconds\n", __func__, (t1_us - t0_us) * 1e-6);
759
0
    return status;
760
0
}
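Based on the signature and status codes above, a caller would typically run llama_params_fit on default parameters right before the real load. A sketch of that flow; the 1 GiB margin per device, the minimum context of 4096, and the load_with_fit helper are illustrative choices, not fixed API:

#include "llama.h"

#include <vector>

bool load_with_fit(const char * path_model, llama_model ** out_model, llama_context ** out_ctx) {
    llama_model_params   mparams = llama_model_default_params();
    llama_context_params cparams = llama_context_default_params();

    // scratch buffers that llama_params_fit may fill in and point mparams at;
    // they must stay alive until the model load below is finished
    std::vector<float> tensor_split(llama_max_devices(), 0.0f);
    std::vector<llama_model_tensor_buft_override> tbo(llama_max_tensor_buft_overrides());

    // per-device amount of memory to leave free, e.g. 1 GiB each (placeholder)
    std::vector<size_t> margins(llama_max_devices(), 1024u*1024u*1024u);

    const enum llama_params_fit_status status = llama_params_fit(
        path_model, &mparams, &cparams,
        tensor_split.data(), tbo.data(),
        margins.data(), /*n_ctx_min =*/ 4096, GGML_LOG_LEVEL_INFO);
    if (status == LLAMA_PARAMS_FIT_STATUS_ERROR) {
        return false; // e.g. the model file could not be loaded at all
    }
    // on LLAMA_PARAMS_FIT_STATUS_FAILURE the params are left usable, but the load may still not fit

    *out_model = llama_model_load_from_file(path_model, mparams);
    if (!*out_model) {
        return false;
    }
    *out_ctx = llama_init_from_model(*out_model, cparams);
    if (!*out_ctx) {
        llama_model_free(*out_model);
        return false;
    }
    return true;
}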
761
762
0
struct llama_sampler_chain_params llama_sampler_chain_default_params() {
763
0
    struct llama_sampler_chain_params result = {
764
0
        /*.no_perf =*/ true,
765
0
    };
766
767
0
    return result;
768
0
}
769
770
0
size_t llama_max_devices(void) {
771
0
    return 16;
772
0
}
773
774
0
size_t llama_max_tensor_buft_overrides() {
775
0
    return 4096;
776
0
}
777
778
0
bool llama_supports_mmap(void) {
779
0
    return llama_mmap::SUPPORTED;
780
0
}
781
782
0
bool llama_supports_mlock(void) {
783
0
    return llama_mlock::SUPPORTED;
784
0
}
785
786
0
bool llama_supports_gpu_offload(void) {
787
0
    return ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU) != nullptr ||
788
0
           ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU) != nullptr ||
789
0
           llama_supports_rpc();
790
0
}
791
792
0
bool llama_supports_rpc(void) {
793
0
    return ggml_backend_reg_by_name("RPC") != nullptr;
794
0
}
795
796
4
void llama_backend_init(void) {
797
4
    ggml_time_init();
798
799
    // needed to initialize f16 tables
800
4
    {
801
4
        struct ggml_init_params params = { 0, NULL, false };
802
4
        struct ggml_context * ctx = ggml_init(params);
803
4
        ggml_free(ctx);
804
4
    }
805
4
}
806
807
0
void llama_numa_init(enum ggml_numa_strategy numa) {
808
0
    if (numa != GGML_NUMA_STRATEGY_DISABLED) {
809
0
        auto * dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
810
0
        GGML_ASSERT(dev && "CPU backend is not loaded");
811
0
        auto * reg = ggml_backend_dev_backend_reg(dev);
812
0
        auto * numa_init_fn = (decltype(ggml_numa_init) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_numa_init");
813
0
        if (numa_init_fn) {
814
0
            numa_init_fn(numa);
815
0
        }
816
0
    }
817
0
}
818
819
2
void llama_backend_free(void) {
820
2
    ggml_quantize_free();
821
2
}
822
823
0
int64_t llama_time_us(void) {
824
0
    return ggml_time_us();
825
0
}
826
827
// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
828
0
static int llama_model_load(const std::string & fname, std::vector<std::string> & splits, llama_model & model, llama_model_params & params) {
829
    // loading time will be recalculated after the first eval, so
830
    // we take page faults deferred by mmap() into consideration
831
0
    model.t_load_us = 0;
832
0
    time_meas tm(model.t_load_us);
833
834
0
    model.t_start_us = tm.t_start_us;
835
836
0
    try {
837
0
        llama_model_loader ml(fname, splits, params.use_mmap, params.use_direct_io, params.check_tensors, params.no_alloc, params.kv_overrides, params.tensor_buft_overrides);
838
839
0
        ml.print_info();
840
841
0
        model.hparams.vocab_only = params.vocab_only;
842
0
        model.hparams.no_alloc   = params.no_alloc;
843
844
0
        try {
845
0
            model.load_arch(ml);
846
0
        } catch(const std::exception & e) {
847
0
            throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
848
0
        }
849
0
        try {
850
0
            model.load_hparams(ml);
851
0
        } catch(const std::exception & e) {
852
0
            throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
853
0
        }
854
0
        if (model.arch == LLM_ARCH_CLIP) {
855
0
            throw std::runtime_error("CLIP cannot be used as main model, use it with --mmproj instead");
856
0
        }
857
0
        try {
858
0
            model.load_vocab(ml);
859
0
        } catch(const std::exception & e) {
860
0
            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
861
0
        }
862
863
0
        model.load_stats(ml);
864
0
        model.print_info();
865
866
0
        if (params.vocab_only) {
867
0
            LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
868
0
            return 0;
869
0
        }
870
871
0
        if (!model.load_tensors(ml)) {
872
0
            return -2;
873
0
        }
874
0
    } catch (const std::exception & err) {
875
0
        LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
876
0
        return -1;
877
0
    }
878
879
0
    return 0;
880
0
}
881
882
static struct llama_model * llama_model_load_from_file_impl(
883
        const std::string & path_model,
884
        std::vector<std::string> & splits,
885
2
        struct llama_model_params params) {
886
2
    ggml_time_init();
887
888
2
    if (!params.vocab_only && ggml_backend_reg_count() == 0) {
889
0
        LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use ggml_backend_load() or ggml_backend_load_all() to load a backend before calling this function\n", __func__);
890
0
        return nullptr;
891
0
    }
892
893
2
    unsigned cur_percentage = 0;
894
2
    if (params.progress_callback == NULL) {
895
0
        params.progress_callback_user_data = &cur_percentage;
896
0
        params.progress_callback = [](float progress, void * ctx) {
897
0
            unsigned * cur_percentage_p = (unsigned *) ctx;
898
0
            unsigned percentage = (unsigned) (100 * progress);
899
0
            while (percentage > *cur_percentage_p) {
900
0
                *cur_percentage_p = percentage;
901
0
                LLAMA_LOG_CONT(".");
902
0
                if (percentage >= 100) {
903
0
                    LLAMA_LOG_CONT("\n");
904
0
                }
905
0
            }
906
0
            return true;
907
0
        };
908
0
    }
909
910
2
    llama_model * model = new llama_model(params);
911
912
    // create list of devices to use with this model
913
2
    if (params.devices) {
914
0
        for (ggml_backend_dev_t * dev = params.devices; *dev; ++dev) {
915
0
            model->devices.push_back(*dev);
916
0
        }
917
2
    } else {
918
        // default device selection
919
920
        // build list of available devices
921
2
        std::vector<ggml_backend_dev_t> gpus;
922
2
        std::vector<ggml_backend_dev_t> igpus;
923
2
        std::vector<ggml_backend_dev_t> rpc_servers;
924
925
4
        for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
926
2
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
927
2
            switch (ggml_backend_dev_type(dev)) {
928
2
                case GGML_BACKEND_DEVICE_TYPE_CPU:
929
2
                case GGML_BACKEND_DEVICE_TYPE_ACCEL:
930
                    // skip CPU backends since they are handled separately
931
2
                    break;
932
933
0
                case GGML_BACKEND_DEVICE_TYPE_GPU: {
934
0
                    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
935
0
                    if (ggml_backend_reg_name(reg) == std::string("RPC")) {
936
0
                        rpc_servers.push_back(dev);
937
0
                    } else {
938
                        // check if there is already a GPU with the same device id
939
0
                        ggml_backend_dev_props props;
940
0
                        ggml_backend_dev_get_props(dev, &props);
941
0
                        auto it = std::find_if(gpus.begin(), gpus.end(), [&props](ggml_backend_dev_t d) {
942
0
                            ggml_backend_dev_props d_props;
943
0
                            ggml_backend_dev_get_props(d, &d_props);
944
0
                            if (props.device_id && d_props.device_id) {
945
0
                                return strcmp(props.device_id, d_props.device_id) == 0;
946
0
                            }
947
0
                            return false;
948
0
                        });
949
950
0
                        if (it != gpus.end()) {
951
0
                            LLAMA_LOG_INFO("%s: skipping device %s (%s) with id %s - already using device %s (%s) with the same id\n",
952
0
                                    __func__,
953
0
                                    ggml_backend_dev_name(dev), ggml_backend_dev_description(dev),
954
0
                                    props.device_id ? props.device_id : "unknown id",
955
0
                                    ggml_backend_dev_name(*it), ggml_backend_dev_description(*it));
956
0
                        } else {
957
0
                            gpus.push_back(dev);
958
0
                        }
959
0
                    }
960
0
                    break;
961
2
                }
962
963
0
                case GGML_BACKEND_DEVICE_TYPE_IGPU:
964
0
                    igpus.push_back(dev);
965
0
                    break;
966
2
            }
967
2
        }
968
969
        // add RPC servers at the front of the list to minimize network transfers
970
2
        model->devices.insert(model->devices.begin(), rpc_servers.begin(), rpc_servers.end());
971
972
        // add GPUs
973
2
        model->devices.insert(model->devices.end(), gpus.begin(), gpus.end());
974
975
        // add integrated GPUs only if no other devices were found
976
2
        if (model->devices.empty()) {
977
2
            model->devices.insert(model->devices.end(), igpus.begin(), igpus.end());
978
2
        }
979
2
    }
980
981
    // if using single GPU mode, remove all except the main GPU
982
2
    if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
983
2
        if (params.main_gpu < 0) {
984
0
            model->devices.clear();
985
2
        } else {
986
2
            if (params.main_gpu >= (int)model->devices.size()) {
987
2
                LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %zu)\n", __func__, params.main_gpu, model->devices.size());
988
2
                llama_model_free(model);
989
2
                return nullptr;
990
2
            }
991
0
            ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
992
0
            model->devices.clear();
993
0
            model->devices.push_back(main_gpu);
994
0
        }
995
2
    }
996
997
0
    for (auto * dev : model->devices) {
998
0
        ggml_backend_dev_props props;
999
0
        ggml_backend_dev_get_props(dev, &props);
1000
0
        LLAMA_LOG_INFO("%s: using device %s (%s) (%s) - %zu MiB free\n", __func__,
1001
0
                ggml_backend_dev_name(dev), ggml_backend_dev_description(dev),
1002
0
                props.device_id ? props.device_id : "unknown id",
1003
0
                props.memory_free/1024/1024);
1004
0
    }
1005
1006
0
    const int status = llama_model_load(path_model, splits, *model, params);
1007
0
    GGML_ASSERT(status <= 0);
1008
0
    if (status < 0) {
1009
0
        if (status == -1) {
1010
0
            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
1011
0
        } else if (status == -2) {
1012
0
            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
1013
0
        }
1014
1015
0
        llama_model_free(model);
1016
0
        return nullptr;
1017
0
    }
1018
1019
0
    return model;
1020
0
}
1021
1022
// deprecated
1023
struct llama_model * llama_load_model_from_file(
1024
        const char * path_model,
1025
2
        struct llama_model_params params) {
1026
2
    return llama_model_load_from_file(path_model, params);
1027
2
}
1028
1029
struct llama_model * llama_model_load_from_file(
1030
        const char * path_model,
1031
2
        struct llama_model_params params) {
1032
2
    std::vector<std::string> splits = {};
1033
2
    return llama_model_load_from_file_impl(path_model, splits, params);
1034
2
}
1035
1036
struct llama_model * llama_model_load_from_splits(
1037
        const char ** paths,
1038
        size_t n_paths,
1039
0
        struct llama_model_params params) {
1040
0
    std::vector<std::string> splits;
1041
0
    if (n_paths == 0) {
1042
0
        LLAMA_LOG_ERROR("%s: list of splits is empty\n", __func__);
1043
0
        return nullptr;
1044
0
    }
1045
0
    splits.reserve(n_paths);
1046
0
    for (size_t i = 0; i < n_paths; ++i) {
1047
0
        splits.push_back(paths[i]);
1048
0
    }
1049
0
    return llama_model_load_from_file_impl(splits.front(), splits, params);
1050
0
}
1051
1052
0
void llama_model_save_to_file(const struct llama_model * model, const char * path_model) {
1053
0
    llama_model_saver ms(*model);
1054
0
    ms.add_kv_from_model();
1055
0
    ms.add_tensors_from_model();
1056
0
    ms.save(path_model);
1057
0
}
1058
1059
//
1060
// chat templates
1061
//
1062
1063
int32_t llama_chat_apply_template(
1064
                              const char * tmpl,
1065
         const struct llama_chat_message * chat,
1066
                                  size_t   n_msg,
1067
                                    bool   add_ass,
1068
                                    char * buf,
1069
0
                                 int32_t   length) {
1070
0
    const std::string curr_tmpl(tmpl == nullptr ? "chatml" : tmpl);
1071
1072
    // format the chat to string
1073
0
    std::vector<const llama_chat_message *> chat_vec;
1074
0
    chat_vec.resize(n_msg);
1075
0
    for (size_t i = 0; i < n_msg; i++) {
1076
0
        chat_vec[i] = &chat[i];
1077
0
    }
1078
1079
0
    std::string formatted_chat;
1080
0
    llm_chat_template detected_tmpl = llm_chat_detect_template(curr_tmpl);
1081
0
    if (detected_tmpl == LLM_CHAT_TEMPLATE_UNKNOWN) {
1082
0
        return -1;
1083
0
    }
1084
0
    int32_t res = llm_chat_apply_template(detected_tmpl, chat_vec, formatted_chat, add_ass);
1085
0
    if (res < 0) {
1086
0
        return res;
1087
0
    }
1088
0
    if (buf && length > 0) {
1089
0
        strncpy(buf, formatted_chat.c_str(), length);
1090
0
    }
1091
0
    return res;
1092
0
}
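A minimal use of llama_chat_apply_template as declared above: passing nullptr falls back to the "chatml" template, and the usual pattern is to retry with a larger buffer when the returned length exceeds the one provided. The messages are placeholders:

#include "llama.h"

#include <string>
#include <vector>

std::string format_chat_example() {
    const llama_chat_message msgs[] = {
        { "system", "You are a helpful assistant." },
        { "user",   "Hello!"                       },
    };
    const size_t n_msg = sizeof(msgs)/sizeof(msgs[0]);

    std::vector<char> buf(1024);
    // add_ass appends the assistant prefix so generation can start right away
    int32_t res = llama_chat_apply_template(nullptr, msgs, n_msg, /*add_ass =*/ true, buf.data(), (int32_t) buf.size());
    if (res < 0) {
        return ""; // unknown/unsupported template
    }
    if ((size_t) res > buf.size()) {
        buf.resize(res); // output was truncated, retry with a large enough buffer
        res = llama_chat_apply_template(nullptr, msgs, n_msg, true, buf.data(), (int32_t) buf.size());
    }
    return std::string(buf.data(), res);
}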
1093
1094
//
1095
// model split
1096
//
1097
1098
int32_t llama_split_path(
1099
    char * split_path,
1100
    size_t maxlen,
1101
    const char * path_prefix,
1102
    int32_t split_no,
1103
0
    int32_t split_count) {
1104
1105
0
    static const char * const SPLIT_PATH_FORMAT = "%s-%05d-of-%05d.gguf";
1106
1107
0
    const int written = snprintf(
1108
0
        split_path,
1109
0
        maxlen,
1110
0
        SPLIT_PATH_FORMAT,
1111
0
        path_prefix,
1112
0
        split_no + 1,
1113
0
        split_count
1114
0
    );
1115
1116
0
    if (written < 0 || (size_t) written >= maxlen) {
1117
0
        return 0;
1118
0
    }
1119
1120
0
    return (int32_t) written;
1121
0
}
1122
1123
int32_t llama_split_prefix(
1124
    char * split_prefix,
1125
    size_t maxlen,
1126
    const char * split_path,
1127
    int32_t split_no,
1128
0
    int32_t split_count) {
1129
1130
0
    const std::string str_split_path(split_path);
1131
1132
0
    char postfix[32];
1133
0
    snprintf(postfix, sizeof(postfix), "-%05d-of-%05d.gguf", split_no + 1, split_count);
1134
1135
0
    const std::string str_postfix(postfix);
1136
0
    if (str_split_path.size() <= str_postfix.size()) {
1137
0
        return 0;
1138
0
    }
1139
1140
0
    const size_t size_prefix = str_split_path.size() - str_postfix.size();
1141
1142
0
    if (str_split_path.compare(size_prefix, std::string::npos, str_postfix) == 0) {
1143
0
        const size_t copy_len = std::min(size_prefix + 1, maxlen);
1144
0
        snprintf(split_prefix, copy_len, "%s", split_path);
1145
1146
0
        return (int32_t) size_prefix;
1147
0
    }
1148
1149
0
    return 0;
1150
0
}
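llama_split_path and llama_split_prefix above encode and decode the split naming scheme "<prefix>-%05d-of-%05d.gguf" with 1-based split numbers. A short round-trip sketch with placeholder paths:

#include "llama.h"

#include <cstdio>

void split_path_example() {
    char path[512];
    // build the name of split 0 out of 4: "models/foo-00001-of-00004.gguf"
    if (llama_split_path(path, sizeof(path), "models/foo", /*split_no =*/ 0, /*split_count =*/ 4) > 0) {
        printf("split path: %s\n", path);
    }

    char prefix[512];
    // recover "models/foo" from the generated path; returns the prefix length, or 0 if the postfix does not match
    if (llama_split_prefix(prefix, sizeof(prefix), path, 0, 4) > 0) {
        printf("split prefix: %s\n", prefix);
    }
}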
1151
1152
0
const char * llama_print_system_info(void) {
1153
0
    static std::string s;
1154
0
    s.clear(); // Clear the string, since it's static, otherwise it will accumulate data from previous calls.
1155
1156
0
    for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
1157
0
        auto * reg = ggml_backend_reg_get(i);
1158
0
        auto * get_features_fn = (ggml_backend_get_features_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_get_features");
1159
0
        if (get_features_fn) {
1160
0
            ggml_backend_feature * features = get_features_fn(reg);
1161
0
            s += ggml_backend_reg_name(reg);
1162
0
            s += " : ";
1163
0
            for (; features->name; features++) {
1164
0
                s += features->name;
1165
0
                s += " = ";
1166
0
                s += features->value;
1167
0
                s += " | ";
1168
0
            }
1169
0
        }
1170
0
    }
1171
1172
0
    return s.c_str();
1173
0
}
1174
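Taken together, the entry points covered in this file imply the usual process-level lifecycle: load backends, initialize, query capabilities, then tear down. A minimal sketch, assuming ggml_backend_load_all() from ggml-backend.h as hinted by the error message in llama_model_load_from_file_impl; error handling is omitted:

#include "llama.h"
#include "ggml-backend.h"

#include <cstdio>

int main() {
    ggml_backend_load_all(); // load available backend modules before any model is loaded
    llama_backend_init();    // one-time init (e.g. f16 tables)

    printf("%s\n", llama_print_system_info());
    printf("mmap: %d, mlock: %d, gpu offload: %d\n",
        llama_supports_mmap(), llama_supports_mlock(), llama_supports_gpu_offload());

    // ... load models and run inference here ...

    llama_backend_free();    // release quantization scratch state
    return 0;
}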