Coverage Report

Created: 2026-01-18 06:10

/src/llama.cpp/src/llama-model.cpp
Line | Count | Source
1
#include "llama-model.h"
2
3
#include "llama-impl.h"
4
#include "llama-mmap.h"
5
#include "llama-cparams.h"
6
#include "llama-model-loader.h"
7
8
#include "llama-kv-cache.h"
9
#include "llama-kv-cache-iswa.h"
10
#include "llama-memory-hybrid.h"
11
#include "llama-memory-recurrent.h"
12
13
#include "ggml-cpp.h"
14
15
#include "models/models.h"
16
17
#include <algorithm>
18
#include <cassert>
19
#include <cfloat>
20
#include <cstring>
21
#include <cmath>
22
#include <functional>
23
#include <map>
24
#include <regex>
25
#include <sstream>
26
#include <stdexcept>
27
28
0
const char * llm_type_name(llm_type type) {
29
0
    switch (type) {
30
0
        case LLM_TYPE_14M:           return "14M";
31
0
        case LLM_TYPE_17M:           return "17M";
32
0
        case LLM_TYPE_22M:           return "22M";
33
0
        case LLM_TYPE_33M:           return "33M";
34
0
        case LLM_TYPE_47M:           return "47M";
35
0
        case LLM_TYPE_60M:           return "60M";
36
0
        case LLM_TYPE_70M:           return "70M";
37
0
        case LLM_TYPE_80M:           return "80M";
38
0
        case LLM_TYPE_109M:          return "109M";
39
0
        case LLM_TYPE_137M:          return "137M";
40
0
        case LLM_TYPE_140M:          return "140M";
41
0
        case LLM_TYPE_149M:          return "149M";
42
0
        case LLM_TYPE_160M:          return "160M";
43
0
        case LLM_TYPE_190M:          return "190M";
44
0
        case LLM_TYPE_220M:          return "220M";
45
0
        case LLM_TYPE_250M:          return "250M";
46
0
        case LLM_TYPE_256M:          return "256M";
47
0
        case LLM_TYPE_270M:          return "270M";
48
0
        case LLM_TYPE_335M:          return "335M";
49
0
        case LLM_TYPE_350M:          return "350M";
50
0
        case LLM_TYPE_360M:          return "360M";
51
0
        case LLM_TYPE_395M:          return "395M";
52
0
        case LLM_TYPE_410M:          return "410M";
53
0
        case LLM_TYPE_450M:          return "450M";
54
0
        case LLM_TYPE_475M:          return "475M";
55
0
        case LLM_TYPE_558M:          return "558M";
56
0
        case LLM_TYPE_700M:          return "700M";
57
0
        case LLM_TYPE_770M:          return "770M";
58
0
        case LLM_TYPE_780M:          return "780M";
59
0
        case LLM_TYPE_950M:          return "950M";
60
0
        case LLM_TYPE_0_3B:          return "0.3B";
61
0
        case LLM_TYPE_0_5B:          return "0.5B";
62
0
        case LLM_TYPE_0_6B:          return "0.6B";
63
0
        case LLM_TYPE_1B:            return "1B";
64
0
        case LLM_TYPE_1_2B:          return "1.2B";
65
0
        case LLM_TYPE_1_3B:          return "1.3B";
66
0
        case LLM_TYPE_1_4B:          return "1.4B";
67
0
        case LLM_TYPE_1_5B:          return "1.5B";
68
0
        case LLM_TYPE_1_6B:          return "1.6B";
69
0
        case LLM_TYPE_1_7B:          return "1.7B";
70
0
        case LLM_TYPE_1_8B:          return "1.8B";
71
0
        case LLM_TYPE_2B:            return "2B";
72
0
        case LLM_TYPE_2_6B:          return "2.6B";
73
0
        case LLM_TYPE_2_8B:          return "2.8B";
74
0
        case LLM_TYPE_2_9B:          return "2.9B";
75
0
        case LLM_TYPE_3B:            return "3B";
76
0
        case LLM_TYPE_4B:            return "4B";
77
0
        case LLM_TYPE_6B:            return "6B";
78
0
        case LLM_TYPE_6_9B:          return "6.9B";
79
0
        case LLM_TYPE_7B:            return "7B";
80
0
        case LLM_TYPE_8B:            return "8B";
81
0
        case LLM_TYPE_9B:            return "9B";
82
0
        case LLM_TYPE_11B:           return "11B";
83
0
        case LLM_TYPE_12B:           return "12B";
84
0
        case LLM_TYPE_13B:           return "13B";
85
0
        case LLM_TYPE_14B:           return "14B";
86
0
        case LLM_TYPE_15B:           return "15B";
87
0
        case LLM_TYPE_16B:           return "16B";
88
0
        case LLM_TYPE_20B:           return "20B";
89
0
        case LLM_TYPE_26B:           return "26B";
90
0
        case LLM_TYPE_27B:           return "27B";
91
0
        case LLM_TYPE_30B:           return "30B";
92
0
        case LLM_TYPE_32B:           return "32B";
93
0
        case LLM_TYPE_34B:           return "34B";
94
0
        case LLM_TYPE_35B:           return "35B";
95
0
        case LLM_TYPE_36B:           return "36B";
96
0
        case LLM_TYPE_40B:           return "40B";
97
0
        case LLM_TYPE_65B:           return "65B";
98
0
        case LLM_TYPE_70B:           return "70B";
99
0
        case LLM_TYPE_120B:          return "120B";
100
0
        case LLM_TYPE_142B:          return "142B";
101
0
        case LLM_TYPE_236B:          return "236B";
102
0
        case LLM_TYPE_290B:          return "290B";
103
0
        case LLM_TYPE_314B:          return "314B";
104
0
        case LLM_TYPE_405B:          return "405B";
105
0
        case LLM_TYPE_671B:          return "671B";
106
0
        case LLM_TYPE_SMALL:         return "0.1B";
107
0
        case LLM_TYPE_MEDIUM:        return "0.4B";
108
0
        case LLM_TYPE_LARGE:         return "0.8B";
109
0
        case LLM_TYPE_XL:            return "1.5B";
110
0
        case LLM_TYPE_A1_7B:         return "A1.7B";
111
0
        case LLM_TYPE_A2_7B:         return "A2.7B";
112
0
        case LLM_TYPE_8x7B:          return "8x7B";
113
0
        case LLM_TYPE_8x22B:         return "8x22B";
114
0
        case LLM_TYPE_16x12B:        return "16x12B";
115
0
        case LLM_TYPE_16x3_8B:       return "16x3.8B";
116
0
        case LLM_TYPE_10B_128x3_66B: return "10B+128x3.66B";
117
0
        case LLM_TYPE_57B_A14B:      return "57B.A14B";
118
0
        case LLM_TYPE_17B_16E:       return "17Bx16E (Scout)";
119
0
        case LLM_TYPE_17B_128E:      return "17Bx128E (Maverick)";
120
0
        case LLM_TYPE_A13B:          return "A13B";
121
0
        case LLM_TYPE_7B_A1B:        return "7B.A1B";
122
0
        case LLM_TYPE_8B_A1B:        return "8B.A1B";
123
0
        case LLM_TYPE_16B_A1B:       return "16B.A1B";
124
0
        case LLM_TYPE_21B_A3B:       return "21B.A3B";
125
0
        case LLM_TYPE_30B_A3B:       return "30B.A3B";
126
0
        case LLM_TYPE_31B_A3_5B:     return "31B.A3.5B";
127
0
        case LLM_TYPE_80B_A3B:       return "80B.A3B";
128
0
        case LLM_TYPE_100B_A6B:      return "100B.A6B";
129
0
        case LLM_TYPE_102B_A12B:     return "102B.A12B";
130
0
        case LLM_TYPE_106B_A12B:     return "106B.A12B";
131
0
        case LLM_TYPE_230B_A10B:     return "230B.A10B";
132
0
        case LLM_TYPE_235B_A22B:     return "235B.A22B";
133
0
        case LLM_TYPE_300B_A47B:     return "300B.A47B";
134
0
        case LLM_TYPE_310B_A15B:     return "310B.A15B";
135
0
        case LLM_TYPE_355B_A32B:     return "355B.A32B";
136
0
        case LLM_TYPE_E2B:           return "E2B";
137
0
        case LLM_TYPE_E4B:           return "E4B";
138
0
        default:                     return "?B";
139
0
    }
140
0
}
141
142
0
static const char * llama_expert_gating_func_name(llama_expert_gating_func_type type) {
143
0
    switch (type) {
144
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: return "softmax";
145
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: return "sigmoid";
146
0
        default:                                    return "unknown";
147
0
    }
148
0
}
149
150
static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
151
    { LLAMA_ROPE_SCALING_TYPE_NONE,       "none"       },
152
    { LLAMA_ROPE_SCALING_TYPE_LINEAR,     "linear"     },
153
    { LLAMA_ROPE_SCALING_TYPE_YARN,       "yarn"       },
154
    { LLAMA_ROPE_SCALING_TYPE_LONGROPE,   "longrope"   },
155
};
156
157
0
std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type) {
158
0
    return LLAMA_ROPE_SCALING_TYPES.at(rope_scaling_type);
159
0
}
160
161
0
static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
162
0
    for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
163
0
        if (kv.second == name) {
164
0
            return (llama_rope_scaling_type) kv.first;
165
0
        }
166
0
    }
167
168
0
    return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
169
0
}
170
171
// checks if the weight tensor can be used with the specified buffer type and device
172
0
static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) {
173
0
    GGML_ASSERT(w != nullptr);
174
175
0
    if (op == GGML_OP_NONE) {
176
0
        return true;
177
0
    }
178
179
0
    ggml_init_params params = {
180
0
        /*.mem_size   =*/ ggml_tensor_overhead()*8,
181
0
        /*.mem_buffer =*/ NULL,
182
0
        /*.no_alloc   =*/ true,
183
0
    };
184
0
    ggml_context_ptr ctx_ptr { ggml_init(params) };
185
0
    if (!ctx_ptr) {
186
0
        throw std::runtime_error(format("failed to create ggml context"));
187
0
    }
188
0
    ggml_context * ctx = ctx_ptr.get();
189
190
0
    ggml_tensor * op_tensor = nullptr;
191
192
0
    switch (op) {
193
0
        case GGML_OP_GET_ROWS:
194
0
            {
195
0
                ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
196
0
                op_tensor = ggml_get_rows(ctx, w, b);
197
0
            } break;
198
0
        case GGML_OP_MUL_MAT:
199
0
            {
200
0
                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], 512, w->ne[2], w->ne[3]);
201
0
                op_tensor = ggml_mul_mat(ctx, w, b);
202
0
            } break;
203
0
        case GGML_OP_MUL_MAT_ID:
204
0
            {
205
0
                int n_expert_used = hparams.n_expert_used;
206
0
                ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512);
207
0
                ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512);
208
0
                op_tensor = ggml_mul_mat_id(ctx, w, b, ids);
209
0
            } break;
210
0
        case GGML_OP_ADD:
211
0
            {
212
0
                ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
213
0
                op_tensor = ggml_add(ctx, a, w);
214
0
            } break;
215
0
        case GGML_OP_ADD_ID:
216
0
            {
217
0
                int n_expert_used = hparams.n_expert_used;
218
0
                ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512);
219
0
                ggml_tensor * c = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512);
220
0
                op_tensor = ggml_add_id(ctx, a, w, c);
221
0
            } break;
222
0
        case GGML_OP_MUL:
223
0
            {
224
0
                ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
225
0
                op_tensor = ggml_mul(ctx, a, w);
226
0
            } break;
227
0
        case GGML_OP_DIV:
228
0
            {
229
0
                ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, w->ne[0]);
230
0
                op_tensor = ggml_div(ctx, a, w);
231
0
            } break;
232
0
        case GGML_OP_ROPE:
233
0
            {
234
0
                int n_embd_head = hparams.n_embd_head_v;
235
0
                int n_head = hparams.n_head();
236
0
                ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd_head, n_head, 512);
237
0
                ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
238
0
                op_tensor = ggml_rope_ext(
239
0
                    ctx, a, b, w,
240
0
                    0, 0, 0, 0, 0,
241
0
                    0, 0, 0, 0
242
0
                );
243
244
0
            } break;
245
0
        case GGML_OP_SSM_CONV:
246
0
            {
247
0
                const int64_t n_seq_tokens = 512;
248
0
                const int64_t n_seqs       = 3;
249
0
                ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0] - 1 + n_seq_tokens, w->ne[1], n_seqs);
250
0
                op_tensor = ggml_ssm_conv(ctx, conv_x, w);
251
0
            } break;
252
0
        case GGML_OP_SSM_SCAN:
253
0
            {
254
                // w is ssm_a, which is used to distinguish Mamba-1 and Mamba-2
255
0
                const int64_t d_state      = w->ne[0] == 1 ? hparams.ssm_d_state : w->ne[0];
256
0
                const int64_t n_head       = w->ne[1];
257
0
                const int64_t head_dim     = hparams.ssm_d_inner / n_head;
258
0
                const int64_t n_group      = hparams.ssm_n_group ? hparams.ssm_n_group : 1;
259
0
                const int64_t n_seq_tokens = 512;
260
0
                const int64_t n_seqs       = 3;
261
0
                ggml_tensor * s   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, head_dim, n_head, n_seqs);
262
0
                ggml_tensor * x   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, head_dim, n_head, n_seq_tokens, n_seqs);
263
0
                ggml_tensor * dt  = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_head, n_seq_tokens, n_seqs);
264
0
                ggml_tensor * B   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs);
265
0
                ggml_tensor * C   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs);
266
0
                ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_seqs);
267
0
                op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C, ids);
268
0
            } break;
269
0
        case GGML_OP_RWKV_WKV6:
270
0
            {
271
                // FIXME
272
0
                const int64_t S = 123;
273
0
                const int64_t H = 123;
274
0
                const int64_t n_tokens = 123;
275
0
                const int64_t n_seqs = 123;
276
0
                ggml_tensor  * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
277
0
                ggml_tensor  * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
278
0
                ggml_tensor  * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
279
0
                ggml_tensor  * tf = w;
280
0
                ggml_tensor  * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
281
0
                ggml_tensor  * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H);
282
0
                op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state);
283
0
            } break;
284
0
        case GGML_OP_IM2COL:
285
0
            {
286
0
                const int n_embd_inp = hparams.n_embd_inp();
287
0
                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd_inp, w->ne[1], 1, 1);
288
0
                op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16);
289
0
            } break;
290
0
        case GGML_OP_SCALE:
291
0
            {
292
0
                op_tensor = ggml_scale(ctx, w, 1.0f);
293
0
            } break;
294
0
        default:
295
0
            GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name);
296
0
    }
297
298
    // create a temporary dummy buffer for the weight so that supports_op can check the buffer type
299
0
    GGML_ASSERT(w->buffer == nullptr);
300
0
    w->buffer = ggml_backend_buft_alloc_buffer(buft, 0);
301
0
    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
302
0
    ggml_backend_buffer_free(w->buffer);
303
0
    w->buffer = nullptr;
304
305
0
    return op_supported;
306
0
}
307
308
// lists of buffer types used for each layer
309
using buft_list_t = std::vector<std::pair<ggml_backend_dev_t, ggml_backend_buffer_type_t>>;
310
311
// find the first buffer type in the list that can use the tensor
312
0
static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hparams, ggml_tensor * tensor, ggml_op op, const buft_list_t & buft_list) {
313
0
    GGML_ASSERT(!buft_list.empty());
314
0
    for (const auto & cur : buft_list) {
315
0
        ggml_backend_dev_t cur_dev = cur.first;
316
0
        ggml_backend_buffer_type_t cur_buft = cur.second;
317
0
        if (weight_buft_supported(hparams, tensor, op, cur_buft, cur_dev)) {
318
0
            return cur_buft;
319
0
        }
320
0
    }
321
322
0
    return nullptr;
323
0
}
324
325
// CPU: ACCEL -> GPU host -> CPU extra -> CPU
326
0
static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts, bool no_host) {
327
0
    buft_list_t buft_list;
328
329
    // add ACCEL buffer types
330
0
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
331
0
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
332
0
        if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
333
0
            auto * buft = ggml_backend_dev_buffer_type(dev);
334
            // skip
335
0
            if (buft != ggml_backend_cpu_buffer_type()) {
336
0
                buft_list.emplace_back(dev, buft);
337
0
            }
338
0
        }
339
0
    }
340
341
    // add a host buffer type
342
    // storing the tensors in a host buffer is useful when the processing of large batches
343
    // is offloaded to a GPU device, since it reduces the time spent on data transfers
344
    // generally, this will be done using the first device in the list
345
    // a better approach would be to handle this on a weight-by-weight basis using the offload_op
346
    // function of the device to determine if it would benefit from being stored in a host buffer
347
0
    if (!no_host) {
348
0
        for (auto * dev : devices) {
349
0
            ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
350
0
            if (buft) {
351
0
                buft_list.emplace_back(dev, buft);
352
0
                break;
353
0
            }
354
0
        }
355
0
    }
356
357
    // add extra buffer types
358
0
    if (use_extra_bufts) {
359
0
        auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
360
0
        if (cpu_dev == nullptr) {
361
0
            throw std::runtime_error(format("%s: no CPU backend found", __func__));
362
0
        }
363
364
0
        auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
365
0
        auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
366
0
            ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
367
0
        if (ggml_backend_dev_get_extra_bufts_fn) {
368
0
            ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
369
0
            while (extra_bufts && *extra_bufts) {
370
0
                buft_list.emplace_back(cpu_dev, *extra_bufts);
371
0
                ++extra_bufts;
372
0
            }
373
0
        }
374
0
    }
375
376
    // add the CPU buffer type
377
0
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
378
0
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
379
0
        if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
380
0
            buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
381
0
        }
382
0
    }
383
384
0
    return buft_list;
385
0
}
386
387
// GPU: split if LLAMA_SPLIT_MODE_ROW -> GPU
388
0
static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode split_mode, const float * tensor_split) {
389
0
    buft_list_t buft_list;
390
391
    // add the device split buffer type if requested and available
392
0
    if (split_mode == LLAMA_SPLIT_MODE_ROW) {
393
0
        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
394
0
        auto ggml_backend_split_buffer_type_fn = (ggml_backend_split_buffer_type_t)
395
0
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_split_buffer_type");
396
0
        if (ggml_backend_split_buffer_type_fn) {
397
0
            size_t dev_index = [&]() {
398
0
                auto * reg = ggml_backend_dev_backend_reg(dev);
399
0
                for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) {
400
0
                    if (ggml_backend_reg_dev_get(reg, i) == dev) {
401
0
                        return i;
402
0
                    }
403
0
                }
404
0
                throw std::runtime_error(format("device %s not found in its backend reg", ggml_backend_dev_name(dev)));
405
0
            }();
406
0
            auto * buft = ggml_backend_split_buffer_type_fn(dev_index, tensor_split);
407
0
            if (buft != nullptr) {
408
0
                buft_list.emplace_back(dev, buft);
409
0
            }
410
0
        }
411
0
    }
412
413
    // add the device default buffer type
414
0
    buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
415
416
    // add the device extra buffer type (if any)
417
0
    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
418
0
    auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
419
0
        ggml_backend_reg_get_proc_address(reg, "ggml_backend_dev_get_extra_bufts");
420
421
0
    if (ggml_backend_dev_get_extra_bufts_fn) {
422
0
        ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(dev);
423
0
        while (extra_bufts && *extra_bufts) {
424
0
            buft_list.emplace_back(dev, *extra_bufts);
425
0
            ++extra_bufts;
426
0
        }
427
0
    }
428
429
0
    return buft_list;
430
0
}
431
432
struct llama_model::impl {
433
0
    impl() = default;
434
0
    ~impl() = default;
435
436
    uint64_t n_elements = 0;
437
438
    size_t n_bytes = 0;
439
440
    std::string desc_str;
441
442
    // model memory mapped files
443
    llama_mmaps mappings;
444
445
    // objects representing data potentially being locked in memory
446
    llama_mlocks mlock_bufs;
447
    llama_mlocks mlock_mmaps;
448
449
    // contexts where the model tensors metadata is stored as well as the corresponding buffers:
450
    std::vector<std::pair<ggml_context_ptr, std::vector<ggml_backend_buffer_ptr>>> ctxs_bufs;
451
452
    buft_list_t cpu_buft_list;
453
    std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
454
455
    struct layer_dev {
456
        ggml_backend_dev_t dev;
457
        buft_list_t * buft_list;
458
    };
459
460
    layer_dev dev_input = {};
461
    layer_dev dev_output = {};
462
    std::vector<layer_dev> dev_layer;
463
464
    bool has_tensor_overrides;
465
};
466
467
0
llama_model::llama_model(const llama_model_params & params) : params(params), pimpl(std::make_unique<impl>()) {
468
0
    pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
469
0
}
470
471
0
llama_model::~llama_model() {
472
0
    for (auto * lora : loras) {
473
0
        delete lora;
474
0
    }
475
0
}
476
477
0
void llama_model::load_stats(llama_model_loader & ml) {
478
0
    pimpl->n_elements = ml.n_elements;
479
0
    pimpl->n_bytes = ml.n_bytes;
480
0
}
481
482
0
void llama_model::load_arch(llama_model_loader & ml) {
483
0
    arch = ml.get_arch();
484
0
    if (arch == LLM_ARCH_UNKNOWN) {
485
0
        throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
486
0
    }
487
0
}
488
489
0
void llama_model::load_hparams(llama_model_loader & ml) {
490
0
    const gguf_context * ctx = ml.meta.get();
491
492
    // get metadata as string
493
0
    for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
494
0
        gguf_type type = gguf_get_kv_type(ctx, i);
495
0
        if (type == GGUF_TYPE_ARRAY) {
496
0
            continue;
497
0
        }
498
0
        const char * name = gguf_get_key(ctx, i);
499
0
        const std::string value = gguf_kv_to_str(ctx, i);
500
0
        gguf_kv.emplace(name, value);
501
0
    }
502
503
    // get general kv
504
0
    ml.get_key(LLM_KV_GENERAL_NAME, name, false);
505
506
    // everything past this point is not vocab-related
507
    // for CLIP models, we only need to load tensors, no hparams
508
0
    if (hparams.vocab_only || ml.get_arch() == LLM_ARCH_CLIP) {
509
0
        return;
510
0
    }
511
512
0
    ml.get_key(LLM_KV_CONTEXT_LENGTH,          hparams.n_ctx_train);
513
0
    ml.get_key(LLM_KV_EMBEDDING_LENGTH,        hparams.n_embd);
514
0
    ml.get_key(LLM_KV_EMBEDDING_LENGTH_OUT,    hparams.n_embd_out, false);
515
0
    ml.get_key(LLM_KV_BLOCK_COUNT,             hparams.n_layer);
516
0
    ml.get_key(LLM_KV_EXPERT_COUNT,            hparams.n_expert,        false);
517
0
    ml.get_key(LLM_KV_EXPERT_USED_COUNT,       hparams.n_expert_used,   false);
518
0
    ml.get_key(LLM_KV_EXPERT_GROUP_COUNT,      hparams.n_expert_groups, false);
519
0
    ml.get_key(LLM_KV_EXPERT_GROUP_USED_COUNT, hparams.n_group_used,    false);
520
521
0
    if (arch == LLM_ARCH_WAVTOKENIZER_DEC) {
522
0
        ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
523
524
0
        ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd);
525
0
        ml.get_key(LLM_KV_POSNET_BLOCK_COUNT,      hparams.posnet.n_layer);
526
527
0
        ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd);
528
0
        ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT,      hparams.convnext.n_layer);
529
0
    }
530
531
0
    GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
532
0
    GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
533
0
    if (hparams.n_expert > 0) {
534
0
        GGML_ASSERT(hparams.n_expert_used > 0);
535
0
        GGML_ASSERT(hparams.n_expert_groups < hparams.n_expert);
536
0
        if (hparams.n_expert_groups > 1) {
537
0
            GGML_ASSERT(hparams.n_expert % hparams.n_expert_groups == 0);
538
0
            GGML_ASSERT(hparams.n_group_used > 0);
539
0
            GGML_ASSERT(hparams.n_group_used < hparams.n_expert_groups);
540
0
        }
541
0
    } else {
542
0
        GGML_ASSERT(hparams.n_expert_used == 0);
543
0
        GGML_ASSERT(hparams.n_expert_groups == 0);
544
0
    }
545
546
0
    std::fill(hparams.n_head_arr.begin(),    hparams.n_head_arr.end(),    0);
547
0
    std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
548
0
    std::fill(hparams.n_ff_arr.begin(),      hparams.n_ff_arr.end(),      0);
549
0
    std::fill(
550
0
        hparams.recurrent_layer_arr.begin(),
551
0
        hparams.recurrent_layer_arr.end(),
552
0
        llm_arch_is_recurrent(ml.get_arch()));
553
554
0
    std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
555
0
    std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
556
557
0
    std::fill(hparams.xielu_alpha_n.begin(), hparams.xielu_alpha_n.end(), 0.0f);
558
0
    std::fill(hparams.xielu_alpha_p.begin(), hparams.xielu_alpha_p.end(), 0.0f);
559
0
    std::fill(hparams.xielu_beta.begin(), hparams.xielu_beta.end(), 0.0f);
560
0
    std::fill(hparams.xielu_eps.begin(), hparams.xielu_eps.end(), 0.0f);
561
562
0
    ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff_arr,   hparams.n_layer, false);
563
0
    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
564
565
    // n_head_kv is optional, default to n_head
566
0
    hparams.n_head_kv_arr = hparams.n_head_arr;
567
568
0
    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
569
570
0
    bool rope_finetuned = false;
571
0
    ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
572
0
    hparams.rope_finetuned = rope_finetuned;
573
574
0
    hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
575
0
    ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
576
577
    // rope_freq_base (optional)
578
0
    hparams.rope_freq_base_train = 10000.0f;
579
0
    ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
580
581
0
    std::string rope_scaling("linear");
582
0
    ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
583
0
    hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
584
0
    GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
585
586
    // TODO: Handle SWA metadata similarly when models start implementing it
587
    // rope_freq_scale (inverse of the kv) is optional
588
0
    float ropescale = 0.0f;
589
0
    if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
590
        // try the old key name
591
0
        ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
592
0
    }
593
0
    hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
594
595
0
    ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
596
597
    // non-transformer models do not have attention heads
598
0
    if (hparams.n_head() > 0) {
599
        // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
600
        // gpt-j n_rot = rotary_dim
601
602
0
        hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
603
0
        ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
604
605
0
        hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
606
0
        ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
607
608
        // sanity check for n_rot (optional)
609
0
        hparams.n_rot = hparams.n_embd_head_k;
610
611
0
        ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
612
613
0
        if (arch == LLM_ARCH_LLAMA || arch == LLM_ARCH_DECI || arch == LLM_ARCH_FALCON || arch == LLM_ARCH_LLAMA_EMBED) {
614
0
            if (hparams.n_rot != hparams.n_embd_head_k) {
615
0
                throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
616
0
            }
617
0
        }
618
0
    } else {
619
0
        hparams.n_rot = 0;
620
0
        hparams.n_embd_head_k = 0;
621
0
        hparams.n_embd_head_v = 0;
622
0
    }
623
624
    // for differentiating model types
625
0
    uint32_t n_vocab = 0;
626
0
    ml.get_key(LLM_KV_VOCAB_SIZE, n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, n_vocab, false);
627
628
    // for classifier models
629
0
    ml.get_arr(LLM_KV_CLASSIFIER_OUTPUT_LABELS, classifier_labels, false);
630
0
    if (!classifier_labels.empty()) {
631
0
        hparams.n_cls_out = classifier_labels.size();
632
0
    }
633
634
    // arch-specific KVs
635
0
    switch (arch) {
636
0
        case LLM_ARCH_LLAMA:
637
0
        case LLM_ARCH_LLAMA_EMBED:
638
0
            {
639
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
640
641
0
                if (hparams.n_expert == 8) {
642
0
                    switch (hparams.n_layer) {
643
0
                        case 32: type = LLM_TYPE_8x7B; break;
644
0
                        case 56: type = LLM_TYPE_8x22B; break;
645
0
                        default: type = LLM_TYPE_UNKNOWN;
646
0
                    }
647
0
                } else {
648
0
                    switch (hparams.n_layer) {
649
0
                        case 16: type = LLM_TYPE_1B; break; // Llama 3.2 1B
650
0
                        case 22: type = LLM_TYPE_1B; break;
651
0
                        case 26: type = LLM_TYPE_3B; break;
652
0
                        case 28: type = LLM_TYPE_3B; break; // Llama 3.2 3B
653
0
                        case 30: type = LLM_TYPE_256M; break; // smoldocling 256M
654
                        // granite uses a vocab with len 49152
655
0
                        case 32: type = n_vocab == 49152 ? LLM_TYPE_3B : (n_vocab < 40000 ? LLM_TYPE_7B : LLM_TYPE_8B); break;
656
0
                        case 36: type = LLM_TYPE_8B; break; // granite
657
0
                        case 40: type = LLM_TYPE_13B; break;
658
0
                        case 48: type = LLM_TYPE_34B; break;
659
0
                        case 60: type = LLM_TYPE_30B; break;
660
0
                        case 80: type = hparams.n_head() == hparams.n_head_kv() ? LLM_TYPE_65B : LLM_TYPE_70B; break;
661
0
                        default: type = LLM_TYPE_UNKNOWN;
662
0
                    }
663
0
                }
664
0
            } break;
665
0
        case LLM_ARCH_LLAMA4:
666
0
            {
667
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
668
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
669
0
                ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP,   hparams.n_moe_layer_step);
670
671
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
672
0
                if (found_swa && hparams.n_swa == 0) {
673
0
                    hparams.swa_type             = LLAMA_SWA_TYPE_NONE;
674
0
                    hparams.n_no_rope_layer_step = hparams.n_layer; // always use rope
675
0
                } else {
676
0
                    hparams.swa_type                = LLAMA_SWA_TYPE_CHUNKED;
677
0
                    hparams.n_swa                   = 8192;
678
0
                    hparams.n_attn_temp_floor_scale = 8192;
679
0
                    hparams.f_attn_temp_scale       = 0.1f;
680
0
                    hparams.f_attn_temp_offset      = 1.0f;
681
0
                    hparams.set_swa_pattern(4);   // pattern: 3 chunked - 1 full
682
683
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
684
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
685
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
686
0
                }
687
688
0
                switch (hparams.n_expert) {
689
0
                    case 0: {
690
                        // MobileLLM (no MoE)
691
0
                        switch (hparams.n_embd) {
692
0
                            case 2048: type = LLM_TYPE_140M; break;
693
0
                            case 4096: type = LLM_TYPE_360M; break;
694
0
                            case 6144: type = LLM_TYPE_950M; break;
695
0
                            default:   type = LLM_TYPE_UNKNOWN;
696
0
                        }
697
0
                    } break;
698
0
                    case 16:  type = LLM_TYPE_17B_16E; break;
699
0
                    case 128: type = LLM_TYPE_17B_128E; break;
700
0
                    default:  type = LLM_TYPE_UNKNOWN;
701
0
                }
702
703
0
                hparams.use_kq_norm = type != LLM_TYPE_17B_128E;
704
0
            } break;
705
0
        case LLM_ARCH_ARCEE:
706
0
            {
707
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
708
709
                // Arcee uses the same structure as Llama
710
0
                switch (hparams.n_layer) {
711
0
                    case 36: type = LLM_TYPE_4B; break;
712
0
                    default: type = LLM_TYPE_UNKNOWN;
713
0
                }
714
0
            } break;
715
0
        case LLM_ARCH_AFMOE:
716
0
            {
717
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
718
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
719
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
720
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
721
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
722
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale, false);
723
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
724
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa, false);
725
726
                // Set up interleaved sliding window attention (ISWA)
727
                // Pattern: 3 sliding - 1 full (global_attn_every_n_layers = 4)
728
0
                if (hparams.n_swa > 0) {
729
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
730
0
                    hparams.set_swa_pattern(4);
731
732
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
733
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
734
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
735
0
                } else {
736
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
737
0
                }
738
739
                // Default to sigmoid if not set
740
0
                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
741
0
                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID;
742
0
                }
743
744
0
                switch (hparams.n_layer) {
745
0
                    case 56: type = LLM_TYPE_6B; break;
746
0
                    case 32: type = LLM_TYPE_26B; break;
747
0
                    default: type = LLM_TYPE_UNKNOWN;
748
0
                }
749
0
            } break;
750
0
        case LLM_ARCH_DECI:
751
0
            {
752
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
753
0
                switch (hparams.n_layer) {
754
0
                    case 32: type = LLM_TYPE_7B; break;
755
0
                    case 80: type = LLM_TYPE_70B; break;
756
0
                    case 162: type = LLM_TYPE_405B; break;
757
0
                    default: type = LLM_TYPE_UNKNOWN;
758
0
                }
759
0
            } break;
760
0
        case LLM_ARCH_MINICPM:
761
0
            {
762
                // Backward-compatible defaults for older MiniCPM GGUFs
763
0
                hparams.f_embedding_scale = 12.0f;
764
0
                hparams.f_residual_scale  = 1.4f / sqrtf(float(hparams.n_layer));
765
0
                hparams.f_logit_scale     = hparams.n_embd ? (256.0f / float(hparams.n_embd)) : 1.0f;
766
767
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
768
769
                // Optional KV reads, override defaults if present in newer GGUF exports
770
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale, /*required=*/false);
771
0
                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale, /*required=*/false);
772
0
                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale, /*required=*/false);
773
774
                // MiniCPM uses rope by default, unlike Granite which uses it as a switch
775
0
                hparams.rope_finetuned = true;
776
777
0
                switch (hparams.n_layer) {
778
0
                    case 52: type = LLM_TYPE_1B; break;
779
0
                    case 40: type = LLM_TYPE_2B; break;
780
0
                    default: type = LLM_TYPE_UNKNOWN;
781
0
                }
782
0
            } break;
783
0
        case LLM_ARCH_MINICPM3:
784
0
            {
785
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
786
0
                ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK,       hparams.n_lora_q);
787
0
                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK,      hparams.n_lora_kv);
788
789
0
                switch (hparams.n_layer) {
790
0
                    case 62: type = LLM_TYPE_4B; break;
791
0
                    default: type = LLM_TYPE_UNKNOWN;
792
0
                }
793
0
            } break;
794
0
        case LLM_ARCH_GROK:
795
0
            {
796
                // defaults for old GGUFs
797
0
                hparams.yarn_beta_fast = 8.0f;
798
0
                hparams.f_logit_scale = 0.5773502691896257f;
799
0
                hparams.f_embedding_scale = 78.38367176906169f;
800
0
                hparams.f_attn_out_scale = 0.08838834764831845f;
801
0
                hparams.f_attn_logit_softcapping = 30.0f;
802
0
                hparams.f_router_logit_softcapping = 30.0f;
803
                // no final_logit_softcapping in grok-1
804
0
                hparams.f_final_logit_softcapping = 0.0f;
805
806
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,  hparams.f_norm_rms_eps);
807
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,   hparams.n_ff_exp, false);
808
0
                ml.get_key(LLM_KV_LOGIT_SCALE,                  hparams.f_logit_scale, false);
809
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE,              hparams.f_embedding_scale, false);
810
0
                ml.get_key(LLM_KV_ATTENTION_OUTPUT_SCALE,       hparams.f_attn_out_scale, false);
811
0
                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING,       hparams.f_attn_logit_softcapping, false);
812
0
                ml.get_key(LLM_KV_ROUTER_LOGIT_SOFTCAPPING,     hparams.f_router_logit_softcapping, false);
813
0
                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING,      hparams.f_final_logit_softcapping, false);
814
815
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH,  hparams.attn_temp_length, false);
816
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_EXT_FACTOR,  hparams.yarn_ext_factor, false);
817
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_ATTN_FACTOR, hparams.yarn_attn_factor, false);
818
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_FAST,   hparams.yarn_beta_fast, false);
819
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_SLOW,   hparams.yarn_beta_slow, false);
820
821
0
                switch (hparams.n_layer) {
822
0
                    case 64: type = LLM_TYPE_314B; break;
823
0
                    default: type = LLM_TYPE_UNKNOWN;
824
0
                }
825
0
            } break;
826
0
        case LLM_ARCH_FALCON:
827
0
            {
828
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
829
830
0
                switch (hparams.n_layer) {
831
0
                    case 32: type = LLM_TYPE_7B; break;
832
0
                    case 60: type = LLM_TYPE_40B; break;
833
0
                    default: type = LLM_TYPE_UNKNOWN;
834
0
                }
835
0
            } break;
836
0
        case LLM_ARCH_BAICHUAN:
837
0
            {
838
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
839
0
                switch (hparams.n_layer) {
840
0
                    case 32: type = LLM_TYPE_7B; break;
841
0
                    case 40: type = LLM_TYPE_13B; break;
842
0
                    default: type = LLM_TYPE_UNKNOWN;
843
0
                }
844
845
0
                if (type == LLM_TYPE_13B) {
846
                    // TODO: become GGUF KV parameter
847
0
                    hparams.f_max_alibi_bias = 8.0f;
848
0
                }
849
0
            } break;
850
0
        case LLM_ARCH_STARCODER:
851
0
            {
852
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
853
0
                switch (hparams.n_layer) {
854
0
                    case 24: type = LLM_TYPE_1B; break;
855
0
                    case 36: type = LLM_TYPE_3B; break;
856
0
                    case 42: type = LLM_TYPE_7B; break;
857
0
                    case 40: type = LLM_TYPE_15B; break;
858
0
                    default: type = LLM_TYPE_UNKNOWN;
859
0
                }
860
0
            } break;
861
0
        case LLM_ARCH_REFACT:
862
0
            {
863
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
864
0
                switch (hparams.n_layer) {
865
0
                    case 32: type = LLM_TYPE_1B; break;
866
0
                    default: type = LLM_TYPE_UNKNOWN;
867
0
                }
868
869
                // TODO: become GGUF KV parameter
870
0
                hparams.f_max_alibi_bias = 8.0f;
871
0
            } break;
872
0
        case LLM_ARCH_BERT:
873
0
            {
874
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
875
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
876
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
877
878
0
                switch (hparams.n_layer) {
879
0
                    case 3:
880
0
                        type = LLM_TYPE_17M; break; // bge-micro
881
0
                    case 6:
882
0
                        type = LLM_TYPE_22M; break; // MiniLM-L6
883
0
                    case 12:
884
0
                        switch (hparams.n_embd) {
885
0
                            case 384: type = LLM_TYPE_33M; break; // MiniLM-L12, bge-small
886
0
                            case 768: type = LLM_TYPE_109M; break; // bge-base
887
0
                            default: type = LLM_TYPE_UNKNOWN;
888
0
                        } break;
889
0
                    case 24:
890
0
                        type = LLM_TYPE_335M; break; // bge-large
891
0
                    default: type = LLM_TYPE_UNKNOWN;
892
0
                }
893
0
            } break;
894
0
        case LLM_ARCH_MODERN_BERT:
895
0
            {
896
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
897
0
                if (found_swa && hparams.n_swa > 0) {
898
0
                    uint32_t swa_period = 3;
899
0
                    hparams.swa_type = LLAMA_SWA_TYPE_SYMMETRIC;
900
901
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa);
902
0
                    ml.get_key_or_arr(LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN, swa_period, false);
903
0
                    hparams.set_swa_pattern(swa_period);
904
0
                } else {
905
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
906
0
                }
907
908
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
909
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,        hparams.causal_attn);
910
0
                ml.get_key(LLM_KV_POOLING_TYPE,            hparams.pooling_type, false);
911
912
0
                switch (hparams.n_layer) {
913
0
                    case 12:
914
0
                        type = LLM_TYPE_47M; break; // granite-embedding-small
915
0
                    case 22:
916
0
                        type = LLM_TYPE_149M; break; // modern-bert-base
917
0
                    case 28:
918
0
                        type = LLM_TYPE_395M; break; // modern-bert-large
919
0
                    default: type = LLM_TYPE_UNKNOWN;
920
0
                }
921
0
            } break;
922
0
        case LLM_ARCH_JINA_BERT_V2:
923
0
            {
924
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
925
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
926
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
927
0
                hparams.f_max_alibi_bias = 8.0f;
928
929
0
                switch (hparams.n_layer) {
930
0
                    case 4:  type = LLM_TYPE_33M;  break; // jina-embeddings-small
931
0
                    case 12: type = LLM_TYPE_137M; break; // jina-embeddings-base
932
0
                    default: type = LLM_TYPE_UNKNOWN;
933
0
                }
934
0
            } break;
935
0
        case LLM_ARCH_JINA_BERT_V3:
936
0
            {
937
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
938
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
939
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
940
941
0
                switch (hparams.n_layer) {
942
0
                    case 24:
943
0
                        type = LLM_TYPE_558M; break;
944
0
                    default: type = LLM_TYPE_UNKNOWN;
945
0
                }
946
0
            } break;
947
0
        case LLM_ARCH_NOMIC_BERT:
948
0
        case LLM_ARCH_NOMIC_BERT_MOE:
949
0
            {
950
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
951
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
952
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type);
953
0
                ml.get_key(LLM_KV_MOE_EVERY_N_LAYERS,         hparams.moe_every_n_layers, 0);
954
955
0
                if (hparams.n_layer == 12 && hparams.n_embd == 768) {
956
0
                    if (arch == LLM_ARCH_NOMIC_BERT) {
957
0
                        type = LLM_TYPE_137M;
958
0
                    } else if (arch == LLM_ARCH_NOMIC_BERT_MOE && hparams.moe_every_n_layers == 2) {
959
0
                        type = LLM_TYPE_475M;
960
0
                    }
961
0
                }
962
0
            } break;
963
0
        case LLM_ARCH_NEO_BERT:
964
0
            {
965
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
966
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,            hparams.causal_attn);
967
0
                ml.get_key(LLM_KV_POOLING_TYPE,                hparams.pooling_type);
968
969
0
                if (hparams.n_layer == 28) {
970
0
                    type = LLM_TYPE_250M;
971
0
                }
972
0
            } break;
973
0
        case LLM_ARCH_BLOOM:
974
0
            {
975
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
976
977
0
                switch (hparams.n_layer) {
978
0
                    case 24: type = LLM_TYPE_1B; break;
979
0
                    case 30:
980
0
                        switch (hparams.n_embd) {
981
0
                            case 2560: type = LLM_TYPE_3B; break;
982
0
                            case 4096: type = LLM_TYPE_7B; break;
983
0
                            default: type = LLM_TYPE_UNKNOWN;
984
0
                        } break;
985
0
                    default: type = LLM_TYPE_UNKNOWN;
986
0
                }
987
988
                // TODO: become GGUF KV parameter
989
0
                hparams.f_max_alibi_bias = 8.0f;
990
0
            } break;
991
0
        case LLM_ARCH_MPT:
992
0
            {
993
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
994
0
                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv, false);
995
0
                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
996
997
0
                switch (hparams.n_layer) {
998
0
                    case 32: type = LLM_TYPE_7B; break;
999
0
                    case 48: type = LLM_TYPE_30B; break;
1000
0
                    default: type = LLM_TYPE_UNKNOWN;
1001
0
                }
1002
0
            } break;
1003
0
        case LLM_ARCH_STABLELM:
1004
0
            {
1005
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1006
1007
0
                switch (hparams.n_layer) {
1008
0
                    case 24: type = LLM_TYPE_1B; break;
1009
0
                    case 32: type = LLM_TYPE_3B; break;
1010
0
                    case 40: type = LLM_TYPE_12B; break;
1011
0
                    default: type = LLM_TYPE_UNKNOWN;
1012
0
               }
1013
0
            } break;
1014
0
        case LLM_ARCH_QWEN:
1015
0
            {
1016
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1017
1018
0
                switch (hparams.n_layer) {
1019
0
                    case 32: type = LLM_TYPE_7B; break;
1020
0
                    case 40: type = LLM_TYPE_13B; break;
1021
0
                    default: type = LLM_TYPE_UNKNOWN;
1022
0
                }
1023
0
            } break;
1024
0
        case LLM_ARCH_QWEN2VL:
1025
0
            {
1026
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
1027
0
            }
1028
            // fall through
1029
0
        case LLM_ARCH_QWEN2:
1030
0
            {
1031
0
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
1032
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1033
0
                switch (hparams.n_layer) {
1034
0
                    case 24: type = hparams.n_embd == 1024 ? LLM_TYPE_0_5B : LLM_TYPE_1B; break;
1035
0
                    case 28: type = hparams.n_embd == 1536 ? LLM_TYPE_1_5B : LLM_TYPE_7B; break;
1036
0
                    case 32: type = LLM_TYPE_7B; break;
1037
0
                    case 36: type = LLM_TYPE_3B; break;
1038
0
                    case 40: type = hparams.n_head() == 20 ? LLM_TYPE_4B : LLM_TYPE_13B; break;
1039
0
                    case 48: type = LLM_TYPE_14B; break;
1040
0
                    case 64: type = LLM_TYPE_32B; break;
1041
0
                    case 80: type = LLM_TYPE_70B; break;
1042
0
                    default: type = LLM_TYPE_UNKNOWN;
1043
0
                }
1044
0
            } break;
1045
0
        case LLM_ARCH_DREAM:
1046
0
            {
1047
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1048
                // Dream models are primarily 7B with 28 layers
1049
0
                switch (hparams.n_layer) {
1050
0
                    case 28:
1051
0
                        type = LLM_TYPE_7B;
1052
0
                        break;
1053
0
                    default:
1054
0
                        type = LLM_TYPE_UNKNOWN;
1055
0
                }
1056
                // Set non-causal attention for diffusion models
1057
0
                hparams.causal_attn = false;
1058
0
            }
1059
0
            break;
1060
0
        case LLM_ARCH_LLADA:
1061
0
            {
1062
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1063
                // LLaDA-8B has 32 layers, similar to LLaMA but for diffusion
1064
0
                switch (hparams.n_layer) {
1065
0
                    case 32:
1066
0
                        type = LLM_TYPE_8B;
1067
0
                        break;
1068
0
                    default:
1069
0
                        type = LLM_TYPE_UNKNOWN;
1070
0
                }
1071
                // Set non-causal attention for diffusion models
1072
0
                hparams.causal_attn = false;
1073
0
            }
1074
0
            break;
1075
0
        case LLM_ARCH_LLADA_MOE:
1076
0
            {
1077
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
1078
1079
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1080
                // diffusion language model uses non-causal attention
1081
0
                hparams.causal_attn = false;
1082
0
                switch (hparams.n_layer) {
1083
0
                    case 16: type = LLM_TYPE_A1_7B; break;
1084
0
                    default: type = LLM_TYPE_UNKNOWN;
1085
0
                }
1086
0
            } break;
1087
0
        case LLM_ARCH_RND1:
1088
0
            {
1089
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
1090
1091
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1092
0
                switch (hparams.n_layer) {
1093
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
1094
0
                    default: type = LLM_TYPE_UNKNOWN;
1095
0
                }
1096
                // Set non-causal attention for diffusion models
1097
0
                hparams.causal_attn = false;
1098
0
            } break;
1099
0
        case LLM_ARCH_QWEN2MOE:
1100
0
            {
1101
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp, false);
1102
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
1103
1104
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1105
0
                switch (hparams.n_layer) {
1106
0
                    case 24: type = LLM_TYPE_A2_7B; break;
1107
0
                    case 28: type = LLM_TYPE_57B_A14B; break;
1108
0
                    default: type = LLM_TYPE_UNKNOWN;
1109
0
                }
1110
0
            } break;
1111
0
        case LLM_ARCH_QWEN3:
1112
0
            {
1113
0
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
1114
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1115
0
                switch (hparams.n_layer) {
1116
0
                    case 28: type = hparams.n_embd == 1024 ? LLM_TYPE_0_6B : LLM_TYPE_1_7B; break;
1117
0
                    case 36: type = hparams.n_embd == 2560 ? LLM_TYPE_4B : LLM_TYPE_8B; break;
1118
0
                    case 40: type = LLM_TYPE_14B; break;
1119
0
                    case 64: type = LLM_TYPE_32B; break;
1120
0
                    default: type = LLM_TYPE_UNKNOWN;
1121
0
                }
1122
0
            } break;
1123
0
        case LLM_ARCH_MAINCODER:
1124
0
            {
1125
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1126
0
                switch (hparams.n_layer) {
1127
0
                    case 32: type = LLM_TYPE_1B; break;
1128
0
                    default: type = LLM_TYPE_UNKNOWN;
1129
0
                }
1130
0
            } break;
1131
0
        case LLM_ARCH_QWEN3VL:
1132
0
            {
1133
0
                ml.get_key(LLM_KV_NUM_DEEPSTACK_LAYERS, hparams.n_deepstack_layers, false);
1134
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
1135
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1136
0
                switch (hparams.n_layer) {
1137
0
                    case 28: type = LLM_TYPE_1_7B; break;
1138
0
                    case 36: type = hparams.n_embd == 2560 ? LLM_TYPE_4B : LLM_TYPE_8B; break;
1139
0
                    case 64: type = LLM_TYPE_32B; break;
1140
0
                    default: type = LLM_TYPE_UNKNOWN;
1141
0
                }
1142
0
            } break;
1143
0
        case LLM_ARCH_QWEN3MOE:
1144
0
            {
1145
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp, false);
1146
1147
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1148
0
                switch (hparams.n_layer) {
1149
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
1150
0
                    case 94: type = LLM_TYPE_235B_A22B; break;
1151
0
                    default: type = LLM_TYPE_UNKNOWN;
1152
0
                }
1153
0
            } break;
1154
0
        case LLM_ARCH_QWEN3VLMOE:
1155
0
            {
1156
0
                ml.get_key(LLM_KV_NUM_DEEPSTACK_LAYERS, hparams.n_deepstack_layers, false);
1157
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
1158
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
1159
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1160
0
                switch (hparams.n_layer) {
1161
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
1162
0
                    case 94: type = LLM_TYPE_235B_A22B; break;
1163
0
                    default: type = LLM_TYPE_UNKNOWN;
1164
0
                }
1165
0
            } break;
1166
0
        case LLM_ARCH_PHI2:
1167
0
            {
1168
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1169
1170
0
                switch (hparams.n_layer) {
1171
0
                    case 24: type = LLM_TYPE_1B; break;
1172
0
                    case 32: type = LLM_TYPE_3B; break;
1173
0
                    default: type = LLM_TYPE_UNKNOWN;
1174
0
                }
1175
0
            } break;
1176
0
        case LLM_ARCH_PHI3:
1177
0
            {
1178
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1179
1180
0
                switch (hparams.n_layer) {
1181
0
                    case 24: type = LLM_TYPE_1B; break;
1182
0
                    case 32: type = LLM_TYPE_3B; break;
1183
0
                    case 40: type = LLM_TYPE_14B; break;
1184
0
                    default: type = LLM_TYPE_UNKNOWN;
1185
0
                }
1186
1187
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1188
1189
0
                if (found_swa && hparams.n_swa > 0) {
1190
0
                    LLAMA_LOG_WARN("%s: Phi SWA is currently disabled - results might be suboptimal for some models (see %s)\n",
1191
0
                            __func__, "https://github.com/ggml-org/llama.cpp/pull/13676");
1192
1193
                    // TODO: fix conversion scripts to correctly populate `n_swa` and `n_swa_pattern`
1194
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1195
1196
0
                    hparams.n_swa         = 0;
1197
0
                    hparams.set_swa_pattern(1);
1198
0
                }
1199
0
            } break;
1200
0
        case LLM_ARCH_PHIMOE:
1201
0
            {
1202
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1203
1204
0
                switch (hparams.n_layer) {
1205
0
                    case 32: type = LLM_TYPE_16x3_8B; break;
1206
0
                    default: type = LLM_TYPE_UNKNOWN;
1207
0
                }
1208
0
            } break;
1209
0
        case LLM_ARCH_PLAMO:
1210
0
            {
1211
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1212
1213
0
                switch (hparams.n_layer) {
1214
0
                    case 40: type = LLM_TYPE_13B; break;
1215
0
                    default: type = LLM_TYPE_UNKNOWN;
1216
0
                }
1217
0
            } break;
1218
0
        case LLM_ARCH_PLAMO2:
1219
0
            {
1220
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1221
1222
                // Load Mamba SSM parameters
1223
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1224
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1225
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1226
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1227
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
1228
1229
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
1230
0
                    hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0;
1231
0
                }
1232
1233
0
                switch (hparams.n_layer) {
1234
0
                    case 16: type = LLM_TYPE_1B; break;
1235
0
                    case 32:
1236
0
                        if (hparams.n_embd == 2048) {
1237
0
                            type = LLM_TYPE_2B;
1238
0
                        } else if (hparams.n_embd == 4096) {
1239
0
                            type = LLM_TYPE_8B;
1240
0
                        }
1241
0
                        break;
1242
0
                    default: type = LLM_TYPE_UNKNOWN;
1243
0
                }
1244
1245
                // Load attention parameters
1246
0
                ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH,   hparams.n_embd_head_k, false);
1247
0
                ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
1248
0
            } break;
1249
0
        case LLM_ARCH_PLAMO3:
1250
0
            {
1251
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1252
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1253
0
                if (found_swa && hparams.n_swa > 0) {
1254
0
                    uint32_t swa_period = 8;
1255
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1256
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa);
1257
0
                    ml.get_key_or_arr(LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN, swa_period, false);
1258
0
                    hparams.set_swa_pattern(swa_period);
1259
0
                } else {
1260
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1261
0
                }
1262
1263
0
                switch (hparams.n_layer) {
1264
0
                    case 24: type = LLM_TYPE_2B; break;
1265
0
                    default: type = LLM_TYPE_UNKNOWN;
1266
0
                }
1267
0
            } break;
1268
0
        case LLM_ARCH_GPT2:
1269
0
            {
1270
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1271
0
                switch (hparams.n_layer) {
1272
0
                    case 12: type = LLM_TYPE_SMALL; break;
1273
0
                    case 24: type = LLM_TYPE_MEDIUM; break;
1274
0
                    case 36: type = LLM_TYPE_LARGE; break;
1275
0
                    case 48: type = LLM_TYPE_XL; break;
1276
0
                    default: type = LLM_TYPE_UNKNOWN;
1277
0
                }
1278
0
            } break;
1279
0
        case LLM_ARCH_CODESHELL:
1280
0
            {
1281
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1282
0
                switch (hparams.n_layer) {
1283
0
                    case 42: type = LLM_TYPE_7B; break;
1284
0
                    default: type = LLM_TYPE_UNKNOWN;
1285
0
                }
1286
0
            } break;
1287
0
        case LLM_ARCH_ORION:
1288
0
            {
1289
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1290
1291
0
                switch (hparams.n_layer) {
1292
0
                    case 40: type = LLM_TYPE_14B; break;
1293
0
                    default: type = LLM_TYPE_UNKNOWN;
1294
0
                }
1295
0
            } break;
1296
0
        case LLM_ARCH_INTERNLM2:
1297
0
            {
1298
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1299
0
                switch (hparams.n_layer) {
1300
0
                    case 32: type = LLM_TYPE_7B; break;
1301
0
                    case 48: type = LLM_TYPE_20B; break;
1302
0
                    default: type = LLM_TYPE_UNKNOWN;
1303
0
                }
1304
0
            } break;
1305
0
        case LLM_ARCH_GEMMA:
1306
0
            {
1307
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1308
1309
0
                switch (hparams.n_layer) {
1310
0
                    case 18: type = LLM_TYPE_2B; break;
1311
0
                    case 28: type = LLM_TYPE_7B; break;
1312
0
                    default: type = LLM_TYPE_UNKNOWN;
1313
0
                }
1314
0
            } break;
1315
0
        case LLM_ARCH_GEMMA2:
1316
0
            {
1317
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1318
0
                hparams.n_swa = 4096; // default value of gemma 2
1319
0
                hparams.set_swa_pattern(2);
1320
0
                hparams.attn_soft_cap = true;
1321
0
                hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1322
0
                hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
1323
1324
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,          hparams.rope_freq_base_train_swa, false);
1325
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa, false);
1326
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1327
0
                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING,      hparams.f_attn_logit_softcapping, false);
1328
0
                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING,     hparams.f_final_logit_softcapping, false);
1329
1330
0
                switch (hparams.n_layer) {
1331
0
                    case 26: type = LLM_TYPE_2B; break;
1332
0
                    case 42: type = LLM_TYPE_9B; break;
1333
0
                    case 46: type = LLM_TYPE_27B; break;
1334
0
                    default: type = LLM_TYPE_UNKNOWN;
1335
0
                }
1336
1337
                // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L173
1338
0
                hparams.f_attention_scale = type == LLM_TYPE_27B
1339
0
                    ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
1340
0
                    : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
1341
0
            } break;
1342
0
        case LLM_ARCH_GEMMA3:
1343
0
            {
1344
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1345
0
                if (found_swa && hparams.n_swa > 0) {
1346
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1347
0
                    hparams.set_swa_pattern(6);
1348
1349
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1350
0
                } else {
1351
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1352
0
                }
1353
1354
0
                hparams.f_final_logit_softcapping = 0.0f;
1355
0
                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
1356
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1357
1358
0
                switch (hparams.n_layer) {
1359
0
                    case 18: type = LLM_TYPE_270M; break;
1360
0
                    case 26: type = LLM_TYPE_1B; break;
1361
0
                    case 32: type = LLM_TYPE_8B; break; // Rnj-1
1362
0
                    case 34: type = LLM_TYPE_4B; break;
1363
0
                    case 48: type = LLM_TYPE_12B; break;
1364
0
                    case 62: type = LLM_TYPE_27B; break;
1365
0
                    default: type = LLM_TYPE_UNKNOWN;
1366
0
                }
1367
1368
                // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L289
1369
0
                hparams.f_attention_scale = type == LLM_TYPE_27B
1370
0
                    ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
1371
0
                    : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
1372
0
            } break;
1373
0
        case LLM_ARCH_GEMMA3N:
1374
0
            {
1375
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1376
0
                hparams.set_swa_pattern(5);
1377
1378
0
                hparams.n_layer_kv_from_start     = 20;
1379
0
                hparams.f_attention_scale         = 1.0f;
1380
1381
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,          hparams.rope_freq_base_train_swa, false);
1382
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa);
1383
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1384
1385
0
                switch (hparams.n_layer) {
1386
0
                    case 30: type = LLM_TYPE_E2B; break;
1387
0
                    case 35: type = LLM_TYPE_E4B; break;
1388
0
                    default: type = LLM_TYPE_UNKNOWN;
1389
0
                }
1390
0
            } break;
1391
0
        case LLM_ARCH_GEMMA_EMBEDDING:
1392
0
            {
1393
0
                hparams.swa_type = LLAMA_SWA_TYPE_SYMMETRIC;
1394
0
                hparams.set_swa_pattern(6);
1395
1396
0
                hparams.causal_attn = false; // embeddings do not use causal attention
1397
1398
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1399
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
1400
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1401
0
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
1402
1403
                // applied only if the model was converted with --sentence-transformers-dense-modules
1404
0
                ml.get_key(LLM_KV_DENSE_2_FEAT_IN, hparams.dense_2_feat_in, false);
1405
0
                ml.get_key(LLM_KV_DENSE_2_FEAT_OUT, hparams.dense_2_feat_out, false);
1406
0
                ml.get_key(LLM_KV_DENSE_3_FEAT_IN, hparams.dense_3_feat_in, false);
1407
0
                ml.get_key(LLM_KV_DENSE_3_FEAT_OUT, hparams.dense_3_feat_out, false);
1408
1409
0
                GGML_ASSERT((hparams.dense_2_feat_in == 0 || hparams.dense_2_feat_in == hparams.n_embd) && "dense_2_feat_in must be equal to n_embd");
1410
0
                GGML_ASSERT((hparams.dense_3_feat_out == 0 || hparams.dense_3_feat_out == hparams.n_embd) && "dense_3_feat_out must be equal to n_embd");
1411
1412
0
                switch (hparams.n_layer) {
1413
0
                    case 24: type = LLM_TYPE_0_3B; break;
1414
0
                    default: type = LLM_TYPE_UNKNOWN;
1415
0
                }
1416
0
                hparams.f_attention_scale = 1.0f / std::sqrt(float(hparams.n_embd_head_k));
1417
1418
0
            } break;
1419
0
        case LLM_ARCH_STARCODER2:
1420
0
            {
1421
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1422
0
                switch (hparams.n_layer) {
1423
0
                    case 30: type = LLM_TYPE_3B; break;
1424
0
                    case 32: type = LLM_TYPE_7B; break;
1425
0
                    case 40: type = LLM_TYPE_15B; break;
1426
0
                    case 52: type = LLM_TYPE_20B; break; // granite
1427
0
                    case 88: type = LLM_TYPE_34B; break; // granite
1428
0
                    default: type = LLM_TYPE_UNKNOWN;
1429
0
                }
1430
0
            } break;
1431
0
        case LLM_ARCH_MAMBA:
1432
0
            {
1433
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1434
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1435
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1436
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1437
0
                ml.get_key(LLM_KV_SSM_DT_B_C_RMS,     hparams.ssm_dt_b_c_rms, false);
1438
1439
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1440
1441
0
                switch (hparams.n_layer) {
1442
0
                    case 24:
1443
0
                        switch (hparams.n_embd) {
1444
0
                            case 768: type = LLM_TYPE_SMALL; break;
1445
0
                            default: type = LLM_TYPE_UNKNOWN;
1446
0
                        } break;
1447
0
                    case 48:
1448
0
                        switch (hparams.n_embd) {
1449
0
                            case 1024: type = LLM_TYPE_MEDIUM; break;
1450
0
                            case 1536: type = LLM_TYPE_LARGE; break;
1451
0
                            case 2048: type = LLM_TYPE_XL; break;
1452
0
                            default:   type = LLM_TYPE_UNKNOWN;
1453
0
                        } break;
1454
0
                    case 64:
1455
0
                        switch (hparams.n_embd) {
1456
0
                            case 2560: type = LLM_TYPE_3B; break;
1457
0
                            default: type = LLM_TYPE_UNKNOWN;
1458
0
                        } break;
1459
0
                    default: type = LLM_TYPE_UNKNOWN;
1460
0
                }
1461
0
            } break;
1462
0
        case LLM_ARCH_MAMBA2:
1463
0
            {
1464
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1465
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1466
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1467
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1468
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
1469
1470
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1471
1472
0
                switch (hparams.n_layer) {
1473
0
                    case 24:
1474
0
                        switch (hparams.n_embd) {
1475
0
                            case 768: type = LLM_TYPE_SMALL; break;
1476
0
                            default: type = LLM_TYPE_UNKNOWN;
1477
0
                        } break;
1478
0
                    case 48:
1479
0
                        switch (hparams.n_embd) {
1480
0
                            case 1024: type = LLM_TYPE_MEDIUM; break;
1481
0
                            case 1536: type = LLM_TYPE_LARGE; break;
1482
0
                            case 2048: type = LLM_TYPE_XL; break;
1483
0
                            default: type = LLM_TYPE_UNKNOWN;
1484
0
                        } break;
1485
0
                    case 64:
1486
0
                        switch (hparams.n_embd) {
1487
0
                            case 2560: type = LLM_TYPE_3B; break;
1488
0
                            case 4096: type = LLM_TYPE_7B; break;
1489
0
                            default: type = LLM_TYPE_UNKNOWN;
1490
0
                        } break;
1491
0
                    default: type = LLM_TYPE_UNKNOWN;
1492
0
                }
1493
0
            } break;
1494
0
        case LLM_ARCH_JAMBA:
1495
0
            {
1496
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1497
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1498
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1499
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1500
1501
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1502
1503
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
1504
0
                    hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0;
1505
0
                }
1506
1507
0
                switch (hparams.n_layer) {
1508
                    // TODO: Jamba layers are a bit heterogeneous, so naming this is hard.
1509
0
                    case 12: // 900M  8x???M
1510
0
                    case 32: // 51B  16x?B
1511
0
                    default: type = LLM_TYPE_UNKNOWN;
1512
0
                }
1513
0
            } break;
1514
0
        case LLM_ARCH_XVERSE:
1515
0
            {
1516
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1517
0
                switch (hparams.n_layer) {
1518
0
                    case 32: type = LLM_TYPE_7B; break;
1519
0
                    case 40: type = LLM_TYPE_13B; break;
1520
0
                    case 80: type = LLM_TYPE_65B; break;
1521
0
                    default: type = LLM_TYPE_UNKNOWN;
1522
0
                }
1523
0
            } break;
1524
0
        case LLM_ARCH_COMMAND_R:
1525
0
            {
1526
0
                ml.get_key(LLM_KV_LOGIT_SCALE,             hparams.f_logit_scale);
1527
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1528
0
                switch (hparams.n_layer) {
1529
0
                    case 40: type = LLM_TYPE_35B; break;
1530
0
                    default: type = LLM_TYPE_UNKNOWN;
1531
0
                }
1532
0
            } break;
1533
0
        case LLM_ARCH_COHERE2:
1534
0
            {
1535
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1536
0
                hparams.set_swa_pattern(4);
1537
0
                hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1538
0
                hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
1539
1540
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,       hparams.rope_freq_base_train_swa, false);
1541
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
1542
0
                ml.get_key(LLM_KV_LOGIT_SCALE,              hparams.f_logit_scale);
1543
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
1544
0
                switch (hparams.n_layer) {
1545
0
                    case 32: type = LLM_TYPE_8B; break;
1546
0
                    default: type = LLM_TYPE_UNKNOWN;
1547
0
                }
1548
0
            } break;
1549
0
        case LLM_ARCH_DBRX:
1550
0
        {
1551
0
            ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1552
0
            ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,     hparams.f_clamp_kqv);
1553
1554
0
            switch (hparams.n_layer) {
1555
0
                case 40: type = LLM_TYPE_16x12B; break;
1556
0
                default: type = LLM_TYPE_UNKNOWN;
1557
0
            }
1558
0
        } break;
1559
0
        case LLM_ARCH_OLMO:
1560
0
            {
1561
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1562
0
                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,     hparams.f_clamp_kqv, false);
1563
1564
0
                switch (hparams.n_layer) {
1565
0
                    case 22: type = LLM_TYPE_1B; break;
1566
0
                    case 32: type = LLM_TYPE_7B; break;
1567
0
                    case 80: type = LLM_TYPE_70B; break;
1568
0
                    default: type = LLM_TYPE_UNKNOWN;
1569
0
                }
1570
0
            } break;
1571
0
        case LLM_ARCH_OLMO2:
1572
0
            {
1573
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1574
1575
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1576
0
                if (found_swa && hparams.n_swa > 0) {
1577
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1578
0
                    hparams.set_swa_pattern(4);
1579
1580
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1581
0
                    hparams.rope_freq_scale_train_swa = 1.0; // See olmo2.cpp
1582
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1583
0
                } else {
1584
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1585
0
                }
1586
1587
0
                switch (hparams.n_layer) {
1588
0
                    case 16: type = LLM_TYPE_1B; break;
1589
0
                    case 32: type = LLM_TYPE_7B; break;
1590
0
                    case 40: type = LLM_TYPE_13B; break;
1591
0
                    case 64: type = LLM_TYPE_32B; break;
1592
0
                    default: type = LLM_TYPE_UNKNOWN;
1593
0
                }
1594
0
            } break;
1595
0
        case LLM_ARCH_SEED_OSS:
1596
0
            {
1597
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1598
0
                switch (hparams.n_layer) {
1599
0
                    case 64: type = LLM_TYPE_36B; break;
1600
0
                    default: type = LLM_TYPE_UNKNOWN;
1601
0
                }
1602
0
            } break;
1603
0
        case LLM_ARCH_OLMOE:
1604
0
            {
1605
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1606
0
                switch (hparams.n_layer) {
1607
0
                    case 16: type = LLM_TYPE_A1_7B; break;
1608
0
                    default: type = LLM_TYPE_UNKNOWN;
1609
0
                }
1610
0
            } break;
1611
0
        case LLM_ARCH_OPENELM:
1612
0
            {
1613
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1614
1615
0
                switch (hparams.n_layer) {
1616
0
                    case 16: type = LLM_TYPE_270M; break;
1617
0
                    case 20: type = LLM_TYPE_450M; break;
1618
0
                    case 28: type = LLM_TYPE_1B; break;
1619
0
                    case 36: type = LLM_TYPE_3B; break;
1620
0
                    default: type = LLM_TYPE_UNKNOWN;
1621
0
                }
1622
0
            } break;
1623
0
        case LLM_ARCH_GPTNEOX:
1624
0
            {
1625
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1626
0
                ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL,   hparams.use_par_res);
1627
0
                switch (hparams.n_layer) {
1628
0
                    case 6:
1629
0
                        switch (hparams.n_ff()) {
1630
0
                            case 512:  type = LLM_TYPE_14M; break;
1631
0
                            case 2048: type = LLM_TYPE_70M; break;
1632
0
                            default:   type = LLM_TYPE_UNKNOWN;
1633
0
                        } break;
1634
0
                    case 12:
1635
0
                        switch (hparams.n_ff()) {
1636
0
                            case 3072: type = LLM_TYPE_160M; break;
1637
0
                            default: type = LLM_TYPE_UNKNOWN;
1638
0
                        } break;
1639
0
                    case 16:
1640
0
                        switch (hparams.n_ff()) {
1641
0
                            case 8192: type = LLM_TYPE_1B; break;
1642
0
                            default: type = LLM_TYPE_UNKNOWN;
1643
0
                        } break;
1644
0
                    case 24:
1645
0
                        switch (hparams.n_ff()) {
1646
0
                            case 4096: type = LLM_TYPE_410M; break;
1647
0
                            case 8192: type = LLM_TYPE_1_4B; break;
1648
0
                            default: type = LLM_TYPE_UNKNOWN;
1649
0
                        } break;
1650
0
                    case 32:
1651
0
                        switch (hparams.n_ff()) {
1652
0
                            case 10240: type = LLM_TYPE_2_8B; break;
1653
0
                            case 16384: type = LLM_TYPE_6_9B; break;
1654
0
                            default: type = LLM_TYPE_UNKNOWN;
1655
0
                        } break;
1656
0
                    case 36:
1657
0
                        switch (hparams.n_ff()) {
1658
0
                            case 20480: type = LLM_TYPE_12B; break;
1659
0
                            default: type = LLM_TYPE_UNKNOWN;
1660
0
                        } break;
1661
0
                    case 44:
1662
0
                        switch (hparams.n_ff()) {
1663
0
                            case 24576: type = LLM_TYPE_20B; break;
1664
0
                            default: type = LLM_TYPE_UNKNOWN;
1665
0
                        } break;
1666
0
                    default: type = LLM_TYPE_UNKNOWN;
1667
0
                }
1668
0
            } break;
1669
0
        case LLM_ARCH_ARCTIC:
1670
0
            {
1671
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1672
1673
0
                if (hparams.n_expert == 128) {
1674
0
                    switch (hparams.n_layer) {
1675
0
                        case 35: type = LLM_TYPE_10B_128x3_66B; break;
1676
0
                        default: type = LLM_TYPE_UNKNOWN;
1677
0
                    }
1678
0
                } else {
1679
0
                    type = LLM_TYPE_UNKNOWN;
1680
0
                }
1681
0
            } break;
1682
0
        case LLM_ARCH_DEEPSEEK:
1683
0
            {
1684
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1685
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
1686
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
1687
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
1688
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
1689
1690
0
                switch (hparams.n_ff_exp) {
1691
0
                    case 1408: type = LLM_TYPE_16B; break;
1692
0
                    case 1792: type = LLM_TYPE_20B; break;
1693
0
                    default: type = LLM_TYPE_UNKNOWN;
1694
0
                }
1695
0
            } break;
1696
0
        case LLM_ARCH_DEEPSEEK2:
1697
0
            {
1698
                // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
1699
0
                bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
1700
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1701
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
1702
0
                if (!is_lite) {
1703
0
                    ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
1704
0
                }
1705
0
                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK,     hparams.n_lora_kv);
1706
0
                ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH_MLA,   hparams.n_embd_head_k_mla, false);
1707
0
                ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH_MLA, hparams.n_embd_head_v_mla, false);
1708
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
1709
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,        hparams.n_expert_shared);
1710
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,       hparams.expert_weights_scale, false);
1711
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,        hparams.expert_weights_norm, false);
1712
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,         hparams.expert_gating_func, false);
1713
0
                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
1714
                    // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
1715
                    // that have no expert_gating_func model parameter set
1716
0
                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
1717
0
                }
1718
1719
0
                if (ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, 0.0f)) {
1720
                    // [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]
1721
                    // cancel the factor from the convert script
1722
0
                    hparams.rope_yarn_log_mul /= 0.1f;
1723
0
                }
1724
1725
                // (optional) temperature tuning - used by mistral-large
1726
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE,  hparams.f_attn_temp_scale,       false);
1727
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.n_attn_temp_floor_scale, false);
1728
1729
0
                hparams.f_attn_temp_offset = 0.0f;
1730
1731
0
                switch (hparams.n_layer) {
1732
0
                    case 27: type = LLM_TYPE_16B; break;
1733
0
                    case 60: type = LLM_TYPE_236B; break;
1734
0
                    case 61: type = LLM_TYPE_671B; break;
1735
0
                    default: type = LLM_TYPE_UNKNOWN;
1736
0
                }
1737
0
            } break;
1738
0
        case LLM_ARCH_PLM:
1739
0
            {
1740
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1741
0
                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
1742
0
                switch (hparams.n_layer) {
1743
0
                    case 32: type = LLM_TYPE_1_8B; break;
1744
0
                    default: type = LLM_TYPE_UNKNOWN;
1745
0
                }
1746
0
            } break;
1747
0
        case LLM_ARCH_CHATGLM:
1748
0
            {
1749
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1750
0
                switch (hparams.n_layer) {
1751
0
                    case 28: {
1752
0
                        if (hparams.n_head(0) == 16) {
1753
0
                            type = LLM_TYPE_1_5B;
1754
0
                        } else {
1755
0
                            type = LLM_TYPE_6B;
1756
0
                        }
1757
0
                    } break;
1758
0
                    case 40: {
1759
0
                        if (hparams.n_head(0) == 24) {
1760
0
                            type = LLM_TYPE_4B;
1761
0
                        } else {
1762
0
                            type = LLM_TYPE_9B;
1763
0
                        }
1764
0
                    } break;
1765
0
                    default: type = LLM_TYPE_UNKNOWN;
1766
0
                }
1767
0
            } break;
1768
0
        case LLM_ARCH_GLM4:
1769
0
            {
1770
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,    hparams.f_norm_rms_eps);
1771
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, false);
1772
0
                switch (hparams.n_layer) {
1773
0
                    case 40: type = LLM_TYPE_9B; break;
1774
0
                    case 61: type = LLM_TYPE_32B; break;
1775
0
                    default: type = LLM_TYPE_UNKNOWN;
1776
0
                }
1777
0
            } break;
1778
0
        case LLM_ARCH_GLM4_MOE:
1779
0
            {
1780
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,     hparams.n_ff_exp);
1781
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,    hparams.f_norm_rms_eps);
1782
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, false);
1783
1784
                // MoE parameters
1785
0
                ml.get_key(LLM_KV_EXPERT_COUNT,                hparams.n_expert);
1786
0
                ml.get_key(LLM_KV_EXPERT_USED_COUNT,           hparams.n_expert_used);
1787
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
1788
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead, false);
1789
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
1790
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
1791
1792
                // Expert gating function (GLM-4.5 uses sigmoid)
1793
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
1794
0
                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
1795
0
                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID;
1796
0
                }
1797
1798
                // NextN/MTP parameters
1799
0
                ml.get_key(LLM_KV_NEXTN_PREDICT_LAYERS,        hparams.nextn_predict_layers, false);
1800
1801
                // TODO: when MTP is implemented, this should probably be updated if needed
1802
0
                hparams.n_layer_kv_from_start = hparams.n_layer - hparams.nextn_predict_layers;
1803
1804
0
                switch (hparams.n_layer) {
1805
0
                    case 47: type = LLM_TYPE_106B_A12B; break; // GLM-4.5-Air (46 layers + 1 NextN layer)
1806
0
                    case 48: type = LLM_TYPE_102B_A12B; break; // Solar Open
1807
0
                    case 93: type = LLM_TYPE_355B_A32B; break; // GLM-4.5 (92 layers + 1 NextN layer)
1808
0
                    default: type = LLM_TYPE_UNKNOWN;
1809
0
                }
1810
0
            } break;
1811
0
        case LLM_ARCH_BITNET:
1812
0
            {
1813
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1814
1815
0
                switch (hparams.n_layer) {
1816
0
                    case 26: type = LLM_TYPE_3B; break;
1817
0
                    default: type = LLM_TYPE_UNKNOWN;
1818
0
                }
1819
0
            } break;
1820
0
        case LLM_ARCH_T5:
1821
0
            {
1822
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,      hparams.f_norm_rms_eps);
1823
0
                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
1824
1825
0
                uint32_t dec_start_token_id;
1826
0
                if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
1827
0
                    hparams.dec_start_token_id = dec_start_token_id;
1828
0
                }
1829
1830
0
                hparams.dec_n_layer = hparams.n_layer;
1831
0
                ml.get_key(LLM_KV_DECODER_BLOCK_COUNT, hparams.dec_n_layer, false);
1832
1833
0
                switch (hparams.n_layer) {
1834
0
                    case 6:  type = LLM_TYPE_60M;  break; // t5-small
1835
0
                    case 8:  type = LLM_TYPE_80M;  break; // flan-t5-small
1836
0
                    case 12:
1837
0
                        switch (hparams.n_ff()) {
1838
0
                            case 3072: type = LLM_TYPE_220M; break; // t5-base
1839
0
                            case 2048: type = LLM_TYPE_250M; break; // flan-t5-base
1840
0
                            default: type = LLM_TYPE_UNKNOWN;
1841
0
                        } break;
1842
0
                    case 24:
1843
0
                        switch (hparams.n_ff()) {
1844
0
                            case 4096:  type = LLM_TYPE_770M; break; // t5-large
1845
0
                            case 2816:  type = LLM_TYPE_780M; break; // flan-t5-large
1846
0
                            case 16384: type = LLM_TYPE_3B;   break; // t5-3b
1847
0
                            case 5120:  type = LLM_TYPE_3B;   break; // flan-t5-xl
1848
0
                            case 65536: type = LLM_TYPE_11B;  break; // t5-11b
1849
0
                            case 10240: type = LLM_TYPE_11B;  break; // flan-t5-xxl
1850
0
                            default: type = LLM_TYPE_UNKNOWN;
1851
0
                        } break;
1852
0
                    default: type = LLM_TYPE_UNKNOWN;
1853
0
                }
1854
0
            } break;
1855
0
        case LLM_ARCH_T5ENCODER:
1856
0
            {
1857
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1858
0
                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
1859
0
                type = LLM_TYPE_UNKNOWN;
1860
0
            } break;
1861
0
        case LLM_ARCH_JAIS:
1862
0
            {
1863
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1864
0
                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
1865
1866
0
                switch (hparams.n_layer) {
1867
0
                    case 24: type = LLM_TYPE_1_3B; break;
1868
0
                    case 40: type = LLM_TYPE_13B; break;
1869
                    /* TODO: add variants */
1870
0
                    default: type = LLM_TYPE_UNKNOWN;
1871
0
                }
1872
0
            } break;
1873
0
        case LLM_ARCH_NEMOTRON:
1874
0
            {
1875
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1876
0
                switch (hparams.n_layer) {
1877
0
                    case 32: type = LLM_TYPE_4B; break;
1878
0
                    default: type = LLM_TYPE_UNKNOWN;
1879
0
                }
1880
0
            } break;
1881
0
        case LLM_ARCH_NEMOTRON_H:
1882
0
        case LLM_ARCH_NEMOTRON_H_MOE:
1883
0
            {
1884
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1885
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1886
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1887
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1888
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
1889
1890
                // A layer is recurrent IFF the n_head_kv value is set to 0 and
1891
                // the n_ff value is set to 0
1892
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
1893
0
                    hparams.recurrent_layer_arr[i] = (hparams.n_head_kv(i) == 0 && hparams.n_ff(i) == 0);
1894
0
                }
1895
1896
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1897
1898
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp,        false);
1899
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp,      false);
1900
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,               hparams.n_expert_shared, false);
1901
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,               hparams.expert_weights_norm, false);
1902
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,              hparams.expert_weights_scale, false);
1903
1904
0
                switch (hparams.n_layer) {
1905
0
                    case 52: type = LLM_TYPE_31B_A3_5B; break; // Nemotron-H_MOE 31B
1906
0
                    case 56: type = LLM_TYPE_9B; break;
1907
0
                    default: type = LLM_TYPE_UNKNOWN;
1908
0
                }
1909
0
            } break;
1910
0
        case LLM_ARCH_EXAONE:
1911
0
            {
1912
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1913
1914
0
                switch (hparams.n_layer) {
1915
0
                    case 32: type = LLM_TYPE_8B; break;
1916
0
                    default: type = LLM_TYPE_UNKNOWN;
1917
0
                }
1918
0
            } break;
1919
0
        case LLM_ARCH_EXAONE4:
1920
0
            {
1921
0
                if (hparams.n_layer == 64) {    // 32B
1922
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1923
0
                    hparams.n_swa = 4096;
1924
0
                    hparams.set_swa_pattern(4);
1925
1926
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1927
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
1928
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1929
0
                }
1930
1931
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa, false);
1932
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1933
1934
0
                switch (hparams.n_layer) {
1935
0
                    case 30: type = LLM_TYPE_1_2B; break;
1936
0
                    case 64: type = LLM_TYPE_32B; break;
1937
0
                    default: type = LLM_TYPE_UNKNOWN;
1938
0
                }
1939
0
            } break;
1940
0
        case LLM_ARCH_EXAONE_MOE:
1941
0
            {
1942
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1943
0
                hparams.n_swa = 128;
1944
0
                hparams.set_swa_pattern(4);
1945
0
                hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1946
0
                hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
1947
1948
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,                hparams.rope_freq_base_train_swa, false);
1949
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,          hparams.n_swa);
1950
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
1951
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,               hparams.n_expert_shared, false);
1952
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
1953
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
1954
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,                hparams.expert_gating_func);
1955
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,              hparams.expert_weights_scale, false);
1956
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,               hparams.expert_weights_norm, false);
1957
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,         hparams.n_layer_dense_lead);
1958
1959
0
                ml.get_key(LLM_KV_NEXTN_PREDICT_LAYERS,              hparams.nextn_predict_layers, false);
1960
1961
0
                switch (hparams.n_layer) {
1962
0
                    case 32: type = LLM_TYPE_30B_A3B; break;
1963
0
                    case 48:
1964
0
                    case 49: type = LLM_TYPE_235B_A22B; break;
1965
0
                    default: type = LLM_TYPE_UNKNOWN;
1966
0
                }
1967
0
            } break;
1968
0
        case LLM_ARCH_RWKV6:
1969
0
        case LLM_ARCH_RWKV6QWEN2:
1970
0
            {
1971
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,     hparams.f_norm_eps, false);
1972
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false);
1973
0
                ml.get_key(LLM_KV_WKV_HEAD_SIZE,               hparams.wkv_head_size);
1974
0
                ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM,          hparams.time_mix_extra_dim);
1975
0
                ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM,        hparams.time_decay_extra_dim);
1976
0
                ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS,      hparams.rescale_every_n_layers, false);
1977
0
                ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT,           hparams.token_shift_count, false);
1978
1979
0
                switch (hparams.n_layer) {
1980
0
                    case 24: type = LLM_TYPE_1_6B; break;
1981
0
                    case 32:
1982
0
                        switch (hparams.n_embd) {
1983
0
                            case 2560: type = LLM_TYPE_3B; break;
1984
0
                            case 4096: type = LLM_TYPE_7B; break;
1985
0
                            default: type = LLM_TYPE_UNKNOWN;
1986
0
                        } break;
1987
0
                    case 61: type = LLM_TYPE_14B; break;
1988
0
                    case 64: type = LLM_TYPE_32B; break;
1989
0
                    default: type = LLM_TYPE_UNKNOWN;
1990
0
                }
1991
0
            } break;
1992
0
        case LLM_ARCH_RWKV7:
1993
0
        case LLM_ARCH_ARWKV7:
1994
0
            {
1995
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,                hparams.f_norm_eps, false);
1996
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,            hparams.f_norm_rms_eps, false);
1997
0
                ml.get_key(LLM_KV_WKV_HEAD_SIZE,                          hparams.wkv_head_size);
1998
0
                ml.get_key(LLM_KV_ATTENTION_DECAY_LORA_RANK,              hparams.n_lora_decay);
1999
0
                ml.get_key(LLM_KV_ATTENTION_ICLR_LORA_RANK,               hparams.n_lora_iclr);
2000
0
                ml.get_key(LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, hparams.n_lora_value_res_mix);
2001
0
                ml.get_key(LLM_KV_ATTENTION_GATE_LORA_RANK,               hparams.n_lora_gate, false);
2002
0
                ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT,                      hparams.token_shift_count, false);
2003
2004
0
                switch (hparams.n_layer) {
2005
0
                    case 12:
2006
0
                        switch (hparams.n_embd) {
2007
0
                            case 768: type = LLM_TYPE_190M; break;
2008
0
                            default: type = LLM_TYPE_UNKNOWN;
2009
0
                        } break;
2010
0
                    case 24:
2011
0
                        switch (hparams.n_embd) {
2012
0
                            case 1024: type = LLM_TYPE_450M; break;
2013
0
                            case 2048: type = LLM_TYPE_1_5B; break;
2014
0
                            default: type = LLM_TYPE_UNKNOWN;
2015
0
                        } break;
2016
0
                    case 28:
2017
0
                        switch (hparams.n_embd) {
2018
0
                            case 1536: type = LLM_TYPE_1_5B; break;
2019
0
                            case 3584: type = LLM_TYPE_7B; break;
2020
0
                            default: type = LLM_TYPE_UNKNOWN;
2021
0
                        } break;
2022
0
                    case 32:
2023
0
                        switch (hparams.n_embd) {
2024
0
                            case 2560: type = LLM_TYPE_2_9B; break;
2025
0
                            case 4096: type = LLM_TYPE_7B; break;
2026
0
                            default: type = LLM_TYPE_UNKNOWN;
2027
0
                        } break;
2028
0
                    case 61:
2029
0
                        switch (hparams.n_embd) {
2030
0
                            case 4096: type = LLM_TYPE_14B; break;
2031
0
                            default: type = LLM_TYPE_UNKNOWN;
2032
0
                        } break;
2033
0
                    default: type = LLM_TYPE_UNKNOWN;
2034
0
                }
2035
0
            } break;
2036
0
        case LLM_ARCH_GRANITE:
2037
0
        case LLM_ARCH_GRANITE_MOE:
2038
0
            {
2039
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2040
0
                ml.get_key(LLM_KV_LOGIT_SCALE,                 hparams.f_logit_scale);
2041
0
                ml.get_key(LLM_KV_RESIDUAL_SCALE,              hparams.f_residual_scale);
2042
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE,             hparams.f_embedding_scale);
2043
0
                ml.get_key(LLM_KV_ATTENTION_SCALE,             hparams.f_attention_scale);
2044
2045
                // Granite uses rope_finetuned as a switch for rope, so default to true
2046
0
                bool rope_finetuned = true;
2047
0
                ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
2048
0
                hparams.rope_finetuned = rope_finetuned;
2049
2050
0
                switch (hparams.n_layer) {
2051
0
                    case 32: type = LLM_TYPE_3B; break;
2052
0
                    case 40: type = LLM_TYPE_3B; break;
2053
                    // Add additional layer/vocab/etc checks here for other model sizes
2054
0
                    default: type = LLM_TYPE_UNKNOWN;
2055
0
                }
2056
2057
                // For Granite MoE Shared
2058
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, /* required */ false);
2059
0
            } break;
2060
0
        case LLM_ARCH_GRANITE_HYBRID:
2061
0
            {
2062
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2063
0
                ml.get_key(LLM_KV_LOGIT_SCALE,                 hparams.f_logit_scale, /* required */ false);
2064
0
                ml.get_key(LLM_KV_RESIDUAL_SCALE,              hparams.f_residual_scale, /* required */ false);
2065
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE,             hparams.f_embedding_scale, /* required */ false);
2066
0
                ml.get_key(LLM_KV_ATTENTION_SCALE,             hparams.f_attention_scale, /* required */ false);
2067
2068
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
2069
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
2070
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
2071
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
2072
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
2073
2074
                // Granite uses rope_finetuned as a switch for rope, so default to true
2075
0
                bool rope_finetuned = true;
2076
0
                ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
2077
0
                hparams.rope_finetuned = rope_finetuned;
2078
2079
                // A layer is recurrent IFF the n_head_kv value is set to 0
2080
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
2081
0
                    hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0;
2082
0
                }
2083
2084
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2085
2086
0
                switch (hparams.n_embd) {
2087
0
                    case 768: type = LLM_TYPE_350M; break;
2088
0
                    case 1536: type = (hparams.n_embd == 2048 ? LLM_TYPE_7B_A1B : LLM_TYPE_1B); break;
2089
0
                    case 2048: case 2560: type = LLM_TYPE_3B; break;
2090
0
                    case 4096: type = LLM_TYPE_32B; break;
2091
0
                    default: type = LLM_TYPE_UNKNOWN;
2092
0
                }
2093
2094
                // For Granite MoE Shared
2095
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, /* required */ false);
2096
0
            } break;
2097
0
        case LLM_ARCH_CHAMELEON:
2098
0
            {
2099
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2100
0
                hparams.f_norm_eps = 1e-5;  // eps for qk-norm, torch default
2101
0
                ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
2102
2103
0
                switch (hparams.n_layer) {
2104
0
                    case 32: type = LLM_TYPE_7B; break;
2105
0
                    case 48: type = LLM_TYPE_34B; break;
2106
0
                    default: type = LLM_TYPE_UNKNOWN;
2107
0
                }
2108
0
            } break;
2109
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
2110
0
            {
2111
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
2112
0
                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS,    hparams.f_norm_group_eps);
2113
0
                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups);
2114
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
2115
0
            } break;
2116
0
        case LLM_ARCH_BAILINGMOE:
2117
0
            {
2118
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2119
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
2120
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2121
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
2122
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
2123
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
2124
2125
0
                switch (hparams.n_layer) {
2126
0
                    case 28: type = LLM_TYPE_16B; break;
2127
0
                    case 88: type = LLM_TYPE_290B; break;
2128
0
                    default: type = LLM_TYPE_UNKNOWN;
2129
0
                }
2130
0
            } break;
2131
0
        case LLM_ARCH_BAILINGMOE2:
2132
0
            {
2133
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2134
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,         hparams.n_layer_dense_lead);
2135
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2136
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp);
2137
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,               hparams.n_expert_shared);
2138
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,              hparams.expert_weights_scale);
2139
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,               hparams.expert_weights_norm, false);
2140
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,                hparams.expert_gating_func);
2141
0
                ml.get_key(LLM_KV_NEXTN_PREDICT_LAYERS,              hparams.nextn_predict_layers, false);
2142
2143
                // TODO: when MTP is implemented, this should probably be updated if needed
2144
0
                hparams.n_layer_kv_from_start = hparams.n_layer - hparams.nextn_predict_layers;
2145
2146
0
                switch (hparams.n_layer) {
2147
0
                    case 20: type = LLM_TYPE_16B_A1B; break;
2148
0
                    case 21: type = LLM_TYPE_16B_A1B; break;
2149
0
                    case 32: type = LLM_TYPE_100B_A6B; break;
2150
0
                    case 33: type = LLM_TYPE_100B_A6B; break;
2151
0
                    default: type = LLM_TYPE_UNKNOWN;
2152
0
                }
2153
0
            } break;
2154
0
        case LLM_ARCH_DOTS1:
2155
0
            {
2156
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2157
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
2158
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2159
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
2160
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
2161
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
2162
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
2163
0
                switch (hparams.n_layer) {
2164
0
                    case 62: type = LLM_TYPE_142B; break;
2165
0
                    default: type = LLM_TYPE_UNKNOWN;
2166
0
                }
2167
0
            } break;
2168
0
        case LLM_ARCH_ERNIE4_5:
2169
0
        case LLM_ARCH_ERNIE4_5_MOE:
2170
0
            {
2171
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2172
0
                if (arch == LLM_ARCH_ERNIE4_5_MOE) {
2173
0
                    ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2174
0
                    ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
2175
0
                    ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP,         hparams.n_moe_layer_step);
2176
0
                    ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,         hparams.n_layer_dense_lead);
2177
0
                }
2178
2179
0
                switch (hparams.n_layer) {
2180
0
                    case 18: type = LLM_TYPE_0_3B; break;
2181
0
                    case 28: type = LLM_TYPE_21B_A3B; break;
2182
0
                    case 54: type = LLM_TYPE_300B_A47B; break;
2183
0
                    default: type = LLM_TYPE_UNKNOWN;
2184
0
                }
2185
0
            } break;
2186
0
        case LLM_ARCH_FALCON_H1:
2187
0
            {
2188
                // Common parameters
2189
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2190
2191
                // SSM parameters
2192
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
2193
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
2194
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
2195
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
2196
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
2197
2198
0
                std::fill(hparams.recurrent_layer_arr.begin(), hparams.recurrent_layer_arr.end(), true);
2199
2200
0
                switch (hparams.n_layer) {
2201
0
                    case 36:
2202
0
                        type = LLM_TYPE_0_5B; break;
2203
0
                    case 24:
2204
0
                        type = LLM_TYPE_1_5B; break;
2205
0
                    case 66:
2206
0
                        type = LLM_TYPE_1B; break;
2207
0
                    case 32:
2208
0
                        type = LLM_TYPE_3B; break;
2209
0
                    case 44:
2210
0
                        type = LLM_TYPE_7B; break;
2211
0
                    case 72:
2212
0
                        type = LLM_TYPE_34B; break;
2213
0
                    default:
2214
0
                        type = LLM_TYPE_UNKNOWN;
2215
0
                }
2216
0
            } break;
2217
0
        case LLM_ARCH_HUNYUAN_MOE:
2218
0
            {
2219
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2220
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2221
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp);
2222
2223
0
                switch (hparams.n_layer) {
2224
0
                    case 32: type = LLM_TYPE_A13B; break;
2225
0
                    default: type = LLM_TYPE_UNKNOWN;
2226
0
                }
2227
0
            } break;
2228
0
        case LLM_ARCH_HUNYUAN_DENSE:
2229
0
            {
2230
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2231
2232
0
                switch (hparams.n_embd) {
2233
0
                    case 1024: type = LLM_TYPE_0_5B; break;
2234
0
                    case 2048: type = LLM_TYPE_1_8B; break;
2235
0
                    case 3072: type = LLM_TYPE_4B; break;
2236
0
                    case 4096: type = LLM_TYPE_7B; break;
2237
0
                    default: type = LLM_TYPE_UNKNOWN;
2238
0
                }
2239
0
            } break;
2240
0
        case LLM_ARCH_SMOLLM3:
2241
0
            {
2242
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2243
0
                hparams.n_no_rope_layer_step = 4;
2244
2245
0
                switch (hparams.n_layer) {
2246
0
                    case 36: type = LLM_TYPE_3B; break;
2247
0
                    default: type = LLM_TYPE_UNKNOWN;
2248
0
                }
2249
0
            } break;
2250
0
        case LLM_ARCH_OPENAI_MOE:
2251
0
            {
2252
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2253
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2254
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa);
2255
2256
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
2257
0
                hparams.set_swa_pattern(2);
2258
2259
0
                hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
2260
0
                hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
2261
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
2262
2263
0
                switch (hparams.n_layer) {
2264
0
                    case 24: type = LLM_TYPE_20B; break;
2265
0
                    case 36: type = LLM_TYPE_120B; break;
2266
0
                    default: type = LLM_TYPE_UNKNOWN;
2267
0
                }
2268
0
            } break;
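A hedged sketch of the resulting layer layout, assuming set_swa_pattern(p) marks the first p - 1 layers of every group of p as sliding-window and leaves the last one as full attention (so p == 2 simply alternates the two):

    #include <cstdio>
    #include <vector>

    // assumed semantics of the pattern value p (see lead-in above)
    static std::vector<bool> make_swa_pattern(int n_layer, int p) {
        std::vector<bool> is_swa(n_layer);
        for (int il = 0; il < n_layer; ++il) {
            is_swa[il] = (il % p) < (p - 1); // last layer of each group gets full attention
        }
        return is_swa;
    }

    int main() {
        const auto is_swa = make_swa_pattern(8, 2); // p == 2 -> alternate SWA / full
        for (int il = 0; il < (int) is_swa.size(); ++il) {
            std::printf("layer %d: %s\n", il, is_swa[il] ? "sliding-window" : "full attention");
        }
        return 0;
    }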
2269
0
        case LLM_ARCH_LFM2:
2270
0
            {
2271
0
                ml.get_key(LLM_KV_SHORTCONV_L_CACHE,           hparams.n_shortconv_l_cache);
2272
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2273
0
                for (uint32_t il = 0; il < hparams.n_layer; ++il) {
2274
0
                    hparams.recurrent_layer_arr[il] = hparams.n_head_kv(il) == 0;
2275
0
                }
2276
0
                hparams.n_layer_dense_lead = hparams.n_layer;
2277
0
                switch (hparams.n_ff()) {
2278
0
                    case  4608: type = LLM_TYPE_350M; break;
2279
0
                    case  6912: type = LLM_TYPE_700M; break;
2280
0
                    case  8192: type = LLM_TYPE_1_2B; break;
2281
0
                    case 10752: type = LLM_TYPE_2_6B; break;
2282
0
                    default:    type = LLM_TYPE_UNKNOWN;
2283
0
                }
2284
0
            } break;
2285
0
        case LLM_ARCH_LFM2MOE:
2286
0
            {
2287
0
                ml.get_key(LLM_KV_SHORTCONV_L_CACHE,           hparams.n_shortconv_l_cache);
2288
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2289
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
2290
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2291
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func);
2292
2293
0
                for (uint32_t il = 0; il < hparams.n_layer; ++il) {
2294
0
                    hparams.recurrent_layer_arr[il] = hparams.n_head_kv(il) == 0;
2295
0
                }
2296
2297
0
                type = LLM_TYPE_8B_A1B;
2298
0
            } break;
2299
0
        case LLM_ARCH_SMALLTHINKER:
2300
0
            {
2301
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
2302
2303
0
                if (found_swa && hparams.n_swa > 0) {
2304
0
                    hparams.swa_type      = LLAMA_SWA_TYPE_STANDARD;
2305
0
                    hparams.n_swa         = 4096;
2306
0
                    hparams.set_swa_pattern(4, true);
2307
2308
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
2309
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
2310
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
2311
0
                } else {
2312
0
                    hparams.swa_type             = LLAMA_SWA_TYPE_NONE;
2313
0
                    hparams.n_no_rope_layer_step = hparams.n_layer;
2314
0
                }
2315
2316
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp, false);
2317
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2318
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
2319
2320
0
                switch (hparams.n_layer) {
2321
0
                    case 32: type = LLM_TYPE_4B;  break;
2322
0
                    case 52: type = LLM_TYPE_20B; break;
2323
0
                    default: type = LLM_TYPE_UNKNOWN;
2324
0
                }
2325
0
            } break;
2326
0
        case LLM_ARCH_GROVEMOE:
2327
0
            {
2328
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2329
0
                ml.get_key(LLM_KV_EXPERT_CHUNK_FEED_FORWARD_LENGTH,  hparams.n_ff_chexp);
2330
0
                ml.get_key(LLM_KV_EXPERT_GROUP_SCALE,                hparams.expert_group_scale);
2331
0
                ml.get_key(LLM_KV_EXPERTS_PER_GROUP,                 hparams.n_group_experts);
2332
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2333
2334
0
                switch (hparams.n_layer) {
2335
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
2336
0
                    default: type = LLM_TYPE_UNKNOWN;
2337
0
                }
2338
0
            } break;
2339
0
        case LLM_ARCH_APERTUS:
2340
0
            {
2341
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2342
0
                ml.get_key_or_arr(LLM_KV_XIELU_ALPHA_N,        hparams.xielu_alpha_n, hparams.n_layer);
2343
0
                ml.get_key_or_arr(LLM_KV_XIELU_ALPHA_P,        hparams.xielu_alpha_p, hparams.n_layer);
2344
0
                ml.get_key_or_arr(LLM_KV_XIELU_BETA,           hparams.xielu_beta,    hparams.n_layer);
2345
0
                ml.get_key_or_arr(LLM_KV_XIELU_EPS,            hparams.xielu_eps,     hparams.n_layer);
2346
2347
0
                switch (hparams.n_layer) {
2348
0
                    case 32: type = LLM_TYPE_8B; break;
2349
0
                    default: type = LLM_TYPE_UNKNOWN;
2350
0
                }
2351
0
            } break;
2352
0
        case LLM_ARCH_MINIMAX_M2:
2353
0
            {
2354
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,  hparams.f_norm_rms_eps);
2355
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,   hparams.n_ff_exp);
2356
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,           hparams.expert_gating_func, false);
2357
2358
0
                switch (hparams.n_layer) {
2359
0
                    case 62: type = LLM_TYPE_230B_A10B; break;
2360
0
                    default: type = LLM_TYPE_UNKNOWN;
2361
0
                }
2362
0
            } break;
2363
0
        case LLM_ARCH_COGVLM:
2364
0
            {
2365
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2366
0
                switch (hparams.n_layer) {
2367
0
                    case 32: type = LLM_TYPE_13B; break;
2368
0
                    default: type = LLM_TYPE_UNKNOWN;
2369
0
                }
2370
0
            } break;
2371
0
        case LLM_ARCH_PANGU_EMBED:
2372
0
            {
2373
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2374
0
                switch (hparams.n_layer) {
2375
0
                    case 26: type = LLM_TYPE_1B; break; // openPangu-Embedded-1B-V1.1
2376
0
                    case 34: type = LLM_TYPE_7B; break; // openPangu-Embedded-7B-V1.1
2377
0
                    default: type = LLM_TYPE_UNKNOWN;
2378
0
                }
2379
0
            } break;
2380
0
        case LLM_ARCH_QWEN3NEXT:
2381
0
            {
2382
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp, false);
2383
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
2384
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2385
2386
                // Load linear attention (gated delta net) parameters
2387
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
2388
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
2389
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
2390
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
2391
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
2392
2393
                // Mark recurrent layers (linear attention layers)
2394
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
2395
0
                    hparams.recurrent_layer_arr[i] = ((i + 1) % 4 != 0); // TODO: extract the magic 4 from "full_attention_interval"
2396
0
                }
2397
2398
0
                switch (hparams.n_layer) {
2399
0
                    case 48: type = LLM_TYPE_80B_A3B; break;
2400
0
                    default: type = LLM_TYPE_UNKNOWN;
2401
0
                }
2402
0
            } break;
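The modulo test above makes every fourth layer a full-attention layer and the rest recurrent (linear attention). A compact sketch that just lists the full-attention layers; full_attention_interval is a hypothetical name echoing the TODO, not a real metadata key:

    #include <cstdio>

    int main() {
        const int n_layer = 12;
        const int full_attention_interval = 4; // hypothetical name for the magic 4 above

        std::printf("full-attention layers:");
        for (int i = 0; i < n_layer; ++i) {
            if ((i + 1) % full_attention_interval == 0) {
                std::printf(" %d", i); // prints 3 7 11 for n_layer == 12
            }
        }
        std::printf("\n");
        return 0;
    }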
2403
0
        case LLM_ARCH_MISTRAL3:
2404
0
            {
2405
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2406
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE, hparams.f_attn_temp_scale, false);
2407
2408
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_FAST, hparams.yarn_beta_fast,    false);
2409
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_SLOW, hparams.yarn_beta_slow,    false);
2410
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL,   hparams.rope_yarn_log_mul, false);
2411
2412
0
                hparams.f_attn_temp_offset = 0.0f;
2413
2414
                // TODO: maybe add n_attn_temp_floor_scale as a separate KV?
2415
0
                if (hparams.f_attn_temp_scale != 0.0f) {
2416
0
                    hparams.n_attn_temp_floor_scale = hparams.n_ctx_orig_yarn;
2417
0
                    if (hparams.n_attn_temp_floor_scale == 0) {
2418
0
                        throw std::runtime_error("invalid n_ctx_orig_yarn for attention temperature scaling");
2419
0
                    }
2420
0
                }
2421
2422
0
                switch (hparams.n_layer) {
2423
0
                    case 26: type = LLM_TYPE_3B; break;
2424
0
                    case 34: type = LLM_TYPE_8B; break;
2425
0
                    case 40: type = LLM_TYPE_14B; break;
2426
0
                    default: type = LLM_TYPE_UNKNOWN;
2427
0
                }
2428
0
            } break;
2429
0
        case LLM_ARCH_MIMO2:
2430
0
            {
2431
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2432
2433
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
2434
2435
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
2436
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,   hparams.n_swa);
2437
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,         hparams.rope_freq_base_train_swa);
2438
0
                ml.get_key_or_arr(LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN, hparams.swa_layers, hparams.n_layer);
2439
2440
0
                switch (hparams.n_layer) {
2441
0
                    case 48: type = LLM_TYPE_310B_A15B; break;
2442
0
                    default: type = LLM_TYPE_UNKNOWN;
2443
0
                }
2444
0
            } break;
2445
0
        default: throw std::runtime_error("unsupported model architecture");
2446
0
    }
2447
2448
0
    pimpl->n_bytes = ml.n_bytes;
2449
2450
0
    pimpl->desc_str = arch_name() + " " + type_name() + " " + ml.ftype_name();
2451
2452
0
    if (hparams.f_max_alibi_bias > 0.0f) {
2453
0
        hparams.use_alibi = true;
2454
0
    }
2455
2456
0
    hparams.rope_type = llama_model_rope_type(this);
2457
0
}
2458
2459
0
void llama_model::load_vocab(llama_model_loader & ml) {
2460
0
    const auto kv = LLM_KV(arch);
2461
2462
0
    vocab.load(ml, kv);
2463
0
}
2464
2465
0
bool llama_model::load_tensors(llama_model_loader & ml) {
2466
0
    const auto & split_mode   = params.split_mode;
2467
0
    const auto & use_mlock    = params.use_mlock;
2468
0
    const auto & tensor_split = params.tensor_split;
2469
2470
0
    const int n_layer      = hparams.n_layer;
2471
0
    const int n_gpu_layers = this->n_gpu_layers();
2472
2473
0
    const bool use_mmap_buffer = true;
2474
2475
0
    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s, direct_io = %s)\n",
2476
0
        __func__, ml.use_mmap ? "true" : "false", ml.use_direct_io ? "true" : "false");
2477
2478
    // build a list of buffer types for the CPU and GPU devices
2479
0
    pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts, params.no_host);
2480
0
    for (auto * dev : devices) {
2481
0
        buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
2482
        // add CPU buffer types as a fallback
2483
0
        buft_list.insert(buft_list.end(), pimpl->cpu_buft_list.begin(), pimpl->cpu_buft_list.end());
2484
0
        pimpl->gpu_buft_list.emplace(dev, std::move(buft_list));
2485
0
    }
2486
2487
0
    ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
2488
0
    if (cpu_dev == nullptr) {
2489
0
        throw std::runtime_error(format("%s: no CPU backend found", __func__));
2490
0
    }
2491
2492
    // calculate the split points
2493
0
    bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + n_devices(), [](float x) { return x == 0.0f; });
2494
0
    std::vector<float> splits(n_devices());
2495
0
    if (all_zero) {
2496
        // default split, by free memory
2497
0
        for (size_t i = 0; i < n_devices(); ++i) {
2498
0
            ggml_backend_dev_t dev = devices[i];
2499
0
            size_t total;
2500
0
            size_t free;
2501
0
            ggml_backend_dev_memory(dev, &free, &total);
2502
2503
            // devices can return 0 bytes for free and total memory if they do not
2504
            // have any to report. in this case, we will use the host memory as a fallback
2505
            // fixes: https://github.com/ggml-org/llama.cpp/issues/18577
2506
0
            if (free == 0 && total == 0) {
2507
0
                ggml_backend_dev_memory(cpu_dev, &free, &total);
2508
0
            }
2509
0
            splits[i] = free;
2510
0
        }
2511
0
    } else {
2512
0
        std::copy(tensor_split, tensor_split + n_devices(), splits.begin());
2513
0
    }
2514
2515
    // sum and normalize the splits to get the split points
2516
0
    float split_sum = 0.0f;
2517
0
    for (size_t i = 0; i < n_devices(); ++i) {
2518
0
        split_sum += splits[i];
2519
0
        splits[i] = split_sum;
2520
0
    }
2521
0
    for (size_t i = 0; i < n_devices(); ++i) {
2522
0
        splits[i] /= split_sum;
2523
0
    }
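As a worked example of the two loops above, with made-up free-memory figures: {8 GiB, 24 GiB} becomes prefix sums {8, 32} and, after normalization, split points {0.25, 1.0}. A standalone sketch:

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<float> splits = { 8.0f, 24.0f }; // hypothetical free memory per device, in GiB

        float split_sum = 0.0f;
        for (size_t i = 0; i < splits.size(); ++i) {
            split_sum += splits[i];
            splits[i] = split_sum;      // prefix sums: {8, 32}
        }
        for (size_t i = 0; i < splits.size(); ++i) {
            splits[i] /= split_sum;     // normalized split points: {0.25, 1.0}
        }

        for (size_t i = 0; i < splits.size(); ++i) {
            std::printf("device %zu split point: %.2f\n", i, splits[i]);
        }
        return 0;
    }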
2524
2525
0
    const int i_gpu_start = std::max(int(hparams.n_layer) + 1 - n_gpu_layers, 0);
2526
0
    const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, int(n_layer) + 1);
2527
0
    auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev {
2528
0
        const bool is_swa = il < int(hparams.n_layer) && hparams.is_swa(il);
2529
0
        if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
2530
0
            LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(cpu_dev), is_swa);
2531
0
            return {cpu_dev, &pimpl->cpu_buft_list};
2532
0
        }
2533
0
        const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
2534
0
        auto * dev = devices.at(layer_gpu);
2535
0
        LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(dev), is_swa);
2536
0
        return {dev, &pimpl->gpu_buft_list.at(dev)};
2537
0
    };
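Continuing that example, a standalone sketch of how the upper_bound lookup maps the offloaded layers onto devices from those split points (layer counts are made up):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<float> splits = { 0.25f, 1.0f }; // cumulative split points from before
        const int act_gpu_layers = 8;
        const int i_gpu_start    = 0;

        for (int il = i_gpu_start; il < i_gpu_start + act_gpu_layers; ++il) {
            const float frac = float(il - i_gpu_start) / act_gpu_layers;
            const int   dev  = (int) (std::upper_bound(splits.begin(), splits.end(), frac) - splits.begin());
            std::printf("layer %d -> device %d\n", il, dev); // layers 0-1 -> device 0, 2-7 -> device 1
        }
        return 0;
    }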
2538
2539
    // assign the input layer
2540
    // there is very little benefit to offloading the input layer, so always keep it on the CPU
2541
0
    pimpl->dev_input = { cpu_dev, &pimpl->cpu_buft_list };
2542
2543
    // assign the repeating layers to the devices according to the splits
2544
0
    pimpl->dev_layer.resize(n_layer);
2545
0
    for (int il = 0; il < n_layer; ++il) {
2546
0
        pimpl->dev_layer[il] = get_layer_buft_list(il);
2547
0
    }
2548
2549
    // assign the output layer
2550
0
    pimpl->dev_output = get_layer_buft_list(n_layer);
2551
2552
    // one ggml context per buffer type
2553
0
    int max_n_tensors = ml.n_tensors;
2554
0
    max_n_tensors += 1;         // duplicated output tensor
2555
0
    max_n_tensors += n_layer*2; // duplicated rope freq tensors
2556
0
    const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;
2557
2558
    // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
2559
0
    struct ggml_backend_buft_comparator {
2560
0
        bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
2561
0
            return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0;
2562
0
        }
2563
0
    };
2564
0
    std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;
2565
2566
0
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
2567
0
        auto it = ctx_map.find(buft);
2568
0
        if (it == ctx_map.end()) {
2569
0
            ggml_init_params params = {
2570
0
                /*.mem_size   =*/ ctx_size,
2571
0
                /*.mem_buffer =*/ NULL,
2572
0
                /*.no_alloc   =*/ true,
2573
0
            };
2574
2575
0
            ggml_context * ctx = ggml_init(params);
2576
0
            if (!ctx) {
2577
0
                throw std::runtime_error(format("failed to create ggml context"));
2578
0
            }
2579
2580
0
            ctx_map.emplace(buft, ctx);
2581
2582
0
            return ctx;
2583
0
        }
2584
0
        return it->second.get();
2585
0
    };
2586
2587
0
    const auto TENSOR_DUPLICATED   = llama_model_loader::TENSOR_DUPLICATED;
2588
0
    const auto TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED;
2589
0
    const auto TENSOR_SKIP         = llama_model_loader::TENSOR_SKIP;
2590
2591
    // create tensors for the weights
2592
0
    {
2593
        // note: cast to int64_t since we will use these for the tensor dimensions
2594
0
        const int64_t n_head        = hparams.n_head();
2595
0
        const int64_t n_head_kv     = hparams.n_head_kv();
2596
0
        const int64_t n_embd        = hparams.n_embd;
2597
0
        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa();
2598
0
        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa();
2599
0
        const int64_t n_embd_head_k = hparams.n_embd_head_k;
2600
0
        const int64_t n_embd_head_v = hparams.n_embd_head_v;
2601
0
        const int64_t n_ff          = hparams.n_ff();
2602
0
        const int64_t n_embd_gqa    = n_embd_v_gqa;
2603
0
        const int64_t n_vocab       = vocab.n_tokens();
2604
0
        const int64_t n_token_types = vocab.n_token_types();
2605
0
        const int64_t n_rot         = hparams.n_rot;
2606
0
        const int64_t n_expert      = hparams.n_expert;
2607
0
        const int64_t n_expert_used = hparams.n_expert_used;
2608
0
        const int64_t n_ctx_train   = hparams.n_ctx_train;
2609
2610
0
        if (n_expert > 0 && hparams.n_expert_used == 0) {
2611
0
            throw std::runtime_error("model has expert layers but no expert layers are used");
2612
0
        }
2613
2614
0
        int n_moved_tensors = 0;
2615
0
        ggml_tensor * first_moved_tensor = nullptr;
2616
0
        ggml_backend_buffer_type_t first_moved_from_buft = nullptr;
2617
0
        ggml_backend_buffer_type_t first_moved_to_buft = nullptr;
2618
2619
0
        auto create_tensor = [&](const LLM_TN_IMPL & tn, const std::initializer_list<int64_t> & ne, int flags) -> ggml_tensor * {
2620
0
            ggml_tensor * t_meta = ml.get_tensor_meta(tn.str().c_str());
2621
2622
0
            if (!t_meta) {
2623
0
                if (flags & TENSOR_NOT_REQUIRED) {
2624
0
                    return nullptr;
2625
0
                }
2626
0
                throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str()));
2627
0
            }
2628
2629
            // some models use the token embedding tensor as the output, but since these are used in different layers and with different ops
2630
            // the tensor is duplicated
2631
            // to handle this, we check if the tensor is duplicated, and if so, we assume that it is being loaded as the output tensor
2632
0
            llm_tensor tn_tensor = tn.tensor;
2633
0
            if (tn.tensor == LLM_TENSOR_TOKEN_EMBD && flags & TENSOR_DUPLICATED) {
2634
0
                tn_tensor = LLM_TENSOR_OUTPUT;
2635
0
            }
2636
2637
0
            llm_tensor_info info;
2638
0
            try {
2639
0
                info = llm_tensor_info_for(tn_tensor);
2640
0
            } catch (const std::out_of_range & e) {
2641
0
                throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str()));
2642
0
            }
2643
2644
            // skip unused tensors
2645
0
            if (info.op == GGML_OP_NONE || flags & TENSOR_SKIP) {
2646
0
                const size_t nbytes = ggml_nbytes(t_meta);
2647
0
                LLAMA_LOG_WARN("model has unused tensor %s (size = %zu bytes) -- ignoring\n", tn.str().c_str(), nbytes);
2648
2649
0
                ml.size_data -= nbytes;
2650
0
                ml.n_created++;
2651
2652
0
                return nullptr;
2653
0
            }
2654
2655
            // tensors with "bias" suffix are always used with GGML_OP_ADD or GGML_OP_ADD_ID
2656
0
            ggml_op op;
2657
0
            bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0;
2658
0
            if (bias) {
2659
0
                if (info.op == GGML_OP_MUL_MAT_ID) {
2660
0
                    op = GGML_OP_ADD_ID;
2661
0
                } else {
2662
0
                    op = GGML_OP_ADD;
2663
0
                }
2664
0
            } else {
2665
0
                op = info.op;
2666
0
            }
2667
2668
            // sanity checks
2669
0
            if (info.layer == LLM_TENSOR_LAYER_INPUT || info.layer == LLM_TENSOR_LAYER_OUTPUT) {
2670
0
                if (tn.bid != -1) {
2671
0
                    GGML_ABORT("input/output layer tensor %s used with a layer number", tn.str().c_str());
2672
0
                }
2673
0
            } else {
2674
0
                if (tn.bid == -1) {
2675
0
                    GGML_ABORT("repeating layer tensor %s used without a layer number", tn.str().c_str());
2676
0
                }
2677
0
            }
2678
2679
            // select the buffer type for this tensor
2680
0
            buft_list_t * buft_list;
2681
0
            switch (info.layer) {
2682
0
                case LLM_TENSOR_LAYER_INPUT:
2683
0
                    buft_list = pimpl->dev_input.buft_list;
2684
0
                    break;
2685
0
                case LLM_TENSOR_LAYER_OUTPUT:
2686
0
                    buft_list = pimpl->dev_output.buft_list;
2687
0
                    break;
2688
0
                case LLM_TENSOR_LAYER_REPEATING:
2689
0
                    buft_list = pimpl->dev_layer.at(tn.bid).buft_list;
2690
0
                    break;
2691
0
                default:
2692
0
                    GGML_ABORT("invalid layer %d for tensor %s", info.layer, tn.str().c_str());
2693
0
            }
2694
2695
0
            ggml_backend_buffer_type_t buft = nullptr;
2696
2697
            // check overrides
2698
0
            if (ml.tensor_buft_overrides) {
2699
0
                std::string tensor_name = tn.str();
2700
0
                for (const auto * overrides = ml.tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) {
2701
0
                    std::regex pattern(overrides->pattern);
2702
0
                    if (std::regex_search(tensor_name, pattern)) {
2703
0
                        if (overrides->buft == ggml_backend_cpu_buffer_type()) {
2704
                            // when overriding to a CPU buffer, consider the extra buffer types
2705
0
                            buft = select_weight_buft(hparams, t_meta, op, pimpl->cpu_buft_list);
2706
0
                        } else {
2707
0
                            buft = overrides->buft;
2708
0
                        }
2709
2710
0
                        LLAMA_LOG_DEBUG("tensor %s (%zu MiB %s) buffer type overridden to %s\n",
2711
0
                                tensor_name.c_str(),
2712
0
                                ggml_nbytes(t_meta) / 1024 / 1024, ggml_type_name(t_meta->type),
2713
0
                                ggml_backend_buft_name(buft));
2714
0
                        break;
2715
0
                    }
2716
0
                }
2717
0
            }
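A self-contained sketch of the override matching above, where the first regex that matches the tensor name decides its buffer type; the patterns and buffer-type labels here are purely illustrative and not real command-line syntax:

    #include <cstdio>
    #include <regex>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        const std::vector<std::pair<std::string, std::string>> overrides = {
            { "ffn_.*_exps", "CPU"  },  // e.g. keep MoE expert weights in host memory
            { "output",      "GPU0" },
        };

        const std::string tensor_name = "blk.7.ffn_down_exps.weight"; // made-up tensor name

        for (const auto & ov : overrides) {
            if (std::regex_search(tensor_name, std::regex(ov.first))) {
                std::printf("%s -> %s\n", tensor_name.c_str(), ov.second.c_str());
                break; // first match wins
            }
        }
        return 0;
    }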
2718
2719
0
            if (!buft) {
2720
0
                buft = select_weight_buft(hparams, t_meta, op, *buft_list);
2721
0
                if (!buft) {
2722
0
                    throw std::runtime_error(format("failed to find a compatible buffer type for tensor %s", tn.str().c_str()));
2723
0
                }
2724
0
            }
2725
2726
            // avoid using a host buffer when using mmap
2727
0
            auto * buft_dev = ggml_backend_buft_get_device(buft);
2728
0
            if (ml.use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) {
2729
0
                auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
2730
0
                if (!cpu_dev) {
2731
0
                    throw std::runtime_error("no CPU backend found");
2732
0
                }
2733
0
                buft = ggml_backend_dev_buffer_type(cpu_dev);
2734
0
            }
2735
2736
0
            if (buft != buft_list->front().second) {
2737
0
                n_moved_tensors++;
2738
0
                if (!first_moved_tensor) {
2739
0
                    first_moved_tensor = t_meta;
2740
0
                    first_moved_from_buft = buft_list->front().second;
2741
0
                    first_moved_to_buft   = buft;
2742
0
                }
2743
0
            }
2744
2745
0
            ggml_context * ctx = ctx_for_buft(buft);
2746
2747
            // if duplicated, check if the original tensor was allocated in the same buffer type context and avoid creating a new one
2748
0
            if (flags & TENSOR_DUPLICATED) {
2749
0
                ggml_tensor * t = ggml_get_tensor(ctx, tn.str().c_str());
2750
0
                if (t) {
2751
0
                    return t;
2752
0
                }
2753
0
            }
2754
0
            return ml.create_tensor(ctx, tn, ne, flags);
2755
0
        };
2756
2757
0
        layers.resize(n_layer);
2758
2759
        // TODO: move to a separate function
2760
0
        const auto tn = LLM_TN(arch);
2761
0
        switch (arch) {
2762
0
            case LLM_ARCH_LLAMA:
2763
0
            case LLM_ARCH_REFACT:
2764
0
            case LLM_ARCH_MINICPM:
2765
0
            case LLM_ARCH_GRANITE:
2766
0
            case LLM_ARCH_GRANITE_MOE:
2767
0
            case LLM_ARCH_MISTRAL3:
2768
0
            case LLM_ARCH_LLAMA_EMBED:
2769
0
                {
2770
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2771
2772
                    // output
2773
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2774
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
2775
2776
                    // if output is NULL, init from the input tok embed
2777
0
                    if (output == NULL) {
2778
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
2779
0
                    }
2780
2781
0
                    for (int i = 0; i < n_layer; ++i) {
2782
0
                        auto & layer = layers[i];
2783
2784
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2785
2786
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
2787
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
2788
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
2789
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
2790
2791
                        // optional bias tensors
2792
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
2793
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
2794
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
2795
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
2796
2797
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
2798
2799
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
2800
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2801
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2802
0
                        }
2803
0
                        else {
2804
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2805
0
                        }
2806
2807
0
                        if (n_expert == 0) {
2808
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
2809
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
2810
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
2811
2812
                            // optional MLP bias
2813
0
                            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
2814
0
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
2815
0
                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
2816
0
                        } else {
2817
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
2818
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, TENSOR_NOT_REQUIRED);
2819
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
2820
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
2821
2822
                            // For Granite MoE Shared
2823
0
                            if (hparams.n_ff_shexp > 0) {
2824
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
2825
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
2826
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
2827
0
                            }
2828
0
                        }
2829
0
                    }
2830
0
                } break;
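The attention shapes in this block follow the usual GQA layout: Q and the output projection use n_embd_head_k * n_head, while K and V use the smaller per-KV-head widths (n_embd_k_gqa and n_embd_v_gqa). A worked example with hypothetical Llama-style dimensions, not taken from any particular model file:

    #include <cstdio>

    int main() {
        const int n_embd        = 4096;
        const int n_head        = 32;
        const int n_head_kv     = 8;
        const int n_embd_head_k = 128;
        const int n_embd_head_v = 128;

        const int n_embd_k_gqa = n_embd_head_k * n_head_kv; // 1024
        const int n_embd_v_gqa = n_embd_head_v * n_head_kv; // 1024

        std::printf("wq: {%d, %d}\n", n_embd, n_embd_head_k * n_head); // {4096, 4096}
        std::printf("wk: {%d, %d}\n", n_embd, n_embd_k_gqa);           // {4096, 1024}
        std::printf("wv: {%d, %d}\n", n_embd, n_embd_v_gqa);           // {4096, 1024}
        std::printf("wo: {%d, %d}\n", n_embd_head_k * n_head, n_embd); // {4096, 4096}
        return 0;
    }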
2831
0
            case LLM_ARCH_LLADA:
2832
0
                {
2833
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
2834
2835
                    // output
2836
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
2837
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED);
2838
2839
                    // if output is NULL, init from the input tok embed
2840
0
                    if (output == NULL) {
2841
0
                        output =
2842
0
                            create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
2843
0
                    }
2844
2845
0
                    for (int i = 0; i < n_layer; ++i) {
2846
0
                        auto & layer = layers[i];
2847
2848
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
2849
2850
                        // Use separate Q, K, V projections without bias, matching LLaDALlamaBlock
2851
0
                        layer.wq =
2852
0
                            create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
2853
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, 0);
2854
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, 0);
2855
                        // No bias for QKV projections as per config: include_bias=false, include_qkv_bias=false
2856
0
                        layer.wo =
2857
0
                            create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
2858
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED);
2859
2860
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
2861
2862
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), { n_rot / 2 },
2863
0
                                                         TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2864
2865
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
2866
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
2867
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
2868
2869
                        // optional MLP bias
2870
0
                        layer.ffn_gate_b =
2871
0
                            create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), { n_ff }, TENSOR_NOT_REQUIRED);
2872
0
                        layer.ffn_down_b =
2873
0
                            create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED);
2874
0
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), { n_ff }, TENSOR_NOT_REQUIRED);
2875
0
                    }
2876
0
                }
2877
0
                break;
2878
0
            case LLM_ARCH_LLADA_MOE:
2879
0
                {
2880
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2881
2882
                    // output
2883
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2884
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
2885
2886
0
                    GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for llada-moe");
2887
0
                    GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for llada-moe");
2888
2889
0
                    for (int i = 0; i < n_layer; ++i) {
2890
0
                        auto & layer = layers[i];
2891
2892
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2893
2894
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
2895
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
2896
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
2897
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
2898
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
2899
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
2900
2901
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
2902
2903
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
2904
2905
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
2906
2907
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
2908
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
2909
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
2910
0
                    }
2911
0
                } break;
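As a worked example of the n_ff_exp fallback in this block: if the metadata omits the per-expert width and reports n_ff = 8192 with n_expert_used = 4, each expert FFN is sized 8192 / 4 = 2048.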
2912
0
            case LLM_ARCH_LLAMA4:
2913
0
                {
2914
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2915
2916
                    // output
2917
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2918
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
2919
2920
                    // if output is NULL, init from the input tok embed
2921
0
                    if (output == NULL) {
2922
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
2923
0
                    }
2924
2925
0
                    for (int i = 0; i < n_layer; ++i) {
2926
0
                        bool is_moe_layer = hparams.n_moe_layer_step > 0 && (i + 1) % hparams.n_moe_layer_step == 0;
2927
2928
0
                        auto & layer = layers[i];
2929
2930
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2931
2932
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
2933
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
2934
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
2935
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
2936
2937
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
2938
2939
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2940
2941
0
                        if (is_moe_layer) {
2942
0
                            int n_ff_exp = hparams.n_ff_exp;
2943
2944
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
2945
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
2946
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff_exp, n_embd, n_expert}, 0);
2947
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
2948
2949
                            // Shared expert
2950
0
                            const int64_t n_ff_shexp = n_ff_exp;
2951
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {    n_embd, n_ff_shexp}, 0);
2952
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd    }, 0);
2953
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {    n_embd, n_ff_shexp}, 0);
2954
0
                        } else {
2955
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
2956
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
2957
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
2958
0
                        }
2959
0
                    }
2960
0
                } break;
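Reading the interleave test in this block concretely: with n_moe_layer_step == 2, layers 1, 3, 5, ... (zero-based) satisfy (i + 1) % 2 == 0 and get the expert and shared-expert tensors, while the remaining layers get the dense ffn_gate/ffn_down/ffn_up set.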
2961
0
            case LLM_ARCH_DECI:
2962
0
                {
2963
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2964
2965
                    // output
2966
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2967
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
2968
2969
                    // if output is NULL, init from the input tok embed
2970
0
                    if (output == NULL) {
2971
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
2972
0
                    }
2973
2974
0
                    for (int i = 0; i < n_layer; ++i) {
2975
0
                        auto & layer = layers[i];
2976
0
                        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa(i);
2977
0
                        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa(i);
2978
0
                        const int64_t n_embd_gqa    = hparams.n_embd_v_gqa(i);
2979
0
                        const int64_t n_ff          = hparams.n_ff(i);
2980
0
                        const int64_t n_head        = hparams.n_head(i);
2981
0
                        const int64_t n_head_kv     = hparams.n_head_kv(i);
2982
2983
0
                        if (n_head_kv == 0 && n_head > 0) {
2984
                            // linear attention for DeciLMCausalModel
2985
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2986
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
2987
0
                        }
2988
0
                        else if (n_head_kv > 0) {
2989
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2990
2991
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
2992
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
2993
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
2994
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
2995
0
                        }
2996
2997
                        // optional bias tensors
2998
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
2999
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3000
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3001
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
3002
3003
0
                        if (n_ff > 0) {
3004
0
                            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3005
0
                        }
3006
3007
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
3008
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3009
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3010
0
                        }
3011
0
                        else {
3012
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3013
0
                        }
3014
3015
0
                        if (n_ff > 0) {
3016
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3017
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3018
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3019
0
                        }
3020
3021
                        // optional MLP bias
3022
0
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
3023
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
3024
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
3025
0
                    }
3026
0
                } break;
3027
0
            case LLM_ARCH_MINICPM3:
3028
0
                {
3029
0
                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
3030
0
                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
3031
3032
0
                    const int64_t q_lora_rank  = hparams.n_lora_q;
3033
0
                    const int64_t kv_lora_rank = hparams.n_lora_kv;
3034
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3035
3036
                    // output
3037
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3038
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3039
3040
                    // if output is NULL, init from the input tok embed
3041
0
                    if (output == NULL) {
3042
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3043
0
                    }
3044
3045
0
                    for (int i = 0; i < n_layer; ++i) {
3046
0
                        auto & layer = layers[i];
3047
3048
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3049
0
                        layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
3050
3051
0
                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
3052
3053
0
                        layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
3054
0
                        layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);
3055
3056
0
                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
3057
0
                        layer.wkv_b     = create_tensor(tn(LLM_TENSOR_ATTN_KV_B,     "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
3058
0
                        layer.wo        = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {              n_head * (                      n_embd_head_v), n_embd}, 0);
3059
3060
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3061
3062
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3063
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3064
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3065
3066
0
                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3067
0
                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3068
0
                    }
3069
0
                } break;
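                // Editorial note: the MiniCPM3 branch above sizes its low-rank (MLA-style)
                // attention projections purely from hyperparameters. The standalone sketch
                // below restates that shape arithmetic outside the loader; the concrete
                // numbers are assumed placeholders for illustration, not values read from
                // any model file.
                //
                //   #include <cstdint>
                //   #include <cstdio>
                //
                //   int main() {
                //       // assumed example values
                //       const int64_t n_embd        = 2560;
                //       const int64_t n_head        = 40;
                //       const int64_t n_embd_head_k = 128;
                //       const int64_t n_embd_head_v = 128;
                //       const int64_t n_rot         = 32;   // rotary sub-dimension
                //       const int64_t q_lora_rank   = 768;
                //       const int64_t kv_lora_rank  = 256;
                //
                //       const int64_t n_embd_head_qk_rope = n_rot;
                //       const int64_t n_embd_head_qk_nope = n_embd_head_k - n_rot;
                //
                //       // shapes as passed to create_tensor above
                //       printf("wq_a      : {%lld, %lld}\n", (long long) n_embd,      (long long) q_lora_rank);
                //       printf("wq_b      : {%lld, %lld}\n", (long long) q_lora_rank, (long long)(n_head * n_embd_head_k));
                //       printf("wkv_a_mqa : {%lld, %lld}\n", (long long) n_embd,      (long long)(kv_lora_rank + n_embd_head_qk_rope));
                //       printf("wkv_b     : {%lld, %lld}\n", (long long) kv_lora_rank,
                //              (long long)(n_head * (n_embd_head_qk_nope + n_embd_head_v)));
                //       printf("wo        : {%lld, %lld}\n", (long long)(n_head * n_embd_head_v), (long long) n_embd);
                //       return 0;
                //   }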
3070
0
            case LLM_ARCH_GROK:
3071
0
                {
3072
0
                    if (n_expert == 0) {
3073
0
                        throw std::runtime_error("Grok model cannot have zero experts");
3074
0
                    }
3075
3076
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3077
3078
                    // output
3079
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3080
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3081
3082
                    // if output is NULL, init from the input tok embed
3083
0
                    if (output == NULL) {
3084
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3085
0
                    }
3086
3087
0
                    const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff/* / n_expert_used*/; // grok-1 n_ff_exp == n_ff
3088
0
                    for (int i = 0; i < n_layer; ++i) {
3089
0
                        auto & layer = layers[i];
3090
3091
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3092
3093
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3094
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3095
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3096
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3097
3098
0
                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
3099
3100
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3101
3102
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
3103
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff,   n_embd}, TENSOR_NOT_REQUIRED);
3104
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
3105
3106
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
3107
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff_exp, n_expert}, TENSOR_NOT_REQUIRED);
3108
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd,   n_expert}, 0);
3109
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
3110
3111
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3112
0
                        if (!layer.ffn_post_norm) {
3113
0
                            layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
3114
0
                        }
3115
0
                    }
3116
0
                } break;
3117
0
            case LLM_ARCH_DBRX:
3118
0
                {
3119
0
                    if (n_expert == 0) {
3120
0
                        throw std::runtime_error("DBRX model cannot have zero experts");
3121
0
                    }
3122
3123
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3124
3125
                    // output
3126
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3127
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3128
3129
0
                    for (int i = 0; i < n_layer; ++i) {
3130
0
                        auto & layer = layers[i];
3131
3132
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3133
3134
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3135
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3136
3137
0
                        layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
3138
3139
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
3140
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
3141
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
3142
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
3143
0
                    }
3144
0
                } break;
3145
0
            case LLM_ARCH_BAICHUAN:
3146
0
                {
3147
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3148
0
                    {
3149
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3150
0
                        output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3151
0
                    }
3152
3153
0
                    for (int i = 0; i < n_layer; ++i) {
3154
0
                        auto & layer = layers[i];
3155
3156
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3157
3158
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3159
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3160
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3161
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3162
3163
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3164
3165
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3166
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3167
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3168
0
                    }
3169
0
                } break;
3170
0
            case LLM_ARCH_FALCON:
3171
0
                {
3172
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3173
3174
                    // output
3175
0
                    {
3176
0
                        output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3177
0
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3178
3179
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3180
0
                        if (!output) {
3181
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
3182
0
                        }
3183
0
                    }
3184
3185
0
                    for (int i = 0; i < n_layer; ++i) {
3186
0
                        auto & layer = layers[i];
3187
3188
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3189
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
3190
3191
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3192
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3193
3194
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3195
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3196
3197
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3198
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3199
0
                    }
3200
0
                } break;
3201
0
            case LLM_ARCH_STARCODER:
3202
0
                {
3203
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3204
0
                    pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, 0);
3205
3206
                    // output
3207
0
                    {
3208
0
                        output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3209
0
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3210
0
                        output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3211
0
                        if (!output) {
3212
                            // needs to be on GPU
3213
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3214
0
                        }
3215
3216
0
                    }
3217
3218
0
                    for (int i = 0; i < n_layer; ++i) {
3219
0
                        auto & layer = layers[i];
3220
3221
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3222
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
3223
3224
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3225
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
3226
3227
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3228
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
3229
3230
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3231
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
3232
3233
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3234
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
3235
3236
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i),   {n_embd, n_ff}, 0);
3237
0
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i),     {n_ff}, 0);
3238
0
                    }
3239
0
                } break;
3240
0
            case LLM_ARCH_BERT:
3241
0
            case LLM_ARCH_NOMIC_BERT:
3242
0
            case LLM_ARCH_NOMIC_BERT_MOE:
3243
0
            case LLM_ARCH_JINA_BERT_V3:
3244
0
                {
3245
0
                    tok_embd     = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0);
3246
0
                    type_embd    = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, TENSOR_NOT_REQUIRED);
3247
3248
0
                    if (arch == LLM_ARCH_BERT) {
3249
0
                        pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,    "weight"), {n_embd, n_ctx_train}, 0);
3250
3251
0
                        cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
3252
0
                        cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {n_embd},         TENSOR_NOT_REQUIRED);
3253
3254
0
                        cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3255
0
                        cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {hparams.n_cls_out},         TENSOR_NOT_REQUIRED);
3256
0
                    }
3257
3258
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
3259
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0);
3260
3261
0
                    for (int i = 0; i < n_layer; ++i) {
3262
0
                        auto & layer = layers[i];
3263
3264
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3265
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3266
3267
0
                        if (!layer.wqkv) {
3268
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3269
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i),   {n_embd}, 0);
3270
3271
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3272
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i),   {n_embd_gqa}, 0);
3273
3274
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3275
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i),   {n_embd_gqa}, 0);
3276
0
                        }
3277
3278
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {n_embd, n_embd}, 0);
3279
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3280
3281
0
                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
3282
0
                        layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i),   {n_embd}, 0);
3283
3284
0
                        if (hparams.moe_every_n_layers > 0 && i % hparams.moe_every_n_layers == 1) {
3285
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff,   n_expert}, 0);
3286
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff,   n_embd, n_expert}, 0);
3287
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,   "weight", i), {n_embd, n_expert}, 0);
3288
0
                        } else {
3289
0
                            layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
3290
0
                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, TENSOR_NOT_REQUIRED);
3291
0
                            layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3292
0
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3293
3294
0
                            if (arch == LLM_ARCH_NOMIC_BERT) {
3295
0
                                layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
3296
0
                            }
3297
0
                        }
3298
3299
0
                        layer.layer_out_norm   = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
3300
0
                        layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i),   {n_embd}, 0);
3301
0
                    }
3302
0
                } break;
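                // Editorial note: in the BERT-family branch above, the check
                // (hparams.moe_every_n_layers > 0 && i % hparams.moe_every_n_layers == 1)
                // decides per layer whether expert tensors (ffn_*_exps) or a dense FFN are
                // created. A minimal sketch of that selection, assuming hypothetical values
                // moe_every_n_layers = 2 and n_layer = 12 (illustrative only):
                //
                //   #include <cstdint>
                //   #include <cstdio>
                //
                //   int main() {
                //       const uint32_t moe_every_n_layers = 2;  // assumed value
                //       const int      n_layer            = 12; // assumed value
                //
                //       for (int i = 0; i < n_layer; ++i) {
                //           const bool is_moe = moe_every_n_layers > 0 && i % moe_every_n_layers == 1;
                //           printf("layer %2d -> %s\n", i, is_moe ? "ffn_*_exps (MoE)" : "ffn_up/ffn_down (dense)");
                //       }
                //       return 0;
                //   }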
3303
0
            case LLM_ARCH_MODERN_BERT:
3304
0
                {
3305
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3306
0
                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
3307
3308
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3309
3310
0
                    for (int i = 0; i < n_layer; ++i) {
3311
0
                        auto & layer = layers[i];
3312
3313
0
                        if (i != 0) {
3314
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3315
0
                        } else {
3316
                            // layer 0 uses identity
3317
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3318
0
                        }
3319
3320
3321
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, 3 * n_embd }, 0);
3322
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT,   "weight", i), {n_embd, n_embd}, 0);
3323
3324
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, 2 * n_ff}, 0);
3325
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3326
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3327
0
                    }
3328
3329
0
                    cls       = create_tensor(tn(LLM_TENSOR_CLS,     "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
3330
0
                    cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3331
0
                    cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {hparams.n_cls_out},         TENSOR_NOT_REQUIRED);
3332
3333
0
                } break;
3334
0
            case LLM_ARCH_NEO_BERT:
3335
0
                {
3336
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0);
3337
3338
0
                    cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
3339
0
                    cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {n_embd},         TENSOR_NOT_REQUIRED);
3340
3341
0
                    cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3342
0
                    cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {hparams.n_cls_out},         TENSOR_NOT_REQUIRED);
3343
3344
0
                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
3345
3346
0
                    for (int i = 0; i < n_layer; ++i) {
3347
0
                        auto & layer = layers[i];
3348
3349
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3350
3351
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3352
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3353
3354
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3355
3356
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff*2}, 0);
3357
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3358
0
                    }
3359
0
                } break;
3360
0
            case LLM_ARCH_JINA_BERT_V2:
3361
0
                {
3362
0
                    tok_embd  = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0); // word_embeddings
3363
0
                    type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); // token_type_embeddings
3364
3365
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); // LayerNorm
3366
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0); // LayerNorm bias
3367
3368
0
                    cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED);
3369
0
                    cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {1},         TENSOR_NOT_REQUIRED);
3370
0
                    for (int i = 0; i < n_layer; ++i) {
3371
0
                        auto & layer = layers[i]; // JinaBertLayer
3372
3373
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
3374
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd}, 0);
3375
3376
0
                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3377
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3378
3379
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
3380
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
3381
3382
0
                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3383
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3384
3385
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
3386
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
3387
3388
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); // output_dense
3389
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd}, 0); // output_dense
3390
3391
0
                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); // output_norm
3392
0
                        layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias",   i), {n_embd}, 0);
3393
3394
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3395
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3396
3397
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
3398
3399
0
                        const auto tn_ffn_up_weight = tn(LLM_TENSOR_FFN_UP, "weight", i);
3400
0
                        ggml_tensor * t_ffn_up = ml.get_tensor_meta(tn_ffn_up_weight.str().c_str());
3401
0
                        const int64_t n_ffn_up = t_ffn_up ? t_ffn_up->ne[1] : n_ff;
3402
3403
0
                        GGML_ASSERT(n_ffn_up == n_ff || n_ffn_up == n_ff * 2);
3404
0
                        layer.ffn_up   = create_tensor(tn_ffn_up_weight, {n_embd, n_ffn_up}, 0);
3405
0
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ffn_up}, TENSOR_NOT_REQUIRED);
3406
3407
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3408
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, 0);
3409
3410
0
                        layer.layer_out_norm   = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
3411
0
                        layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias",   i), {n_embd}, 0);
3412
0
                    }
3413
0
                } break;
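                // Editorial note: in the JINA_BERT_V2 branch above, the loader inspects the
                // ffn_up tensor metadata and accepts either an ungated up-projection of width
                // n_ff or a fused gate+up projection of width 2*n_ff. A minimal restatement
                // of that acceptance rule, under assumed illustrative values:
                //
                //   #include <cassert>
                //   #include <cstdint>
                //   #include <cstdio>
                //
                //   int main() {
                //       const int64_t n_ff     = 8192;     // assumed hyperparameter
                //       const int64_t n_ffn_up = 2 * n_ff; // pretend the tensor metadata reported this width
                //
                //       // same acceptance rule as the GGML_ASSERT above
                //       assert(n_ffn_up == n_ff || n_ffn_up == n_ff * 2);
                //
                //       const bool fused_gate_up = (n_ffn_up == n_ff * 2);
                //       printf("ffn_up width %lld -> %s\n", (long long) n_ffn_up,
                //              fused_gate_up ? "fused gate+up projection" : "plain up projection");
                //       return 0;
                //   }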
3414
0
            case LLM_ARCH_BLOOM:
3415
0
                {
3416
0
                    tok_embd   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
3417
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
3418
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0);
3419
3420
                    // output
3421
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3422
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3423
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3424
3425
                    // if output is NULL, init from the input tok embed
3426
0
                    if (output == NULL) {
3427
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3428
0
                    }
3429
3430
0
                    for (int i = 0; i < n_layer; ++i) {
3431
0
                        auto & layer = layers[i];
3432
3433
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3434
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), {n_embd}, 0);
3435
3436
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3437
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias",   i), {n_embd + 2*n_embd_gqa}, 0);
3438
3439
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3440
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd}, 0);
3441
3442
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3443
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), {n_embd}, 0);
3444
3445
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3446
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, 0);
3447
3448
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
3449
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias",   i), {n_ff}, 0);
3450
0
                    }
3451
0
                } break;
3452
0
            case LLM_ARCH_MPT:
3453
0
                {
3454
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3455
0
                    pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, TENSOR_NOT_REQUIRED);
3456
3457
                    // output
3458
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3459
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, TENSOR_NOT_REQUIRED);
3460
3461
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3462
0
                    if (!output) {
3463
0
                        output    = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
3464
0
                    }
3465
3466
0
                    for (int i = 0; i < n_layer; ++i) {
3467
0
                        auto & layer = layers[i];
3468
3469
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3470
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3471
3472
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3473
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3474
3475
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3476
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3477
3478
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3479
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3480
3481
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3482
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3483
3484
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3485
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, TENSOR_NOT_REQUIRED);
3486
3487
0
                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3488
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3489
3490
0
                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3491
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3492
3493
                        // AWQ ScaleActivation layer
3494
0
                        layer.ffn_act = create_tensor(tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, TENSOR_NOT_REQUIRED);
3495
0
                    }
3496
0
                } break;
3497
0
            case LLM_ARCH_STABLELM:
3498
0
                {
3499
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3500
3501
                    // output
3502
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3503
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3504
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3505
3506
0
                    for (int i = 0; i < n_layer; ++i) {
3507
0
                        auto & layer = layers[i];
3508
3509
0
                        layer.attn_norm =   create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3510
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
3511
3512
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3513
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3514
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3515
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3516
3517
                        // optional bias tensors, present in Stable LM 2 1.6B
3518
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
3519
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3520
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3521
3522
                        // optional q and k layernorms, present in StableLM 2 12B
3523
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head},    TENSOR_NOT_REQUIRED);
3524
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
3525
3526
                        // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
3527
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3528
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3529
3530
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3531
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3532
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3533
0
                    }
3534
0
                } break;
3535
0
            case LLM_ARCH_QWEN:
3536
0
                {
3537
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3538
3539
                    // output
3540
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3541
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3542
3543
0
                    for (int i = 0; i < n_layer; ++i) {
3544
0
                        auto & layer = layers[i];
3545
3546
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3547
3548
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3}, 0);
3549
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd*3}, 0);
3550
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3551
3552
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3553
3554
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2}, 0);
3555
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd}, 0);
3556
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff/2}, 0);
3557
0
                    }
3558
0
                } break;
3559
0
            case LLM_ARCH_QWEN2:
3560
0
            case LLM_ARCH_QWEN2VL:
3561
0
            case LLM_ARCH_DREAM:
3562
0
                {
3563
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3564
3565
                    // output
3566
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3567
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3568
0
                    output_b    = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   {n_vocab}, TENSOR_NOT_REQUIRED);
3569
                    // if output is NULL, init from the input tok embed
3570
0
                    if (output == NULL) {
3571
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3572
0
                    }
3573
3574
0
                    for (int i = 0; i < n_layer; ++i) {
3575
0
                        auto & layer = layers[i];
3576
3577
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3578
3579
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3580
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3581
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3582
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3583
3584
                        // optional bias tensors
3585
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
3586
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3587
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3588
3589
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3590
3591
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3592
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3593
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3594
0
                    }
3595
0
                } break;
3596
0
            case LLM_ARCH_QWEN2MOE:
3597
0
                {
3598
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3599
3600
                    // output
3601
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3602
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3603
3604
0
                    for (int i = 0; i < n_layer; ++i) {
3605
0
                        auto & layer = layers[i];
3606
3607
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3608
3609
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3610
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3611
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3612
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3613
3614
                        // optional bias tensors
3615
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
3616
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3617
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3618
3619
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3620
3621
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
3622
3623
0
                        if (n_expert == 0) {
3624
0
                            throw std::runtime_error("n_expert must be > 0 for QWEN2MOE");
3625
0
                        }
3626
0
                        if (n_expert_used == 0) {
3627
0
                            throw std::runtime_error("n_expert_used must be > 0 for QWEN2MOE");
3628
0
                        }
3629
3630
                        // MoE branch
3631
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
3632
3633
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3634
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
3635
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3636
3637
                        // Shared expert branch
3638
0
                        const int64_t n_ff_shexp = hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff;
3639
3640
0
                        layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd}, 0);
3641
0
                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {    n_embd, n_ff_shexp}, 0);
3642
0
                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp,     n_embd}, 0);
3643
0
                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {    n_embd, n_ff_shexp}, 0);
3644
0
                    }
3645
0
                } break;
3646
0
            case LLM_ARCH_QWEN3:
3647
0
            case LLM_ARCH_QWEN3VL:
3648
0
                {
3649
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3650
3651
                    // output
3652
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3653
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3654
                    // if output is NULL, init from the input tok embed
3655
0
                    if (output == NULL) {
3656
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3657
0
                    }
3658
3659
                    // output rerank head
3660
0
                    cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3661
3662
0
                    for (int i = 0; i < n_layer; ++i) {
3663
0
                        auto & layer = layers[i];
3664
3665
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3666
3667
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
3668
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3669
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3670
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
3671
3672
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
3673
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
3674
3675
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3676
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3677
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3678
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3679
0
                    }
3680
0
                } break;
3681
0
            case LLM_ARCH_QWEN3MOE:
3682
0
            case LLM_ARCH_QWEN3VLMOE:
3683
0
            case LLM_ARCH_RND1:
3684
0
                {
3685
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3686
3687
                    // output
3688
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3689
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3690
                    // if output is NULL, init from the input tok embed
3691
0
                    if (output == NULL) {
3692
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3693
0
                    }
3694
3695
0
                    for (int i = 0; i < n_layer; ++i) {
3696
0
                        auto & layer = layers[i];
3697
3698
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3699
3700
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
3701
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3702
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3703
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
3704
3705
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
3706
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
3707
3708
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3709
3710
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
3711
3712
0
                        if (n_expert == 0) {
3713
0
                            throw std::runtime_error("n_expert must be > 0 for QWEN3MOE");
3714
0
                        }
3715
0
                        if (n_expert_used == 0) {
3716
0
                            throw std::runtime_error("n_expert_used must be > 0 for QWEN3MOE");
3717
0
                        }
3718
3719
                        // MoE branch
3720
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
3721
3722
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3723
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
3724
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3725
0
                    }
3726
0
                } break;
3727
0
            case LLM_ARCH_PHI2:
3728
0
                {
3729
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3730
3731
                    // output
3732
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3733
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3734
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3735
0
                    output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   {n_vocab}, 0);
3736
3737
0
                    for (int i = 0; i < n_layer; ++i) {
3738
0
                        auto & layer = layers[i];
3739
3740
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3741
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
3742
3743
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3744
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3745
3746
0
                        if (layer.wqkv == nullptr) {
3747
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
3748
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd}, 0);
3749
3750
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
3751
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i),   {n_embd_gqa}, 0);
3752
3753
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
3754
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa}, 0);
3755
0
                        }
3756
3757
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3758
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
3759
3760
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3761
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
3762
3763
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
3764
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
3765
0
                    }
3766
0
                } break;
3767
0
            case LLM_ARCH_PHI3:
3768
0
                {
3769
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
3770
3771
                    // output
3772
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
3773
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3774
3775
                    // if output is NULL, init from the input tok embed
3776
0
                    if (output == NULL) {
3777
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3778
0
                    }
3779
3780
0
                    for (int i = 0; i < n_layer; ++i) {
3781
0
                        auto & layer = layers[i];
3782
3783
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
3784
3785
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
3786
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
3787
3788
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
3789
3790
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
3791
0
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0);
3792
3793
0
                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3794
0
                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3795
0
                    }
3796
0
                } break;
3797
0
            case LLM_ARCH_PHIMOE:
3798
0
                {
3799
0
                    const int64_t n_embd_head = n_embd / n_head;
3800
3801
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
3802
3803
                    // output
3804
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
3805
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3806
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), { n_embd, n_vocab }, 0);
3807
0
                    output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   { n_vocab }, 0);
3808
3809
0
                    for (int i = 0; i < n_layer; ++i) {
3810
0
                        auto & layer = layers[i];
3811
3812
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
3813
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), { n_embd }, 0);
3814
3815
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
3816
0
                        if (layer.wqkv == nullptr) {
3817
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
3818
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias",   i), {n_embd}, 0);
3819
3820
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
3821
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
3822
3823
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
3824
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
3825
0
                        }
3826
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
3827
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), { n_embd }, 0);
3828
3829
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
3830
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), { n_embd }, 0);
3831
3832
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert},         0);
3833
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
3834
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
3835
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
3836
3837
0
                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3838
0
                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3839
0
                    }
3840
0
                } break;
3841
0
            case LLM_ARCH_PLAMO:
3842
0
                {
3843
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3844
3845
                    // output
3846
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3847
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3848
3849
0
                    for (int i = 0; i < n_layer; ++i) {
3850
0
                        auto & layer = layers[i];
3851
3852
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3853
3854
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3855
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3856
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3857
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3858
3859
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3860
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3861
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3862
0
                    }
3863
0
                } break;
3864
0
            case LLM_ARCH_PLAMO2:
3865
0
                {
3866
                    // mamba parameters
3867
0
                    const uint32_t d_conv             = hparams.ssm_d_conv;
3868
0
                    const uint32_t d_state            = hparams.ssm_d_state;
3869
0
                    const uint32_t num_heads          = hparams.ssm_dt_rank;
3870
0
                    const uint32_t intermediate_size  = hparams.ssm_d_inner;
3871
0
                    const int64_t dt_dim              = std::max(64, int(hparams.n_embd / 16));
3872
3873
                    // attention parameters
3874
0
                    const uint32_t qk_dim = hparams.n_embd_head_k;
3875
0
                    const uint32_t v_dim  = hparams.n_embd_head_v;
3876
3877
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3878
3879
                    // output
3880
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3881
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3882
                    // if output is NULL, init from the input tok embed
3883
0
                    if (output == NULL) {
3884
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3885
0
                    }
3886
3887
0
                    for (int i = 0; i < n_layer; ++i) {
3888
0
                        auto & layer = layers[i];
3889
0
                        bool is_mamba_layer = hparams.is_recurrent(i);
3890
3891
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3892
3893
0
                        if (is_mamba_layer) {
3894
0
                            layer.ssm_in       = create_tensor(tn(LLM_TENSOR_SSM_IN,     "weight", i), {n_embd, 2 * intermediate_size}, 0);
3895
0
                            layer.ssm_conv1d   = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, intermediate_size}, 0);
3896
3897
0
                            layer.ssm_x    = create_tensor(tn(LLM_TENSOR_SSM_X,  "weight", i), {intermediate_size, dt_dim + 2*d_state}, 0);
3898
0
                            layer.ssm_dt   = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_dim, num_heads}, 0);
3899
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {num_heads}, 0);
3900
3901
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {num_heads}, 0);
3902
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {num_heads}, 0);
3903
3904
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {intermediate_size, n_embd}, 0);
3905
3906
0
                            layer.ssm_dt_norm = create_tensor(tn(LLM_TENSOR_SSM_DT_NORM, i), {dt_dim}, 0);
3907
0
                            layer.ssm_b_norm = create_tensor(tn(LLM_TENSOR_SSM_B_NORM, i), {d_state}, 0);
3908
0
                            layer.ssm_c_norm = create_tensor(tn(LLM_TENSOR_SSM_C_NORM, i), {d_state}, 0);
3909
0
                        } else {
3910
0
                            const int64_t num_attention_heads = hparams.n_head(i);
3911
0
                            const int64_t q_num_heads         = num_attention_heads;
3912
0
                            const int64_t num_key_value_heads = hparams.n_head_kv(i);
3913
0
                            const int64_t k_num_heads         = num_key_value_heads;
3914
0
                            const int64_t v_num_heads         = num_key_value_heads;
3915
0
                            const int64_t q_proj_dim          = q_num_heads * qk_dim;
3916
0
                            const int64_t k_proj_dim          = k_num_heads * qk_dim;
3917
0
                            const int64_t v_proj_dim          = v_num_heads * v_dim;
3918
3919
0
                            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, q_proj_dim + k_proj_dim + v_proj_dim}, 0);
3920
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {qk_dim, num_attention_heads}, 0);
3921
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {qk_dim, k_num_heads}, 0);
3922
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {q_num_heads * v_dim, n_embd}, 0);
3923
0
                        }
3924
3925
                        // All layers have post-attention norm, FFN norm, and FFN tensors
3926
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, i), {n_embd}, 0);
3927
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3928
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3929
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
3930
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, i), {n_embd}, 0);
3931
0
                    }
3932
0
                } break;
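The PLaMo-2 attention branch sizes the fused attn_qkv.weight as {n_embd, q_proj_dim + k_proj_dim + v_proj_dim}, with each term derived from the per-layer head counts. A minimal standalone sketch of that arithmetic, assuming made-up example values in place of the real hparams:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Example values only; in the listing these come from hparams.
        const int64_t n_embd    = 4096;
        const int64_t qk_dim    = 128;  // hparams.n_embd_head_k
        const int64_t v_dim     = 128;  // hparams.n_embd_head_v
        const int64_t n_head    = 32;   // hparams.n_head(i)
        const int64_t n_head_kv = 8;    // hparams.n_head_kv(i)

        const int64_t q_proj_dim = n_head    * qk_dim;
        const int64_t k_proj_dim = n_head_kv * qk_dim;
        const int64_t v_proj_dim = n_head_kv * v_dim;

        // attn_qkv.weight is created as {n_embd, q_proj_dim + k_proj_dim + v_proj_dim}
        std::printf("wqkv: {%lld, %lld}\n", (long long) n_embd,
                    (long long) (q_proj_dim + k_proj_dim + v_proj_dim));
        return 0;
    }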
3933
0
            case LLM_ARCH_PLAMO3:
3934
0
                {
3935
0
                    const int64_t head_dim_q = hparams.n_embd_head_k;
3936
0
                    const int64_t head_dim_v = hparams.n_embd_head_v;
3937
3938
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3939
3940
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3941
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3942
0
                    if (output == NULL) {
3943
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3944
0
                    }
3945
3946
0
                    for (int i = 0; i < n_layer; ++i) {
3947
0
                        auto & layer = layers[i];
3948
3949
0
                        const int64_t num_attention_heads = hparams.n_head(i);
3950
0
                        const int64_t num_key_value_heads = hparams.n_head_kv(i);
3951
0
                        const int64_t q_proj_dim = num_attention_heads * head_dim_q;
3952
0
                        const int64_t k_proj_dim = num_key_value_heads * head_dim_q;
3953
0
                        const int64_t v_proj_dim = num_key_value_heads * head_dim_v;
3954
0
                        const int64_t n_ff_cur   = hparams.n_ff(i);
3955
3956
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3957
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i),
3958
0
                                {n_embd, q_proj_dim + k_proj_dim + v_proj_dim}, 0);
3959
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {head_dim_q}, 0);
3960
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {head_dim_q}, 0);
3961
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {num_attention_heads * head_dim_v, n_embd}, 0);
3962
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, i), {n_embd}, 0);
3963
3964
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3965
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, i), {n_embd}, 0);
3966
3967
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff_cur * 2}, 0);
3968
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff_cur, n_embd}, 0);
3969
0
                    }
3970
0
                } break;
3971
0
            case LLM_ARCH_GPT2:
3972
0
                {
3973
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3974
0
                    pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, 0);
3975
3976
                    // output
3977
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3978
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3979
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3980
3981
                    // if output is NULL, init from the input tok embed
3982
0
                    if (output == NULL) {
3983
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3984
0
                    }
3985
3986
0
                    for (int i = 0; i < n_layer; ++i) {
3987
0
                        auto & layer = layers[i];
3988
3989
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, 0);
3990
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "bias", i),   {n_embd}, 0);
3991
3992
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3993
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
3994
3995
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3996
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
3997
3998
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3999
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4000
4001
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4002
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
4003
4004
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
4005
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
4006
0
                    }
4007
0
                } break;
4008
0
            case LLM_ARCH_CODESHELL:
4009
0
                {
4010
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4011
4012
                    // if tok embd is NULL, init from output
4013
0
                    if (tok_embd == NULL) {
4014
0
                        tok_embd = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4015
0
                    }
4016
4017
                    // output
4018
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4019
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
4020
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4021
4022
0
                    for (int i = 0; i < n_layer; ++i) {
4023
0
                        auto & layer = layers[i];
4024
4025
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4026
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
4027
4028
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
4029
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
4030
4031
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4032
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
4033
4034
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4035
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4036
4037
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4038
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
4039
4040
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i),   {n_embd, n_ff}, 0);
4041
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i),     {n_ff}, 0);
4042
0
                    }
4043
0
                } break;
4044
0
            case LLM_ARCH_ORION:
4045
0
                {
4046
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4047
4048
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4049
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
4050
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4051
4052
0
                    for (int i = 0; i < n_layer; ++i) {
4053
0
                        auto & layer = layers[i];
4054
4055
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4056
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
4057
4058
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4059
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4060
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4061
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4062
4063
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4064
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4065
4066
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4067
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4068
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4069
0
                    }
4070
0
                } break;
4071
0
            case LLM_ARCH_INTERNLM2:
4072
0
                {
4073
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4074
4075
                    // output
4076
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4077
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4078
4079
0
                    for (int i = 0; i < n_layer; ++i) {
4080
0
                        auto & layer = layers[i];
4081
4082
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4083
                        // layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
4084
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4085
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4086
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4087
4088
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4089
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4090
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4091
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4092
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4093
0
                    }
4094
0
                } break;
4095
0
            case LLM_ARCH_GEMMA:
4096
0
                {
4097
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4098
4099
                    // output
4100
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4101
0
                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
4102
4103
0
                    for (int i = 0; i < n_layer; ++i) {
4104
0
                        auto & layer = layers[i];
4105
4106
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4107
4108
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4109
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4110
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4111
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4112
4113
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4114
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4115
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4116
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4117
0
                    }
4118
0
                } break;
4119
0
            case LLM_ARCH_GEMMA2:
4120
0
                {
4121
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4122
4123
                    // output
4124
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4125
0
                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
4126
4127
0
                    for (int i = 0; i < n_layer; ++i) {
4128
0
                        auto & layer = layers[i];
4129
4130
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4131
4132
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4133
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4134
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4135
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4136
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4137
4138
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4139
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4140
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4141
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4142
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4143
0
                    }
4144
0
                } break;
4145
0
            case LLM_ARCH_GEMMA3:
4146
0
            case LLM_ARCH_GEMMA_EMBEDDING:
4147
0
                {
4148
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4149
4150
                    // output
4151
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4152
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4153
4154
                    // if output is NULL, init from the input tok embed
4155
0
                    if (output == NULL) {
4156
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,   "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4157
0
                    }
4158
4159
                    // Dense linear weights
4160
0
                    dense_2_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_2_OUT, "weight"), {n_embd, hparams.dense_2_feat_out}, TENSOR_NOT_REQUIRED);
4161
0
                    dense_3_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_3_OUT, "weight"), {hparams.dense_3_feat_in, n_embd}, TENSOR_NOT_REQUIRED);
4162
4163
4164
0
                    for (int i = 0; i < n_layer; ++i) {
4165
0
                        auto & layer = layers[i];
4166
4167
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4168
4169
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4170
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4171
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4172
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4173
4174
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4175
0
                        layer.attn_k_norm    = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM,    "weight", i), {n_embd_head_k}, 0);
4176
0
                        layer.attn_q_norm    = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM,    "weight", i), {n_embd_head_k}, 0);
4177
4178
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4179
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4180
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4181
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4182
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4183
0
                    }
4184
0
                } break;
4185
0
            case LLM_ARCH_GEMMA3N:
4186
0
                {
4187
0
                    const int64_t n_altup      = hparams.n_altup;
4188
0
                    const int64_t laurel_rank  = hparams.laurel_rank;
4189
0
                    const int64_t n_embd_altup = hparams.n_embd_altup;
4190
4191
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4192
                    // if output is NULL, init from the input tok embed
4193
0
                    if (output == NULL) {
4194
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4195
0
                    }
4196
4197
0
                    tok_embd           = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,           "weight"), {n_embd, n_vocab}, 0);
4198
0
                    tok_embd_per_layer = create_tensor(tn(LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "weight"), {n_embd_altup * n_layer, n_vocab}, 0);
4199
4200
0
                    altup_proj           = create_tensor(tn(LLM_TENSOR_ALTUP_PROJ,           "weight"), {n_embd, n_embd, n_altup - 1}, 0);
4201
0
                    altup_unembd_proj    = create_tensor(tn(LLM_TENSOR_ALTUP_UNEMBD_PROJ,    "weight"), {n_embd, n_embd, n_altup - 1}, 0);
4202
0
                    per_layer_model_proj = create_tensor(tn(LLM_TENSOR_PER_LAYER_MODEL_PROJ, "weight"), {n_embd, n_embd_altup * n_layer}, 0);
4203
0
                    per_layer_proj_norm  = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ_NORM,  "weight"), {n_embd_altup}, 0);
4204
4205
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4206
4207
0
                    for (int i = 0; i < n_layer; ++i) {
4208
0
                        auto & layer = layers[i];
4209
4210
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4211
4212
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4213
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4214
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4215
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4216
4217
0
                        layer.attn_q_norm    = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM,    "weight", i), {n_embd_head_k}, 0);
4218
0
                        layer.attn_k_norm    = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM,    "weight", i), {n_embd_head_k}, 0);
4219
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4220
4221
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4222
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4223
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4224
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4225
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4226
4227
                        // altup & laurel
4228
0
                        layer.per_layer_inp_gate   = create_tensor(tn(LLM_TENSOR_PER_LAYER_INP_GATE,  "weight", i), {n_embd, n_embd_altup}, 0);
4229
0
                        layer.per_layer_proj       = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ,      "weight", i), {n_embd_altup, n_embd}, 0);
4230
0
                        layer.per_layer_post_norm  = create_tensor(tn(LLM_TENSOR_PER_LAYER_POST_NORM, "weight", i), {n_embd}, 0);
4231
0
                        layer.altup_correct_coef   = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_COEF,  "weight", i), {n_altup, n_altup}, 0);
4232
0
                        layer.altup_correct_scale  = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_SCALE, "weight", i), {n_embd}, 0);
4233
0
                        layer.altup_predict_coef   = create_tensor(tn(LLM_TENSOR_ALTUP_PREDICT_COEF,  "weight", i), {n_altup, n_altup * n_altup}, 0);
4234
0
                        layer.altup_router         = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER,        "weight", i), {n_embd, n_altup}, 0);
4235
0
                        layer.altup_router_norm    = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER_NORM,   "weight", i), {n_embd}, 0);
4236
0
                        layer.laurel_l             = create_tensor(tn(LLM_TENSOR_LAUREL_L,            "weight", i), {n_embd, laurel_rank}, 0);
4237
0
                        layer.laurel_r             = create_tensor(tn(LLM_TENSOR_LAUREL_R,            "weight", i), {laurel_rank, n_embd}, 0);
4238
0
                        layer.laurel_post_norm     = create_tensor(tn(LLM_TENSOR_LAUREL_POST_NORM,    "weight", i), {n_embd}, 0);
4239
0
                    }
4240
0
                } break;
4241
0
            case LLM_ARCH_STARCODER2:
4242
0
                {
4243
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4244
4245
                    // output
4246
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4247
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
4248
4249
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4250
                    // if output is NULL, init from the input tok embed
4251
0
                    if (output == NULL) {
4252
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4253
0
                    }
4254
4255
0
                    for (int i = 0; i < n_layer; ++i) {
4256
0
                        auto & layer = layers[i];
4257
4258
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4259
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
4260
4261
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4262
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4263
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4264
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4265
4266
                        // bias tensors
4267
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, 0);
4268
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, 0);
4269
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, 0);
4270
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
4271
4272
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4273
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4274
4275
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4276
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4277
4278
                        // bias tensors
4279
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
4280
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {  n_ff}, 0);
4281
0
                    }
4282
0
                } break;
4283
0
            case LLM_ARCH_MAMBA:
4284
0
                {
4285
0
                    const int64_t d_conv  = hparams.ssm_d_conv;
4286
0
                    const int64_t d_inner = hparams.ssm_d_inner;
4287
0
                    const int64_t d_state = hparams.ssm_d_state;
4288
0
                    const int64_t dt_rank = hparams.ssm_dt_rank;
4289
4290
                    // only an expansion factor of 2 is supported for now
4291
0
                    if (2 * n_embd != d_inner) {
4292
0
                        throw std::runtime_error("only an expansion factor of 2 is supported for now");
4293
0
                    }
4294
4295
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4296
4297
                    // output
4298
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4299
4300
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4301
                    // if output is NULL, init from the input tok embed, duplicated to allow offloading
4302
0
                    if (output == NULL) {
4303
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4304
0
                    }
4305
4306
0
                    for (int i = 0; i < n_layer; ++i) {
4307
0
                        auto & layer = layers[i];
4308
4309
                        // norm
4310
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4311
4312
0
                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
4313
4314
0
                        layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
4315
0
                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
4316
4317
0
                        layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
4318
4319
0
                        layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
4320
0
                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
4321
4322
                        // no "weight" suffix for these
4323
0
                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
4324
0
                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
4325
4326
                        // out_proj
4327
0
                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4328
0
                    }
4329
0
                } break;
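The Mamba branch requires d_inner == 2*n_embd and derives the SSM tensor shapes from d_conv, d_state and dt_rank. A self-contained sketch of the same shape bookkeeping, assuming illustrative hyperparameter values rather than those of any shipped model:

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>

    int main() {
        // Illustrative hyperparameters; in the listing these come from hparams.ssm_*.
        const int64_t n_embd  = 2560;
        const int64_t d_conv  = 4;
        const int64_t d_state = 16;
        const int64_t dt_rank = 160;           // hparams.ssm_dt_rank (example value)
        const int64_t d_inner = 2 * n_embd;    // only an expansion factor of 2 is supported

        if (2 * n_embd != d_inner) {
            throw std::runtime_error("only an expansion factor of 2 is supported for now");
        }

        std::printf("ssm_in     : {%lld, %lld}\n", (long long) n_embd,  (long long) (2*d_inner));
        std::printf("ssm_conv1d : {%lld, %lld}\n", (long long) d_conv,  (long long) d_inner);
        std::printf("ssm_x      : {%lld, %lld}\n", (long long) d_inner, (long long) (dt_rank + 2*d_state));
        std::printf("ssm_dt     : {%lld, %lld}\n", (long long) dt_rank, (long long) d_inner);
        std::printf("ssm_out    : {%lld, %lld}\n", (long long) d_inner, (long long) n_embd);
        return 0;
    }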
4330
0
            case LLM_ARCH_MAMBA2:
4331
0
                {
4332
0
                    const int64_t d_conv  = hparams.ssm_d_conv;
4333
0
                    const int64_t d_inner = hparams.ssm_d_inner;
4334
0
                    const int64_t d_state = hparams.ssm_d_state;
4335
0
                    const int64_t n_head  = hparams.ssm_dt_rank;
4336
0
                    const int64_t n_group = hparams.ssm_n_group;
4337
0
                    const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;
4338
4339
                    // only an expansion factor of 2 is supported for now
4340
0
                    GGML_ASSERT(2 * n_embd == d_inner);
4341
4342
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4343
4344
                    // output
4345
0
                    {
4346
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4347
4348
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4349
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
4350
0
                        if (output == NULL) {
4351
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4352
0
                        }
4353
0
                    }
4354
4355
0
                    for (int i = 0; i < n_layer; ++i) {
4356
0
                        auto & layer = layers[i];
4357
4358
                        // norm
4359
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4360
4361
0
                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
4362
4363
0
                        layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
4364
0
                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, 0);
4365
4366
0
                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_head}, 0);
4367
4368
                        // no "weight" suffix for these
4369
0
                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_head}, 0);
4370
0
                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_head}, 0);
4371
4372
0
                        layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
4373
4374
                        // out_proj
4375
0
                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4376
0
                    }
4377
0
                } break;
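In the Mamba-2 branch the input projection packs several concatenated streams into a single matrix, which is why d_in_proj = 2*d_inner + 2*n_group*d_state + n_head above. A short sketch of that arithmetic, assuming placeholder values in place of the real hparams:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Placeholder values; the listing reads these from hparams.
        const int64_t n_embd  = 2048;
        const int64_t d_inner = 2 * n_embd;   // GGML_ASSERT(2 * n_embd == d_inner) above
        const int64_t d_conv  = 4;
        const int64_t d_state = 128;
        const int64_t n_group = 1;
        const int64_t n_head  = 64;           // hparams.ssm_dt_rank

        const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;

        std::printf("ssm_in     : {%lld, %lld}\n", (long long) n_embd, (long long) d_in_proj);
        std::printf("ssm_conv1d : {%lld, %lld}\n", (long long) d_conv,
                    (long long) (d_inner + 2*n_group*d_state));
        return 0;
    }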
4378
0
            case LLM_ARCH_JAMBA:
4379
0
                {
4380
0
                    const int64_t d_conv  = hparams.ssm_d_conv;
4381
0
                    const int64_t d_inner = hparams.ssm_d_inner;
4382
0
                    const int64_t d_state = hparams.ssm_d_state;
4383
0
                    const int64_t dt_rank = hparams.ssm_dt_rank;
4384
4385
                    // only an expansion factor of 2 is supported for now
4386
0
                    GGML_ASSERT(2 * n_embd == d_inner);
4387
4388
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4389
4390
                    // output
4391
0
                    {
4392
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4393
4394
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4395
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
4396
0
                        if (output == NULL) {
4397
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4398
0
                        }
4399
0
                    }
4400
4401
0
                    for (int i = 0; i < n_layer; ++i) {
4402
0
                        const int64_t n_head_kv = hparams.n_head_kv(i);
4403
0
                        const int64_t n_embd_gqa = hparams.n_embd_v_gqa(i);
4404
4405
0
                        auto & layer = layers[i];
4406
4407
                        // norm
4408
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4409
4410
0
                        if (n_head_kv == 0) {
4411
                            // Mamba layer
4412
0
                            layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
4413
4414
0
                            layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
4415
0
                            layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
4416
4417
0
                            layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
4418
4419
0
                            layer.ssm_dt_norm = create_tensor(tn(LLM_TENSOR_SSM_DT_NORM, "weight", i), {dt_rank}, 0);
4420
4421
0
                            layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
4422
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
4423
4424
0
                            layer.ssm_b_norm = create_tensor(tn(LLM_TENSOR_SSM_B_NORM, "weight", i), {d_state}, 0);
4425
0
                            layer.ssm_c_norm = create_tensor(tn(LLM_TENSOR_SSM_C_NORM, "weight", i), {d_state}, 0);
4426
4427
                            // no "weight" suffix for these
4428
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
4429
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
4430
4431
                            // out_proj
4432
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4433
0
                        } else {
4434
                            // Attention layers
4435
4436
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4437
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4438
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4439
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4440
0
                        }
4441
4442
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4443
4444
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, TENSOR_NOT_REQUIRED);
4445
4446
0
                        if (layer.ffn_gate_inp) {
4447
                            // MoE
4448
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
4449
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
4450
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff, n_expert}, 0);
4451
0
                        } else {
4452
                            // FFN (no MoE)
4453
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
4454
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4455
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
4456
0
                        }
4457
0
                    }
4458
0
                } break;
4459
0
            case LLM_ARCH_GRANITE_HYBRID:
4460
0
                {
4461
                    // mamba2 Mixer SSM params
4462
                    // NOTE: int64_t for tensor dimensions
4463
0
                    const int64_t d_conv     = hparams.ssm_d_conv;
4464
0
                    const int64_t d_inner    = hparams.ssm_d_inner;
4465
0
                    const int64_t d_state    = hparams.ssm_d_state;
4466
0
                    const int64_t n_ssm_head = hparams.ssm_dt_rank;
4467
0
                    const int64_t n_group    = hparams.ssm_n_group;
4468
0
                    const int64_t d_in_proj  = 2*d_inner + 2*n_group*d_state + n_ssm_head;
4469
4470
                    // only an expansion factor of 2 is supported for now
4471
0
                    GGML_ASSERT(2 * n_embd == d_inner);
4472
4473
                    // embeddings
4474
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4475
4476
                    // output
4477
0
                    {
4478
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4479
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4480
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
4481
0
                        if (output == NULL) {
4482
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4483
0
                        }
4484
0
                    }
4485
4486
0
                    for (int i = 0; i < n_layer; ++i) {
4487
0
                        auto & layer = layers[i];
4488
4489
                        // norm
4490
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4491
4492
0
                        if (hparams.is_recurrent(i)) {
4493
                            // ssm layers
4494
0
                            layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
4495
4496
0
                            layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
4497
0
                            layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, TENSOR_NOT_REQUIRED);
4498
4499
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_ssm_head}, 0);
4500
4501
                            // no "weight" suffix for these
4502
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_ssm_head}, 0);
4503
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_ssm_head}, 0);
4504
4505
0
                            layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
4506
4507
                            // out_proj
4508
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4509
0
                        } else {
4510
                            // attention layers (with optional bias)
4511
0
                            const int64_t n_head_i = hparams.n_head(i);
4512
0
                            const int64_t n_embd_k_gqa_i = hparams.n_embd_k_gqa(i);
4513
0
                            const int64_t n_embd_v_gqa_i = hparams.n_embd_v_gqa(i);
4514
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head_i}, 0);
4515
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa_i}, 0);
4516
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa_i}, 0);
4517
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head_i, n_embd}, 0);
4518
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},         TENSOR_NOT_REQUIRED);
4519
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_k_gqa_i}, TENSOR_NOT_REQUIRED);
4520
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_v_gqa_i}, TENSOR_NOT_REQUIRED);
4521
0
                            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},         TENSOR_NOT_REQUIRED);
4522
0
                        }
4523
4524
                        // feed forward (w/ optional biases)
4525
0
                        if (n_expert > 0) {
4526
                            // MoE FFN
4527
0
                            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4528
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
4529
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
4530
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, TENSOR_NOT_REQUIRED);
4531
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
4532
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
4533
4534
                            // For Granite MoE Shared
4535
0
                            if (hparams.n_ff_shexp > 0) {
4536
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
4537
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
4538
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
4539
0
                            }
4540
0
                        } else {
4541
0
                            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4542
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
4543
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4544
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4545
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4546
0
                            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
4547
0
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
4548
0
                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
4549
0
                        }
4550
0
                    }
4551
0
                } break;
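The GRANITE_HYBRID branch chooses a tensor set per layer: hparams.is_recurrent(i) selects the ssm_* tensors, otherwise the attention projections (with optional biases) are created. A toy sketch of that per-layer dispatch, assuming an invented layer schedule in place of the real predicate:

    #include <cstdio>
    #include <vector>

    int main() {
        const int n_layer = 8;

        // Invented schedule: every 4th layer is attention, the rest are recurrent.
        std::vector<bool> is_recurrent(n_layer);
        for (int i = 0; i < n_layer; ++i) {
            is_recurrent[i] = (i % 4 != 3);
        }

        for (int i = 0; i < n_layer; ++i) {
            std::printf("layer %d -> %s\n", i,
                        is_recurrent[i] ? "ssm_{in,conv1d,dt,a,d,norm,out}"
                                        : "attn_{q,k,v,out} (+ optional biases)");
        }
        return 0;
    }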
4552
0
            case LLM_ARCH_XVERSE:
4553
0
                {
4554
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4555
4556
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4557
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4558
4559
0
                    for (int i = 0; i < n_layer; ++i) {
4560
0
                        auto & layer = layers[i];
4561
4562
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4563
4564
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4565
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4566
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4567
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4568
4569
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4570
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4571
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4572
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4573
0
                    }
4574
0
                } break;
4575
0
            case LLM_ARCH_COMMAND_R:
4576
0
                {
4577
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4578
4579
                    // output
4580
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4581
                    // init output from the input tok embed
4582
0
                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4583
4584
0
                    for (int i = 0; i < n_layer; ++i) {
4585
0
                        auto & layer = layers[i];
4586
4587
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4588
4589
0
                        if (n_layer >= 64) {
4590
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
4591
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
4592
0
                        }
4593
4594
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4595
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4596
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4597
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4598
4599
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4600
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4601
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4602
0
                    }
4603
0
                } break;
4604
0
            case LLM_ARCH_COHERE2:
4605
0
                {
4606
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
4607
4608
                    // output
4609
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
4610
                    // init output from the input tok embed
4611
0
                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab },
4612
0
                                                      TENSOR_DUPLICATED);
4613
4614
0
                    for (int i = 0; i < n_layer; ++i) {
4615
0
                        auto & layer = layers[i];
4616
4617
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
4618
4619
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0);
4620
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
4621
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
4622
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
4623
4624
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
4625
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
4626
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
4627
0
                    }
4628
0
                }
4629
0
                break;
4630
0
            case LLM_ARCH_OLMO:  // adapted from LLM_ARCH_LLAMA with norm params removed
4631
0
                {
4632
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4633
4634
                    // output
4635
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4636
                    // if output is NULL, init from the input tok embed
4637
0
                    if (output == NULL) {
4638
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4639
0
                    }
4640
4641
0
                    for (int i = 0; i < n_layer; ++i) {
4642
0
                        auto & layer = layers[i];
4643
4644
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4645
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4646
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4647
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4648
4649
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4650
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4651
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4652
0
                    }
4653
0
                } break;
4654
0
            case LLM_ARCH_OLMO2:
4655
0
                {
4656
0
                    const int64_t n_embd_head = n_embd / n_head;
4657
4658
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4659
4660
                    // output
4661
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4662
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4663
4664
0
                    for (int i = 0; i < n_layer; ++i) {
4665
0
                        auto & layer = layers[i];
4666
4667
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4668
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4669
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4670
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4671
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
4672
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_head_kv * n_embd_head}, 0);
4673
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4674
4675
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4676
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4677
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4678
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4679
0
                    }
4680
0
                } break;
4681
0
            case LLM_ARCH_SEED_OSS:
4682
0
                {
4683
0
                    const uint32_t head_dim             = hparams.n_embd_head_k;
4684
0
                    const int64_t n_qo_dim              = n_head * head_dim;
4685
0
                    const int64_t n_kv_dim              = n_head_kv * head_dim;
4686
4687
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4688
4689
                    // output
4690
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4691
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4692
                    // if output is NULL, init from the input tok embed
4693
0
                    if (output == NULL) {
4694
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4695
0
                    }
4696
4697
0
                    for (int i = 0; i < n_layer; ++i) {
4698
0
                        auto & layer = layers[i];
4699
4700
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_qo_dim}, 0);
4701
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_kv_dim}, 0);
4702
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_kv_dim}, 0);
4703
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_qo_dim, n_embd}, 0);
4704
4705
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_qo_dim},   TENSOR_NOT_REQUIRED);
4706
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_kv_dim},   TENSOR_NOT_REQUIRED);
4707
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_kv_dim},   TENSOR_NOT_REQUIRED);
4708
4709
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4710
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4711
4712
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4713
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4714
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4715
0
                    }
4716
0
                } break;
4717
4718
0
            case LLM_ARCH_OLMOE:
4719
0
                {
4720
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4721
4722
                    // output
4723
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4724
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4725
4726
0
                    for (int i = 0; i < n_layer; ++i) {
4727
0
                        auto & layer = layers[i];
4728
4729
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4730
4731
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4732
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4733
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4734
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4735
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
4736
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
4737
4738
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4739
4740
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4741
4742
0
                        if (n_expert == 0) {
4743
0
                            throw std::runtime_error("n_expert must be > 0");
4744
0
                        }
4745
0
                        if (n_expert_used == 0) {
4746
0
                            throw std::runtime_error("n_expert_used must be > 0");
4747
0
                        }
4748
4749
                        // MoE branch
4750
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
4751
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
4752
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
4753
0
                    }
4754
0
                } break;
4755
0
            case LLM_ARCH_OPENELM:
4756
0
                {
4757
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4758
4759
                    // output
4760
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4761
                    // init output from the input tok embed
4762
0
                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4763
4764
0
                    for (int i = 0; i < n_layer; ++i) {
4765
0
                        const int64_t n_head      =   hparams.n_head(i);
4766
0
                        const int64_t n_head_qkv  = 2*hparams.n_head_kv(i) + n_head;
4767
0
                        const int64_t n_ff        =   hparams.n_ff(i);
4768
4769
0
                        auto & layer = layers[i];
4770
4771
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4772
4773
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_head_qkv*n_embd_head_k}, 0);
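                        // the fused QKV width is n_head_qkv*n_embd_head_k; e.g. with hypothetical per-layer values
                        // n_head = 12, n_head_kv = 3, n_embd_head_k = 64: n_head_qkv = 2*3 + 12 = 18 -> 18*64 = 1152 columns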
4774
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
4775
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
4776
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head*n_embd_head_k, n_embd}, 0);
4777
4778
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4779
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
4780
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4781
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
4782
0
                    }
4783
0
                } break;
4784
0
            case LLM_ARCH_GPTNEOX:
4785
0
                {
4786
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4787
4788
                    // output
4789
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4790
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
4791
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4792
4793
0
                    for (int i = 0; i < n_layer; ++i) {
4794
0
                        auto & layer = layers[i];
4795
4796
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4797
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
4798
4799
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
4800
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
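                        // the fused QKV tensor packs Q (n_embd outputs) plus K and V (n_embd_gqa outputs each),
                        // hence the n_embd + 2*n_embd_gqa output dimension shared by the weight and bias above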
4801
4802
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4803
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
4804
4805
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4806
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4807
4808
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4809
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
4810
4811
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
4812
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
4813
0
                    }
4814
0
                } break;
4815
0
            case LLM_ARCH_ARCTIC:
4816
0
                {
4817
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4818
4819
                    // output
4820
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4821
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4822
4823
                    // if output is NULL, init from the input tok embed
4824
0
                    if (output == NULL) {
4825
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4826
0
                    }
4827
4828
0
                    for (int i = 0; i < n_layer; ++i) {
4829
0
                        auto & layer = layers[i];
4830
4831
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4832
4833
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4834
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4835
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4836
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4837
4838
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4839
4840
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd}, 0);
4841
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd}, 0);
4842
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_embd}, 0);
4843
4844
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4845
0
                        layer.ffn_norm_exps = create_tensor(tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd}, 0);
4846
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, 0);
4847
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
4848
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
4849
0
                    }
4850
0
                } break;
4851
0
            case LLM_ARCH_DEEPSEEK:
4852
0
                {
4853
4854
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
4855
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
4856
4857
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4858
4859
                    // output
4860
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4861
                    // try to load output.weight, if not found, use token_embd (tied embeddings)
4862
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4863
0
                    if (!output) {
4864
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4865
0
                    }
4866
4867
0
                    for (int i = 0; i < n_layer; ++i) {
4868
0
                        auto & layer = layers[i];
4869
4870
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4871
4872
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4873
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4874
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4875
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4876
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4877
4878
0
                        if (i < (int) hparams.n_layer_dense_lead) {
4879
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4880
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4881
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4882
0
                        } else {
4883
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4884
4885
0
                            if (n_expert == 0) {
4886
0
                                throw std::runtime_error("n_expert must be > 0");
4887
0
                            }
4888
0
                            if (n_expert_used == 0) {
4889
0
                                throw std::runtime_error("n_expert_used must be > 0");
4890
0
                            }
4891
4892
                            // MoE branch
4893
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4894
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
4895
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4896
4897
                            // Shared expert branch
4898
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4899
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
4900
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4901
0
                        }
4902
0
                    }
4903
0
                } break;
4904
0
            case LLM_ARCH_DEEPSEEK2:
4905
0
                {
4906
                    // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
4907
0
                    const bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
4908
4909
0
                    const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);
4910
4911
                    // note: these are the actual head sizes you get when treating the weights as MHA, or after "decompression" using wk_b/wv_b for MLA
4912
0
                    const int64_t n_embd_head_k_mla = is_mla ? hparams.n_embd_head_k_mla : hparams.n_embd_head_k;
4913
0
                    const int64_t n_embd_head_v_mla = is_mla ? hparams.n_embd_head_v_mla : hparams.n_embd_head_v;
4914
4915
0
                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
4916
0
                    const int64_t n_embd_head_qk_nope = n_embd_head_k_mla - n_embd_head_qk_rope;
4917
4918
0
                    const int64_t q_lora_rank  = hparams.n_lora_q;
4919
0
                    const int64_t kv_lora_rank = hparams.n_lora_kv;
4920
4921
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
4922
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
4923
4924
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4925
4926
                    // output
4927
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4928
                    // try to load output.weight, if not found, use token_embd (tied embeddings)
4929
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4930
0
                    if (!output) {
4931
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4932
0
                    }
4933
4934
0
                    for (int i = 0; i < n_layer; ++i) {
4935
0
                        auto & layer = layers[i];
4936
4937
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4938
0
                        if (!is_lite) {
4939
0
                            layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
4940
0
                        }
4941
4942
0
                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
4943
4944
0
                        if (!is_lite) {
4945
0
                            layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
4946
0
                            layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k_mla}, 0);
4947
0
                        } else {
4948
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_head * n_embd_head_k_mla}, 0);
4949
0
                        }
4950
4951
0
                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + n_embd_head_qk_rope}, 0);
4952
4953
                        // note: only old legacy GGUF files will have the unsplit wkv_b tensor in them
4954
0
                        if (is_mla) {
4955
0
                            layer.wk_b = create_tensor(tn(LLM_TENSOR_ATTN_K_B, "weight", i), {n_embd_head_qk_nope, kv_lora_rank, n_head}, 0);
4956
0
                            layer.wv_b = create_tensor(tn(LLM_TENSOR_ATTN_V_B, "weight", i), {kv_lora_rank, n_embd_head_v_mla, n_head}, 0);
4957
0
                        } else {
4958
0
                            layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v_mla)}, 0);
4959
0
                        }
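                        // note: wk_b/wv_b and the legacy fused wkv_b describe the same projection out of the
                        // kv_lora_rank latent, just laid out differently: per head, wkv_b stacks a key block
                        // (n_embd_head_qk_nope) and a value block (n_embd_head_v_mla). With hypothetical sizes
                        // kv_lora_rank = 512, n_head = 16, n_embd_head_qk_nope = 128, n_embd_head_v_mla = 128,
                        // the fused tensor would be {512, 16*(128 + 128)} = {512, 4096}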
4960
4961
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_embd_head_v_mla, n_embd}, 0);
4962
4963
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4964
4965
0
                        if (i < (int) hparams.n_layer_dense_lead) {
4966
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4967
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4968
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4969
0
                        } else {
4970
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4971
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
4972
4973
0
                            if (n_expert == 0) {
4974
0
                                throw std::runtime_error("n_expert must be > 0");
4975
0
                            }
4976
0
                            if (n_expert_used == 0) {
4977
0
                                throw std::runtime_error("n_expert_used must be > 0");
4978
0
                            }
4979
4980
                            // MoE branch
4981
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4982
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
4983
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4984
4985
                            // Shared expert branch
4986
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4987
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
4988
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4989
0
                        }
4990
0
                    }
4991
0
                } break;
4992
0
            case LLM_ARCH_PLM:
4993
0
                {
4994
0
                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
4995
0
                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
4996
0
                    const int64_t kv_lora_rank = hparams.n_lora_kv;
4997
4998
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4999
5000
                    // output
5001
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5002
                    // output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
5003
0
                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5004
5005
0
                    for (int i = 0; i < n_layer; ++i) {
5006
0
                        auto & layer = layers[i];
5007
5008
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5009
5010
0
                        layer.wq        = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5011
0
                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
5012
0
                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
5013
0
                        layer.wkv_b     = create_tensor(tn(LLM_TENSOR_ATTN_KV_B,     "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
5014
0
                        layer.wo        = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {              n_head * (                      n_embd_head_v), n_embd}, 0);
5015
5016
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5017
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5018
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5019
0
                    }
5020
0
                } break;
5021
0
            case LLM_ARCH_BITNET:
5022
0
                {
5023
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5024
5025
                    // output
5026
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5027
5028
0
                    for (int i = 0; i < n_layer; ++i) {
5029
0
                        auto & layer = layers[i];
5030
5031
0
                        layer.attn_norm     = create_tensor(tn(LLM_TENSOR_ATTN_NORM,     "weight", i), {n_embd}, 0);
5032
0
                        layer.attn_sub_norm = create_tensor(tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd}, 0);
5033
5034
0
                        layer.wq       = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
5035
0
                        layer.wq_scale = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5036
0
                        layer.wk       = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
5037
0
                        layer.wk_scale = create_tensor(tn(LLM_TENSOR_ATTN_K,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5038
0
                        layer.wv       = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
5039
0
                        layer.wv_scale = create_tensor(tn(LLM_TENSOR_ATTN_V,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5040
0
                        layer.wo       = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5041
0
                        layer.wo_scale = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
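                        // the {1} "scale" tensors presumably hold a per-tensor scale for the quantized BitNet
                        // weights; they are marked TENSOR_NOT_REQUIRED so files without them still load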
5042
5043
0
                        layer.ffn_norm     = create_tensor(tn(LLM_TENSOR_FFN_NORM,     "weight", i), {n_embd}, 0);
5044
0
                        layer.ffn_sub_norm = create_tensor(tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff}, 0);
5045
5046
0
                        layer.ffn_gate       = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
5047
0
                        layer.ffn_gate_scale = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5048
0
                        layer.ffn_down       = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
5049
0
                        layer.ffn_down_scale = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5050
0
                        layer.ffn_up         = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
5051
0
                        layer.ffn_up_scale   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5052
0
                    }
5053
0
                } break;
5054
0
            case LLM_ARCH_T5:
5055
0
                {
5056
0
                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
5057
5058
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5059
5060
                    // output
5061
0
                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
5062
0
                    output_norm     = create_tensor(tn(LLM_TENSOR_DEC_OUTPUT_NORM, "weight"), {n_embd}, 0);
5063
5064
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5065
                    // if output is NULL, init from the input tok embed
5066
0
                    if (output == NULL) {
5067
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5068
0
                    }
5069
5070
                    // n_layer:     number of encoder_layers
5071
                    // dec_n_layer: number of decoder_layers
5072
0
                    const int dec_n_layer = hparams.dec_n_layer;
5073
0
                    if (dec_n_layer > n_layer) {
5074
0
                        layers.resize(dec_n_layer);
5075
0
                    }
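                    // layers was sized for the encoder count (n_layer); growing it to dec_n_layer ensures every
                    // decoder index has a slot, with encoder and decoder tensors for the same index sharing one entry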
5076
5077
                    // load encoder layers
5078
0
                    for (int i = 0; i < n_layer; ++i) {
5079
0
                        auto & layer = layers[i];
5080
5081
0
                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
5082
0
                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5083
5084
0
                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5085
0
                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5086
0
                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5087
0
                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5088
5089
0
                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
5090
0
                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
5091
0
                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5092
0
                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5093
0
                    }
5094
5095
                    // load decoder layers
5096
0
                    for (int i = 0; i < dec_n_layer; ++i) {
5097
0
                        auto & layer = layers[i];
5098
5099
0
                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM,  "weight", i), {n_embd}, 0);
5100
0
                        layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5101
5102
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_DEC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5103
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_DEC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5104
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_DEC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5105
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_DEC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5106
5107
0
                        layer.attn_norm_cross  = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "weight", i), {n_embd}, 0);
5108
                        // this tensor seems to be unused in the HF transformers implementation
5109
0
                        layer.attn_rel_b_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5110
5111
0
                        layer.wq_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5112
0
                        layer.wk_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5113
0
                        layer.wv_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5114
0
                        layer.wo_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5115
5116
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_DEC_FFN_NORM, "weight", i), {n_embd}, 0);
5117
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_DEC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
5118
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_DEC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5119
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_DEC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5120
0
                    }
5121
0
                } break;
5122
0
            case LLM_ARCH_T5ENCODER:
5123
0
                {
5124
0
                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
5125
5126
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5127
5128
                    // output
5129
0
                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
5130
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5131
                    // if output is NULL, init from the input tok embed
5132
0
                    if (output == NULL) {
5133
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5134
0
                    }
5135
5136
0
                    for (int i = 0; i < n_layer; ++i) {
5137
0
                        auto & layer = layers[i];
5138
5139
0
                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
5140
0
                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5141
5142
0
                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5143
0
                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5144
0
                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5145
0
                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5146
5147
0
                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
5148
0
                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
5149
0
                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5150
0
                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5151
0
                    }
5152
0
                } break;
5153
0
            case LLM_ARCH_JAIS:
5154
0
                {
5155
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5156
5157
                    // output
5158
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5159
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
5160
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
5161
5162
0
                    for (int i = 0; i < n_layer; ++i) {
5163
0
                        auto & layer = layers[i];
5164
5165
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, 0);
5166
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "bias", i),   {n_embd}, 0);
5167
5168
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
5169
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
5170
5171
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5172
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
5173
5174
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5175
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
5176
5177
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
5178
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
5179
5180
0
                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd, n_ff}, 0);
5181
0
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "bias", i),   {n_ff}, 0);
5182
5183
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
5184
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
5185
0
                    }
5186
0
                } break;
5187
0
            case LLM_ARCH_CHATGLM:
5188
0
                {
5189
0
                    tok_embd   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
5190
5191
                    // output
5192
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5193
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5194
                    // if output is NULL, init from the input tok embed
5195
0
                    if (output == NULL) {
5196
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5197
0
                    }
5198
5199
0
                    for (int i = 0; i < n_layer; ++i) {
5200
0
                        auto & layer = layers[i];
5201
5202
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5203
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5204
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5205
5206
0
                        if (layer.wqkv == nullptr) {
5207
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5208
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5209
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5210
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5211
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5212
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5213
0
                        }
5214
5215
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5216
5217
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5218
5219
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
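                        // the gate and up projections appear to be fused into a single n_ff * 2 wide tensor
                        // and split again later in the graph, which is why there is no separate ffn_gate here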
5220
5221
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
5222
0
                    }
5223
0
                } break;
5224
0
            case LLM_ARCH_GLM4:
5225
0
                {
5226
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5227
5228
                    // output
5229
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5230
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5231
                    // if output is NULL, init from the input tok embed
5232
0
                    if (output == NULL) {
5233
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5234
0
                    }
5235
5236
0
                    for (int i = 0; i < n_layer; ++i) {
5237
0
                        auto & layer = layers[i];
5238
5239
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5240
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5241
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5242
5243
0
                        if (layer.wqkv == nullptr) {
5244
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5245
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5246
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5247
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5248
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5249
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5250
0
                        }
5251
5252
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5253
5254
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
5255
5256
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5257
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5258
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
5259
5260
0
                        layer.ffn_post_norm  = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
5261
0
                    }
5262
0
                } break;
5263
0
            case LLM_ARCH_GLM4_MOE:
5264
0
                {
5265
0
                    const int64_t n_expert        = hparams.n_expert;
5266
0
                    const int64_t n_expert_used   = hparams.n_expert_used;
5267
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
5268
5269
0
                    GGML_ASSERT(hparams.n_expert > 0 && "n_expert must be > 0 for GLM4_MOE MoE layers");
5270
0
                    GGML_ASSERT(hparams.n_expert_used > 0 && "n_expert_used must be > 0 for GLM4_MOE MoE layers");
5271
5272
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
5273
5274
                    // output
5275
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
5276
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED);
5277
                    // if output is NULL, init from the input tok embed
5278
0
                    if (output == NULL) {
5279
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
5280
0
                    }
5281
5282
                    // Load ALL tensors, including the NextN layers, to satisfy the total tensor count,
5284
                    // but only PROCESS up to the last regular layer (skipping the final NextN layers) in the forward pass
5284
0
                    for (int i = 0; i < n_layer; ++i) {
5285
0
                        int flags = 0;
5286
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
5287
                            // skip all tensors in the NextN layers
5288
0
                            flags |= TENSOR_SKIP;
5289
0
                        }
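                        // e.g. with hypothetical values n_layer = 47 and nextn_predict_layers = 1, only layer 46
                        // (the trailing NextN layer) is flagged with TENSOR_SKIP; its tensors are created only to
                        // satisfy the total tensor count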
5290
5291
0
                        auto & layer = layers[i];
5292
5293
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, flags);
5294
5295
                        // GLM-style attention with bias terms
5296
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, flags);
5297
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, flags);
5298
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, flags);
5299
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), { n_embd_head_k * n_head }, TENSOR_NOT_REQUIRED | flags);
5300
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), { n_embd_k_gqa }, TENSOR_NOT_REQUIRED | flags);
5301
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), { n_embd_v_gqa }, TENSOR_NOT_REQUIRED | flags);
5302
5303
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, flags);
5304
5305
                        // K/Q norm tensors (optional for GLM-4.5 355B variant)
5306
0
                        layer.attn_q_norm = create_tensor(
5307
0
                            tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED | flags);
5308
0
                        layer.attn_k_norm = create_tensor(
5309
0
                            tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED | flags);
5310
5311
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), { n_embd }, flags);
5312
5313
                        // Check if this layer uses MoE or dense FFN based on n_layer_dense_lead
5314
                        // GLM 4.5 uses a hybrid architecture: layer 0 is dense, layers 1+ are MoE
5315
0
                        const bool use_moe = (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead);
5316
5317
0
                        if (use_moe) {
5318
                            // MoE layers
5319
0
                            layer.ffn_gate_inp =
5320
0
                                create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, flags);
5321
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), { n_expert }, flags);
5322
5323
                            // MoE branch
5324
0
                            const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
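                            // when the GGUF does not record an expert FFN size, fall back to an even split of n_ff;
                            // e.g. with hypothetical n_ff = 12288 and n_expert_used = 8 this yields n_ff_exp = 1536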
5325
5326
0
                            layer.ffn_gate_exps = create_tensor(
5327
0
                                tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, flags);
5328
0
                            layer.ffn_down_exps = create_tensor(
5329
0
                                tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, flags);
5330
0
                            layer.ffn_up_exps = create_tensor(
5331
0
                                tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, flags);
5332
5333
                            // Shared expert
5334
0
                            if (n_expert_shared > 0) {
5335
0
                                const int64_t n_ff_shexp = n_ff_exp * n_expert_shared;
5336
0
                                layer.ffn_gate_shexp = create_tensor(
5337
0
                                    tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp }, flags);
5338
0
                                layer.ffn_down_shexp = create_tensor(
5339
0
                                    tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_shexp, n_embd }, flags);
5340
0
                                layer.ffn_up_shexp = create_tensor(
5341
0
                                    tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp }, flags);
5342
0
                            }
5343
0
                        } else {
5344
                            // Dense layers (first k layers) - GLM uses separate gate/up projections
5345
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, flags);
5346
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, flags);
5347
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), { n_embd, n_ff }, flags);
5348
0
                        }
5349
5350
                        // NextN/MTP tensors (preserved but unused) - conditionally loaded for the last nextn_predict_layers layers
5351
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
5352
0
                            layer.nextn.eh_proj          = create_tensor(tn(LLM_TENSOR_NEXTN_EH_PROJ, "weight", i), { 2 * n_embd, n_embd }, flags);
5353
0
                            layer.nextn.enorm            = create_tensor(tn(LLM_TENSOR_NEXTN_ENORM, "weight", i), { n_embd }, flags);
5354
0
                            layer.nextn.hnorm            = create_tensor(tn(LLM_TENSOR_NEXTN_HNORM, "weight", i), { n_embd }, flags);
5355
5356
                            // Optional tensors
5357
0
                            layer.nextn.embed_tokens     = create_tensor(tn(LLM_TENSOR_NEXTN_EMBED_TOKENS, "weight", i), { n_embd, n_vocab }, flags | TENSOR_NOT_REQUIRED);
5358
0
                            layer.nextn.shared_head_head = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", i), { n_embd, n_vocab }, flags | TENSOR_NOT_REQUIRED);
5359
0
                            layer.nextn.shared_head_norm = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", i), { n_embd }, flags | TENSOR_NOT_REQUIRED);
5360
0
                        }
5361
0
                    }
5362
0
                }
5363
0
                break;
5364
0
            case LLM_ARCH_NEMOTRON:
5365
0
                {
5366
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5367
5368
                    // output
5369
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5370
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
5371
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5372
5373
0
                    for (int i = 0; i < n_layer; ++i) {
5374
0
                        auto & layer = layers[i];
5375
5376
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5377
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
5378
5379
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
5380
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
5381
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
5382
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5383
5384
                        // optional bias tensors
5385
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
5386
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5387
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5388
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
5389
5390
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5391
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
5392
5393
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5394
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5395
5396
                        // optional MLP bias
5397
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5398
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
5399
0
                    }
5400
0
                } break;
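For orientation, a minimal standalone sketch of how the grouped-query width n_embd_gqa used for the wk/wv shapes above follows from the per-head size and the KV head count. This is not part of llama-model.cpp and all hyperparameter values are assumed:

// illustrative sketch only - not from llama-model.cpp; values are assumed
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_embd        = 4096;            // model width (assumed)
    const int64_t n_head        = 32;              // query heads (assumed)
    const int64_t n_head_kv     = 8;               // KV heads (assumed, fewer than n_head under GQA)
    const int64_t n_embd_head_k = n_embd / n_head; // 128 per head

    const int64_t n_embd_gqa = n_embd_head_k * n_head_kv; // 1024

    // wq is created as {n_embd, n_embd}; wk/wv as {n_embd, n_embd_gqa}
    std::printf("wq   : %lld x %lld\n", (long long) n_embd, (long long) n_embd);
    std::printf("wk/wv: %lld x %lld\n", (long long) n_embd, (long long) n_embd_gqa);
    return 0;
}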
5401
0
            case LLM_ARCH_NEMOTRON_H:
5402
0
            case LLM_ARCH_NEMOTRON_H_MOE:
5403
0
                {
5404
                    // mamba2 Mixer SSM params
5405
                    // NOTE: int64_t for tensor dimensions
5406
0
                    const int64_t d_conv     = hparams.ssm_d_conv;
5407
0
                    const int64_t d_inner    = hparams.ssm_d_inner;
5408
0
                    const int64_t d_state    = hparams.ssm_d_state;
5409
0
                    const int64_t n_ssm_head = hparams.ssm_dt_rank;
5410
0
                    const int64_t n_group    = hparams.ssm_n_group;
5411
0
                    const int64_t d_in_proj  = 2*d_inner + 2*n_group*d_state + n_ssm_head;
5412
5413
                    // embeddings
5414
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5415
5416
                    // output
5417
0
                    {
5418
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5419
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5420
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
5421
0
                        if (output == NULL) {
5422
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5423
0
                        }
5424
0
                    }
5425
5426
0
                    for (int i = 0; i < n_layer; ++i) {
5427
0
                        auto & layer = layers[i];
5428
5429
                        // all blocks use the attn norm
5430
0
                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5431
5432
0
                        if (hparams.is_recurrent(i)) {
5433
                            // ssm layers
5434
0
                            layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
5435
5436
0
                            layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
5437
0
                            layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, TENSOR_NOT_REQUIRED);
5438
5439
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_ssm_head}, 0);
5440
5441
                            // no "weight" suffix for these
5442
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_ssm_head}, 0);
5443
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_ssm_head}, 0);
5444
5445
0
                            layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
5446
5447
                            // out_proj
5448
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
5449
0
                        } else if (hparams.n_ff(i) == 0) {
5450
                            // attention layers (with optional bias)
5451
0
                            const int64_t n_head_i = hparams.n_head(i);
5452
0
                            const int64_t n_embd_k_gqa_i = hparams.n_embd_k_gqa(i);
5453
0
                            const int64_t n_embd_v_gqa_i = hparams.n_embd_v_gqa(i);
5454
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head_i}, 0);
5455
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa_i}, 0);
5456
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa_i}, 0);
5457
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head_i, n_embd}, 0);
5458
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias",   i), {n_embd},         TENSOR_NOT_REQUIRED);
5459
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias",   i), {n_embd_k_gqa_i}, TENSOR_NOT_REQUIRED);
5460
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias",   i), {n_embd_v_gqa_i}, TENSOR_NOT_REQUIRED);
5461
0
                            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd},         TENSOR_NOT_REQUIRED);
5462
0
                        } else {
5463
0
                            if (n_expert != 0) {
5464
0
                                const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
5465
0
                                const int64_t n_ff_shexp = hparams.n_ff_shexp;
5466
5467
0
                                layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), { n_embd, n_expert}, 0);
5468
0
                                layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert         }, 0);
5469
5470
                                // MoE branch
5471
0
                                layer.ffn_down_exps   = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
5472
0
                                layer.ffn_up_exps     = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
5473
5474
                                // Shared expert branch
5475
0
                                layer.ffn_down_shexp  = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
5476
0
                                layer.ffn_up_shexp    = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, 0);
5477
5478
0
                            } else {
5479
                                // mlp layers
5480
0
                                layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  hparams.n_ff(i), n_embd}, 0);
5481
0
                                layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   hparams.n_ff(i)}, 0);
5482
0
                                layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
5483
0
                                layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias",   i), {hparams.n_ff(i)}, TENSOR_NOT_REQUIRED);
5484
0
                            }
5485
0
                        }
5486
0
                    }
5487
0
                } break;
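A minimal sketch of the mamba2 projection arithmetic above (d_in_proj for ssm_in and the conv width for ssm_conv1d). It is not part of llama-model.cpp and the SSM sizes are assumed:

// illustrative sketch only - not from llama-model.cpp; values are assumed
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t d_inner    = 8192; // assumed SSM inner width
    const int64_t d_state    = 128;  // assumed state size
    const int64_t n_group    = 8;    // assumed group count
    const int64_t n_ssm_head = 64;   // assumed number of SSM heads

    // same arithmetic as d_in_proj and the conv1d row count used above
    const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_ssm_head;
    const int64_t conv_dim  = d_inner + 2*n_group*d_state;

    std::printf("d_in_proj = %lld\n", (long long) d_in_proj); // 18496
    std::printf("conv_dim  = %lld\n", (long long) conv_dim);  // 10240
    return 0;
}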
5488
0
            case LLM_ARCH_EXAONE:
5489
0
                {
5490
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5491
5492
                    // output
5493
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5494
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5495
5496
                    // if output is NULL, init from the input tok embed
5497
0
                    if (output == NULL) {
5498
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5499
0
                    }
5500
5501
0
                    for (int i = 0; i < n_layer; ++i) {
5502
0
                        auto & layer = layers[i];
5503
5504
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5505
5506
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5507
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5508
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5509
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
5510
5511
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM,   "weight", i), {n_embd}, 0);
5512
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
5513
0
                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd,   n_ff}, 0);
5514
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN,   "weight", i), {  n_ff, n_embd}, 0);
5515
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,     "weight", i), {n_embd,   n_ff}, 0);
5516
0
                    }
5517
0
                } break;
5518
0
            case LLM_ARCH_EXAONE4:
5519
0
                {
5520
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5521
5522
                    // output
5523
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5524
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5525
5526
                    // if output is NULL, init from the input tok embed
5527
0
                    if (output == NULL) {
5528
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5529
0
                    }
5530
5531
0
                    for (int i = 0; i < n_layer; ++i) {
5532
0
                        auto & layer = layers[i];
5533
5534
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5535
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5536
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5537
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5538
5539
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
5540
5541
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
5542
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
5543
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
5544
5545
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
5546
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5547
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5548
0
                        layer.ffn_post_norm  = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
5549
0
                    }
5550
0
                } break;
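A small sketch of the flag pattern used for rope_freqs above (optional on every layer, marked duplicated on layers after the first). The enum values here are hypothetical stand-ins, not the real TENSOR_* constants:

// illustrative sketch only - not from llama-model.cpp; flag values are hypothetical
#include <cstdio>

enum example_flag : int {
    EX_NOT_REQUIRED = 1 << 0, // stand-in for TENSOR_NOT_REQUIRED
    EX_DUPLICATED   = 1 << 1, // stand-in for TENSOR_DUPLICATED
};

int main() {
    for (int i = 0; i < 3; ++i) {
        const int flags = EX_NOT_REQUIRED | (i != 0 ? EX_DUPLICATED : 0);
        std::printf("layer %d: not_required=%d duplicated=%d\n",
                    i, (flags & EX_NOT_REQUIRED) != 0, (flags & EX_DUPLICATED) != 0);
    }
    return 0;
}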
5551
0
            case LLM_ARCH_EXAONE_MOE:
5552
0
                {
5553
0
                    const int64_t n_ff_exp       = hparams.n_ff_exp;
5554
0
                    const int64_t n_expert       = hparams.n_expert;
5555
0
                    const int64_t n_expert_used  = hparams.n_expert_used;
5556
0
                    const int64_t n_ff_shexp     = hparams.n_ff_shexp;
5557
0
                    const int64_t head_dim       = hparams.n_embd_head_k;
5558
0
                    const int64_t n_qo_dim       = n_head * head_dim;
5559
0
                    const int64_t n_kv_dim       = n_head_kv * head_dim;
5560
5561
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5562
5563
                    // output
5564
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5565
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
5566
5567
0
                    if (output == NULL) {
5568
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5569
0
                    }
5570
5571
0
                    for (int i = 0; i < n_layer; ++i) {
5572
0
                        int flags = 0;
5573
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
5574
                            // skip all tensors in the NextN layers
5575
0
                            flags |= TENSOR_SKIP;
5576
0
                        }
5577
5578
0
                        auto & layer = layers[i];
5579
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_qo_dim}, flags);
5580
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_kv_dim}, flags);
5581
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_kv_dim}, flags);
5582
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_qo_dim, n_embd}, flags);
5583
5584
0
                        layer.rope_freqs   = create_tensor(tn(LLM_TENSOR_ROPE_FREQS,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0) | flags);
5585
5586
0
                        layer.attn_norm    = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, flags);
5587
0
                        layer.attn_q_norm  = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, flags);
5588
0
                        layer.attn_k_norm  = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, flags);
5589
5590
0
                        layer.ffn_norm     = create_tensor(tn(LLM_TENSOR_FFN_NORM,    "weight", i), {n_embd}, flags);
5591
5592
                        // dense layers for first n_layer_dense_lead layers or nextn_predict_layers layers at the end
5593
0
                        if (i < (int) hparams.n_layer_dense_lead || (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers)) {
5594
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, flags);
5595
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, flags);
5596
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, flags);
5597
0
                        } else {
5598
0
                            layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, flags);
5599
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED | flags);
5600
5601
0
                            if (n_expert == 0) {
5602
0
                                throw std::runtime_error("n_expert must be > 0");
5603
0
                            }
5604
0
                            if (n_expert_used == 0) {
5605
0
                                throw std::runtime_error("n_expert_used must be > 0");
5606
0
                            }
5607
5608
0
                            layer.ffn_gate_exps  = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS,  "weight", i), {n_embd, n_ff_exp, n_expert}, flags);
5609
0
                            layer.ffn_down_exps  = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS,  "weight", i), {n_ff_exp, n_embd, n_expert}, flags);
5610
0
                            layer.ffn_up_exps    = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,    "weight", i), {n_embd, n_ff_exp, n_expert}, flags);
5611
5612
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_shexp}, flags);
5613
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, flags);
5614
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, flags);
5615
0
                        }
5616
5617
                        // NextN/MTP tensors (preserved but unused) - conditionally load for last nextn_predict_layers
5618
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
5619
0
                            layer.nextn.eh_proj          = create_tensor(tn(LLM_TENSOR_NEXTN_EH_PROJ, "weight", i), {2 * n_embd, n_embd}, flags);
5620
0
                            layer.nextn.enorm            = create_tensor(tn(LLM_TENSOR_NEXTN_ENORM,   "weight", i), {n_embd}, flags);
5621
0
                            layer.nextn.hnorm            = create_tensor(tn(LLM_TENSOR_NEXTN_HNORM,   "weight", i), {n_embd}, flags);
5622
5623
0
                            layer.nextn.shared_head_norm = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", i), {n_embd}, flags | TENSOR_NOT_REQUIRED);
5624
0
                            layer.nextn.embed_tokens     = create_tensor(tn(LLM_TENSOR_NEXTN_EMBED_TOKENS,     "weight", i), {n_embd, n_vocab}, flags | TENSOR_NOT_REQUIRED);
5625
0
                            layer.nextn.shared_head_head = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", i), {n_embd, n_vocab}, flags | TENSOR_NOT_REQUIRED);
5626
0
                        }
5627
0
                    }
5628
0
                } break;
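A minimal sketch of the expert-tensor shapes used in the MoE branch above (3-D expert weights plus 2-D shared-expert projections). Not part of llama-model.cpp; all sizes are assumed:

// illustrative sketch only - not from llama-model.cpp; values are assumed
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_embd     = 4096; // assumed
    const int64_t n_ff_exp   = 1024; // assumed per-expert FFN width
    const int64_t n_expert   = 64;   // assumed expert count
    const int64_t n_ff_shexp = 2048; // assumed shared-expert FFN width

    // expert tensors are 3-D: gate/up are {n_embd, n_ff_exp, n_expert}, down is {n_ff_exp, n_embd, n_expert}
    std::printf("gate/up exps: %lld x %lld x %lld\n",
                (long long) n_embd, (long long) n_ff_exp, (long long) n_expert);
    std::printf("down exps   : %lld x %lld x %lld\n",
                (long long) n_ff_exp, (long long) n_embd, (long long) n_expert);

    // shared expert uses plain 2-D projections of width n_ff_shexp
    std::printf("shexp up    : %lld x %lld\n", (long long) n_embd, (long long) n_ff_shexp);
    return 0;
}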
5629
0
            case LLM_ARCH_RWKV6:
5630
0
                {
5631
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5632
5633
                    // Block 0, LN0
5634
0
                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
5635
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
5636
5637
                    // output
5638
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5639
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
5640
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5641
5642
0
                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
5643
0
                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
5644
0
                    const int head_size = hparams.wkv_head_size;
5645
0
                    const int attn_hidden_size = n_embd;
5646
0
                    const int ffn_size = hparams.n_ff_arr[0];
5647
5648
0
                    for (int i = 0; i < n_layer; ++i) {
5649
0
                        auto & layer = layers[i];
5650
5651
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5652
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
5653
5654
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
5655
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, 0);
5656
5657
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
5658
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
5659
5660
0
                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
5661
0
                        layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5662
0
                        layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5663
0
                        layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5664
0
                        layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5665
0
                        layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5666
0
                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, TENSOR_NOT_REQUIRED);
5667
0
                        GGML_ASSERT(!(layer.time_mix_lerp_fused == NULL && layer.time_mix_lerp_w == NULL));
5668
5669
0
                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, 0);
5670
0
                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
5671
0
                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
5672
0
                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
5673
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
5674
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
5675
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5676
0
                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
5677
5678
0
                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
5679
0
                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
5680
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5681
5682
0
                        layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
5683
0
                        layer.channel_mix_lerp_r = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, 0);
5684
5685
0
                        layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
5686
0
                        layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
5687
0
                        layer.channel_mix_receptance = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "weight", i), {n_embd, n_embd}, 0);
5688
0
                    }
5689
5690
0
                } break;
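A minimal sketch of the RWKV6 low-rank time-mix shapes loaded above. Not part of llama-model.cpp; sizes are assumed:

// illustrative sketch only - not from llama-model.cpp; values are assumed
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_embd             = 2048; // assumed
    const int64_t time_mix_extra_dim = 32;   // assumed low-rank width
    const int64_t head_size          = 64;   // assumed wkv head size

    // time_mix_w1 packs 5 low-rank projections side by side; w2 keeps them as a third dim
    std::printf("time_mix_w1   : %lld x %lld\n",
                (long long) n_embd, (long long) (time_mix_extra_dim * 5));
    std::printf("time_mix_w2   : %lld x %lld x 5\n",
                (long long) time_mix_extra_dim, (long long) n_embd);
    // time_mix_first is stored per head: {head_size, n_embd / head_size}
    std::printf("time_mix_first: %lld x %lld\n",
                (long long) head_size, (long long) (n_embd / head_size));
    return 0;
}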
5691
0
            case LLM_ARCH_RWKV6QWEN2:
5692
0
                {
5693
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5694
5695
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5696
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);
5697
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5698
5699
0
                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
5700
0
                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
5701
0
                    const int head_size = hparams.wkv_head_size;
5702
0
                    const int attn_hidden_size = n_embd;
5703
0
                    const int n_head_kv = hparams.n_head_kv();
5704
0
                    int attn_key_value_size;
5705
0
                    if (n_head_kv == 0 || attn_hidden_size / head_size == n_head_kv) {
5706
0
                        attn_key_value_size = attn_hidden_size;
5707
0
                    } else {
5708
0
                        attn_key_value_size = n_head_kv * head_size;
5709
0
                    }
5710
5711
0
                    for (int i = 0; i < n_layer; ++i) {
5712
0
                        auto & layer = layers[i];
5713
5714
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5715
5716
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
5717
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
5718
5719
0
                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
5720
0
                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
5721
5722
0
                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, TENSOR_NOT_REQUIRED);
5723
0
                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
5724
0
                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
5725
0
                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
5726
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {n_embd, attn_key_value_size}, 0);
5727
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {n_embd, attn_key_value_size}, 0);
5728
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5729
0
                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
5730
                        // optional bias tensors
5731
0
                        layer.time_mix_key_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
5732
0
                        layer.time_mix_value_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
5733
0
                        layer.time_mix_receptance_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "bias", i), {attn_hidden_size}, TENSOR_NOT_REQUIRED);
5734
5735
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5736
5737
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5738
5739
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
5740
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5741
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5742
0
                    }
5743
0
                } break;
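A minimal sketch of the key/value width selection above, restated as a standalone helper. Not part of llama-model.cpp; the example sizes are assumed:

// illustrative sketch only - not from llama-model.cpp; values are assumed
#include <cstdint>
#include <cstdio>

// mirrors the choice above: full width when the KV head count is unset or already
// implies n_embd / head_size heads, otherwise n_head_kv * head_size
static int64_t pick_kv_size(int64_t n_embd, int64_t head_size, int64_t n_head_kv) {
    if (n_head_kv == 0 || n_embd / head_size == n_head_kv) {
        return n_embd;
    }
    return n_head_kv * head_size;
}

int main() {
    std::printf("%lld\n", (long long) pick_kv_size(2048, 64, 0));  // 2048 (unset)
    std::printf("%lld\n", (long long) pick_kv_size(2048, 64, 32)); // 2048 (matches n_embd/head_size)
    std::printf("%lld\n", (long long) pick_kv_size(2048, 64, 8));  // 512  (reduced KV width)
    return 0;
}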
5744
0
            case LLM_ARCH_RWKV7:
5745
0
                {
5746
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5747
5748
                    // Block 0, LN0
5749
0
                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
5750
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
5751
5752
                    // output
5753
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5754
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
5755
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5756
5757
0
                    const int n_lora_decay = hparams.n_lora_decay;
5758
0
                    const int n_lora_iclr = hparams.n_lora_iclr;
5759
0
                    const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
5760
0
                    const int n_lora_gate = hparams.n_lora_gate;
5761
0
                    const int attn_hidden_size = n_embd;
5762
0
                    const int ffn_size = hparams.n_ff_arr[0];
5763
5764
0
                    for (int i = 0; i < n_layer; ++i) {
5765
0
                        auto & layer = layers[i];
5766
5767
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5768
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
5769
5770
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
5771
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, 0);
5772
5773
0
                        layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
5774
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
5775
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
5776
5777
0
                        layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
5778
0
                        layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
5779
0
                        layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
5780
5781
0
                        if (i == 0) {
5782
                            // actually not used
5783
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5784
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
5785
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
5786
0
                        } else {
5787
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5788
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
5789
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
5790
0
                        }
5791
5792
0
                        layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, 0);
5793
0
                        layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, 0);
5794
5795
0
                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
5796
5797
0
                        layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
5798
0
                        layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
5799
0
                        layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
5800
5801
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
5802
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
5803
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5804
5805
0
                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
5806
0
                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
5807
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5808
5809
0
                        layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
5810
5811
0
                        layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
5812
0
                        layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
5813
0
                    }
5814
5815
0
                } break;
5816
0
            case LLM_ARCH_ARWKV7:
5817
0
                {
5818
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5819
5820
                    // output
5821
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5822
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5823
5824
0
                    const int n_lora_decay = hparams.n_lora_decay;
5825
0
                    const int n_lora_iclr = hparams.n_lora_iclr;
5826
0
                    const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
5827
0
                    const int n_lora_gate = hparams.n_lora_gate;
5828
0
                    const int attn_hidden_size = n_embd;
5829
5830
0
                    for (int i = 0; i < n_layer; ++i) {
5831
0
                        auto & layer = layers[i];
5832
5833
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5834
5835
0
                        layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
5836
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
5837
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
5838
5839
0
                        layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
5840
0
                        layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
5841
0
                        layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
5842
5843
0
                        if (i == 0) {
5844
                            // actually not used
5845
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5846
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
5847
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
5848
0
                        } else {
5849
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5850
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
5851
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
5852
0
                        }
5853
5854
0
                        layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, TENSOR_NOT_REQUIRED);
5855
0
                        layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, TENSOR_NOT_REQUIRED);
5856
5857
0
                        try {
5858
0
                            layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
5859
0
                        } catch(std::runtime_error & e) {
5860
                            // ARWKV models may not have gate tensors
5861
0
                            layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
5862
0
                        }
5863
5864
0
                        layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
5865
0
                        layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
5866
0
                        layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
5867
5868
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
5869
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
5870
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5871
5872
0
                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
5873
0
                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5874
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5875
5876
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5877
5878
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
5879
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5880
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5881
0
                    }
5882
5883
0
                } break;
5884
0
            case LLM_ARCH_CHAMELEON:
5885
0
                {
5886
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5887
5888
                    // output
5889
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5890
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5891
                    // if output is NULL, init from the input tok embed
5892
0
                    if (output == NULL) {
5893
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5894
0
                    }
5895
5896
0
                    for (int i = 0; i < n_layer; ++i) {
5897
0
                        auto & layer = layers[i];
5898
5899
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5900
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
5901
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
5902
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i),  {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED);
5903
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i),  {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
5904
5905
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
5906
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
5907
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
5908
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5909
5910
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5911
5912
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
5913
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5914
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5915
0
                    }
5916
0
                } break;
5917
0
            case LLM_ARCH_WAVTOKENIZER_DEC:
5918
0
                {
5919
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hparams.n_embd_features, n_vocab}, 0);
5920
5921
0
                    conv1d   = create_tensor(tn(LLM_TENSOR_CONV1D, "weight"), {7, hparams.n_embd_features, hparams.posnet.n_embd}, 0);
5922
0
                    conv1d_b = create_tensor(tn(LLM_TENSOR_CONV1D, "bias"),   {1, hparams.posnet.n_embd}, 0);
5923
5924
                    // posnet
5925
0
                    {
5926
0
                        const int64_t n_embd = hparams.posnet.n_embd;
5927
5928
0
                        for (uint32_t i = 0; i < hparams.posnet.n_layer; ++i) {
5929
0
                            auto & layer = layers[i].posnet;
5930
5931
                            // posnet:
5932
                            //
5933
                            //  - resnet
5934
                            //  - resnet
5935
                            //  - attn
5936
                            //  - resnet
5937
                            //  - resnet
5938
                            //  - norm
5939
                            //
5940
0
                            switch (i) {
5941
0
                                case 0:
5942
0
                                case 1:
5943
0
                                case 3:
5944
0
                                case 4:
5945
0
                                    {
5946
0
                                        layer.norm1   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "weight", i), {1, n_embd}, 0);
5947
0
                                        layer.norm1_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "bias",   i), {1, n_embd}, 0);
5948
5949
0
                                        layer.conv1   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "weight", i), {3, n_embd, n_embd}, 0);
5950
0
                                        layer.conv1_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "bias",   i), {1, n_embd}, 0);
5951
5952
0
                                        layer.norm2   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "weight", i), {1, n_embd}, 0);
5953
0
                                        layer.norm2_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "bias",   i), {1, n_embd}, 0);
5954
5955
0
                                        layer.conv2   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "weight", i), {3, n_embd, n_embd}, 0);
5956
0
                                        layer.conv2_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "bias",   i), {1, n_embd}, 0);
5957
0
                                    } break;
5958
0
                                case 2:
5959
0
                                    {
5960
0
                                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
5961
0
                                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
5962
5963
0
                                        layer.attn_q      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "weight", i), {1, n_embd, n_embd}, 0);
5964
0
                                        layer.attn_q_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "bias",   i), {1, n_embd}, 0);
5965
5966
0
                                        layer.attn_k      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "weight", i), {1, n_embd, n_embd}, 0);
5967
0
                                        layer.attn_k_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "bias",   i), {1, n_embd}, 0);
5968
5969
0
                                        layer.attn_v      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "weight", i), {1, n_embd, n_embd}, 0);
5970
0
                                        layer.attn_v_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "bias",   i), {1, n_embd}, 0);
5971
5972
0
                                        layer.attn_o      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "weight", i), {1, n_embd, n_embd}, 0);
5973
0
                                        layer.attn_o_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "bias",   i), {1, n_embd}, 0);
5974
0
                                    } break;
5975
0
                                case 5:
5976
0
                                    {
5977
0
                                        layer.norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
5978
0
                                        layer.norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
5979
0
                                    } break;
5980
0
                                default: GGML_ABORT("unknown posnet layer");
5981
0
                            };
5982
0
                        }
5983
0
                    }
5984
5985
0
                    GGML_ASSERT(hparams.posnet.n_embd == hparams.convnext.n_embd);
5986
5987
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {hparams.posnet.n_embd}, 0);
5988
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {hparams.posnet.n_embd}, 0);
5989
5990
                    // convnext
5991
0
                    {
5992
0
                        const int64_t n_embd = hparams.convnext.n_embd;
5993
5994
0
                        for (uint32_t i = 0; i < hparams.convnext.n_layer; ++i) {
5995
0
                            auto & layer = layers[i].convnext;
5996
5997
0
                            layer.dw     = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "weight", i), {7, 1, n_embd}, 0);
5998
0
                            layer.dw_b   = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "bias",   i), {1, n_embd}, 0);
5999
6000
0
                            layer.norm   = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "weight", i), {n_embd}, 0);
6001
0
                            layer.norm_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "bias",   i), {n_embd}, 0);
6002
6003
0
                            layer.pw1    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "weight", i), {n_embd, n_ff}, 0);
6004
0
                            layer.pw1_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "bias",   i), {n_ff}, 0);
6005
6006
0
                            layer.pw2    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "weight", i), {n_ff, n_embd}, 0);
6007
0
                            layer.pw2_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "bias",   i), {n_embd}, 0);
6008
6009
0
                            layer.gamma  = create_tensor(tn(LLM_TENSOR_CONVNEXT_GAMMA, "weight", i), {n_embd}, 0);
6010
0
                        }
6011
6012
                        // output
6013
0
                        output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6014
0
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
6015
0
                    }
6016
6017
0
                    output   = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hparams.convnext.n_embd, n_embd}, 0);
6018
0
                    output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"),   {n_embd}, 0);
6019
0
                } break;
6020
0
            case LLM_ARCH_BAILINGMOE:
6021
0
                {
6022
0
                    const int64_t n_ff_exp            = hparams.n_ff_exp;
6023
0
                    const int64_t n_expert_shared     = hparams.n_expert_shared;
6024
6025
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6026
6027
                    // output
6028
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6029
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6030
6031
0
                    for (int i = 0; i < n_layer; ++i) {
6032
0
                        auto & layer = layers[i];
6033
6034
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6035
6036
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_head * n_rot}, 0);
6037
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
6038
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
6039
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_rot, n_embd}, 0);
6040
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6041
6042
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6043
6044
0
                        if (n_expert == 0) {
6045
0
                            throw std::runtime_error("n_expert must be > 0");
6046
0
                        }
6047
0
                        if (n_expert_used == 0) {
6048
0
                            throw std::runtime_error("n_expert_used must be > 0");
6049
0
                        }
6050
6051
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6052
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
6053
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6054
6055
0
                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
6056
0
                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
6057
0
                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
6058
0
                    }
6059
0
                } break;
6060
0
            case LLM_ARCH_BAILINGMOE2:
6061
0
                {
6062
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
6063
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
6064
6065
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6066
6067
                    // output
6068
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6069
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6070
6071
0
                    GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for bailingmoe2");
6072
0
                    GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for bailingmoe2");
6073
6074
0
                    for (int i = 0; i < n_layer; ++i) {
6075
0
                        int flags = 0;
6076
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
6077
                            // skip all tensors in the NextN layers
6078
0
                            flags |= TENSOR_SKIP;
6079
0
                        }
6080
6081
0
                        auto & layer = layers[i];
6082
6083
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, flags);
6084
6085
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, flags);
6086
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, flags);
6087
6088
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, flags);
6089
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, flags);
6090
6091
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, flags);
6092
6093
0
                        if (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
6094
0
                            const int64_t n_ff_shexp = (hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff_exp) * n_expert_shared;
6095
6096
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, flags);
6097
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED | flags);
6098
6099
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, flags);
6100
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, flags);
6101
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, flags);
6102
6103
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_shexp}, flags);
6104
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, flags);
6105
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, flags);
6106
0
                        } else { // Dense layers
6107
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, flags);
6108
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, flags);
6109
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, flags);
6110
0
                        }
6111
6112
                        // NextN/MTP tensors (preserved but unused) - conditionally loaded for the last nextn_predict_layers layers
6113
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
6114
0
                            layer.nextn.eh_proj          = create_tensor(tn(LLM_TENSOR_NEXTN_EH_PROJ, "weight", i), { 2 * n_embd, n_embd }, flags);
6115
0
                            layer.nextn.embed_tokens     = create_tensor(tn(LLM_TENSOR_NEXTN_EMBED_TOKENS, "weight", i), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED | flags);
6116
0
                            layer.nextn.enorm            = create_tensor(tn(LLM_TENSOR_NEXTN_ENORM, "weight", i), { n_embd }, flags);
6117
0
                            layer.nextn.hnorm            = create_tensor(tn(LLM_TENSOR_NEXTN_HNORM, "weight", i), { n_embd }, flags);
6118
0
                            layer.nextn.shared_head_head = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", i), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED | flags);
6119
0
                            layer.nextn.shared_head_norm = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", i), { n_embd }, TENSOR_NOT_REQUIRED | flags);
6120
0
                            layer.layer_out_norm         = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, flags);
6121
0
                        }
6122
0
                    }
6123
0
                } break;
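The BAILINGMOE2 case above marks the trailing NextN/MTP layers with TENSOR_SKIP so their tensors stay in the file but are not loaded for inference. The following minimal standalone sketch shows which layer indices that predicate selects; the n_layer and nextn_predict_layers values are assumptions chosen only for illustration.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t n_layer              = 20; // assumed layer count
        const uint32_t nextn_predict_layers = 1;  // assumed NextN depth

        for (uint32_t i = 0; i < n_layer; ++i) {
            // same predicate as in the loop above: only the last nextn_predict_layers layers are skipped
            const bool skip = nextn_predict_layers > 0 && i >= n_layer - nextn_predict_layers;
            if (skip) {
                std::printf("layer %u gets TENSOR_SKIP (NextN/MTP)\n", i);
            }
        }
        return 0;
    }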
6124
0
            case LLM_ARCH_DOTS1:
6125
0
                {
6126
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
6127
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
6128
6129
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6130
6131
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6132
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6133
6134
0
                    for (int i = 0; i < n_layer; ++i) {
6135
0
                        auto & layer = layers[i];
6136
6137
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6138
6139
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6140
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6141
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6142
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6143
6144
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6145
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6146
6147
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6148
6149
0
                        if (i < (int) hparams.n_layer_dense_lead) {
6150
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6151
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6152
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6153
0
                        } else {
6154
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6155
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
6156
6157
0
                            if (n_expert == 0) {
6158
0
                                throw std::runtime_error("n_expert must be > 0");
6159
0
                            }
6160
0
                            if (n_expert_used == 0) {
6161
0
                                throw std::runtime_error("n_expert_used must be > 0");
6162
0
                            }
6163
6164
                            // MoE branch
6165
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6166
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
6167
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6168
6169
                            // Shared expert branch
6170
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
6171
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
6172
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
6173
0
                        }
6174
0
                    }
6175
0
                } break;
6176
0
            case LLM_ARCH_ARCEE:
6177
0
                {
6178
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6179
6180
                    // output
6181
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6182
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6183
6184
                    // if output is NULL, init from the input tok embed
6185
0
                    if (output == NULL) {
6186
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6187
0
                    }
6188
6189
0
                    for (int i = 0; i < n_layer; ++i) {
6190
0
                        auto & layer = layers[i];
6191
6192
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6193
6194
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6195
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6196
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6197
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6198
6199
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6200
6201
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6202
6203
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6204
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6205
0
                    }
6206
0
                } break;
6207
0
            case LLM_ARCH_AFMOE:
6208
0
                {
6209
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6210
6211
                    // output
6212
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6213
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6214
6215
                    // if output is NULL, init from the input tok embed
6216
0
                    if (output == NULL) {
6217
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6218
0
                    }
6219
6220
0
                    const int64_t n_ff_exp = hparams.n_ff_exp;
6221
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
6222
6223
0
                    for (int i = 0; i < n_layer; ++i) {
6224
0
                        auto & layer = layers[i];
6225
6226
                        // dual attention normalization
6227
0
                        layer.attn_norm      = create_tensor(tn(LLM_TENSOR_ATTN_NORM,      "weight", i), {n_embd}, 0);
6228
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
6229
6230
                        // attention projections
6231
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6232
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6233
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6234
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6235
6236
                        // Q/K normalization
6237
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6238
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6239
6240
                        // attention gating
6241
0
                        layer.wqkv_gate = create_tensor(tn(LLM_TENSOR_ATTN_GATE, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6242
6243
                        // dual ffn normalization
6244
0
                        layer.ffn_norm      = create_tensor(tn(LLM_TENSOR_FFN_NORM,      "weight", i), {n_embd}, 0);
6245
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
6246
6247
0
                        if (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) {
6248
                            // MoE layers
6249
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6250
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
6251
6252
                            // grouped expert weights
6253
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
6254
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
6255
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
6256
6257
                            // shared expert
6258
0
                            if (n_expert_shared > 0) {
6259
0
                                const int64_t n_ff_shexp = n_ff_exp * n_expert_shared;
6260
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_shexp}, 0);
6261
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
6262
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, 0);
6263
0
                            }
6264
0
                        } else {
6265
                            // Dense layers
6266
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
6267
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
6268
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
6269
0
                        }
6270
0
                    }
6271
0
                } break;
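In the AFMOE case above, the shared-expert FFN width is derived as n_ff_exp * n_expert_shared. A small sketch of that sizing, using assumed values for both hyperparameters:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t n_ff_exp        = 1408; // assumed per-expert FFN width
        const int64_t n_expert_shared = 2;    // assumed number of shared experts

        // width used for the ffn_{gate,down,up}_shexp tensors above
        const int64_t n_ff_shexp = n_ff_exp * n_expert_shared; // 2816 with these values

        std::printf("n_ff_shexp = %lld\n", (long long) n_ff_shexp);
        return 0;
    }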
6272
0
            case LLM_ARCH_ERNIE4_5:
6273
0
            case LLM_ARCH_ERNIE4_5_MOE:
6274
0
                {
6275
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6276
6277
                    // output
6278
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6279
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6280
                    // if output is NULL, init from the input tok embed
6281
0
                    if (output == NULL) {
6282
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6283
0
                    }
6284
6285
0
                    for (int i = 0; i < n_layer; ++i) {
6286
0
                        auto & layer = layers[i];
6287
6288
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6289
6290
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6291
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
6292
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
6293
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6294
6295
                        // optional bias tensors
6296
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
6297
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
6298
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
6299
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
6300
6301
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6302
6303
0
                        if (arch == LLM_ARCH_ERNIE4_5_MOE && static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
6304
0
                            int n_ff_exp = hparams.n_ff_exp;
6305
6306
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
6307
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
6308
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff_exp, n_expert}, TENSOR_NOT_REQUIRED);
6309
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff_exp, n_embd, n_expert}, 0);
6310
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
6311
6312
                            // Shared expert (if present)
6313
0
                            if (hparams.n_ff_shexp > 0) {
6314
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {    n_embd, hparams.n_ff_shexp}, 0);
6315
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd    }, 0);
6316
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {    n_embd, hparams.n_ff_shexp}, 0);
6317
0
                            }
6318
0
                        } else { // Dense layers
6319
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6320
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6321
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6322
0
                        }
6323
0
                    }
6324
0
                } break;
6325
0
            case LLM_ARCH_FALCON_H1:
6326
0
                {
6327
                    // Common
6328
0
                    const int64_t hidden_size = hparams.n_embd; // hidden_size
6329
6330
                    // mamba2 Mixer SSM params
6331
0
                    const int64_t ssm_conv_kernel_size  = hparams.ssm_d_conv; // ssm_conv_kernel_size
6332
0
                    const int64_t ssm_n_groups          = hparams.ssm_n_group; // ssm_n_groups
6333
0
                    const int64_t ssm_state_size        = hparams.ssm_d_state; // ssm_state_size
6334
0
                    const int64_t ssm_intermediate_size = hparams.ssm_d_inner; // TODO expand
6335
0
                    const int64_t ssm_num_heads         = hparams.ssm_dt_rank; // ssm_num_heads
6336
0
                    const int64_t ssm_conv_dim          = ssm_intermediate_size + 2 * ssm_n_groups * ssm_state_size;
6337
0
                    const int64_t ssm_projection_size   = ssm_intermediate_size + ssm_conv_dim + ssm_num_heads;
6338
6339
                    // attn params
6340
0
                    const int64_t attn_num_attention_head = hparams.n_head(0); // attn_num_attention_head
6341
0
                    const int64_t attn_num_key_value_head = hparams.n_head_kv(0);
6342
6343
                    // ffn params
6344
0
                    const int64_t ffn_intermediate_size = hparams.n_ff(0);
6345
6346
                    // embeddings
6347
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hidden_size, n_vocab}, 0);
6348
6349
                    // output
6350
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hidden_size, n_vocab}, TENSOR_NOT_REQUIRED);
6351
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {hidden_size}, 0);
6352
6353
                    // if output is NULL, init from the input tok embed
6354
0
                    if (output == NULL) {
6355
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hidden_size, n_vocab}, TENSOR_DUPLICATED);
6356
0
                    }
6357
6358
0
                    for (int i = 0; i < n_layer; ++i) {
6359
0
                        auto & layer = layers[i];
6360
6361
                        /*SSM LAYERS*/
6362
                        // ssm in
6363
0
                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {hidden_size, ssm_projection_size}, 0);
6364
                        // ssm 1d conv
6365
0
                        layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {ssm_conv_kernel_size, ssm_conv_dim}, 0);
6366
0
                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {ssm_conv_dim}, TENSOR_NOT_REQUIRED);
6367
                        // ssm_dt
6368
0
                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {ssm_num_heads}, 0);
6369
                        // no "weight" suffix for these
6370
0
                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, ssm_num_heads}, 0);
6371
0
                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, ssm_num_heads}, 0);
6372
                        // ssm_norm
6373
0
                        layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {ssm_intermediate_size / ssm_n_groups, ssm_n_groups}, TENSOR_NOT_REQUIRED);
6374
                        // out_proj
6375
0
                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {ssm_intermediate_size, hidden_size}, 0);
6376
6377
                        /*ATTENTION LAYERS*/
6378
                        // attention layers (with optional bias)
6379
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {hidden_size, n_embd_head_k * attn_num_attention_head}, 0);
6380
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {hidden_size, attn_num_key_value_head * n_embd_head_k}, 0);
6381
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {hidden_size, attn_num_key_value_head * n_embd_head_v}, 0);
6382
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * attn_num_attention_head, hidden_size}, 0);
6383
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
6384
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {attn_num_key_value_head * n_embd_head_k}, TENSOR_NOT_REQUIRED);
6385
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {attn_num_key_value_head * n_embd_head_v}, TENSOR_NOT_REQUIRED);
6386
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
6387
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {hidden_size}, 0);
6388
6389
6390
                        // feed forward (w/ optional biases)
6391
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, i), {hidden_size}, 0);
6392
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6393
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {hidden_size,   ffn_intermediate_size}, 0);
6394
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  ffn_intermediate_size, hidden_size}, 0);
6395
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {hidden_size,   ffn_intermediate_size}, 0);
6396
6397
0
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {ffn_intermediate_size}, TENSOR_NOT_REQUIRED);
6398
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
6399
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {ffn_intermediate_size}, TENSOR_NOT_REQUIRED);
6400
0
                    }
6401
0
                } break;
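The Falcon-H1 case above derives the mamba2 mixer sizes from the SSM hyperparameters: the conv dimension adds the grouped B/C states to the inner size, and the input projection covers the inner size, the conv dimension, and one dt value per head. A standalone sketch of that arithmetic with placeholder hparams values (not taken from a real checkpoint):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t ssm_intermediate_size = 4096; // assumed ssm_d_inner
        const int64_t ssm_n_groups          = 1;    // assumed ssm_n_group
        const int64_t ssm_state_size        = 256;  // assumed ssm_d_state
        const int64_t ssm_num_heads         = 128;  // assumed ssm_dt_rank

        const int64_t ssm_conv_dim        = ssm_intermediate_size + 2 * ssm_n_groups * ssm_state_size;
        const int64_t ssm_projection_size = ssm_intermediate_size + ssm_conv_dim + ssm_num_heads;

        std::printf("ssm_conv_dim        = %lld\n", (long long) ssm_conv_dim);        // 4096 + 2*1*256 = 4608
        std::printf("ssm_projection_size = %lld\n", (long long) ssm_projection_size); // 4096 + 4608 + 128 = 8832
        return 0;
    }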
6402
0
            case LLM_ARCH_HUNYUAN_MOE:
6403
0
                {
6404
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6405
6406
                    // output
6407
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6408
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6409
                    // if output is NULL, init from the input tok embed
6410
0
                    if (output == NULL) {
6411
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6412
0
                    }
6413
6414
0
                    for (int i = 0; i < n_layer; ++i) {
6415
0
                        auto & layer = layers[i];
6416
6417
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6418
6419
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6420
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6421
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6422
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6423
6424
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6425
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6426
6427
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6428
6429
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
6430
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, 0);
6431
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
6432
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
6433
6434
0
                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
6435
0
                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
6436
0
                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
6437
0
                    }
6438
0
                } break;
6439
0
            case LLM_ARCH_HUNYUAN_DENSE:
6440
0
                {
6441
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6442
6443
                    // output
6444
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6445
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6446
                    // if output is NULL, init from the input tok embed
6447
0
                    if (output == NULL) {
6448
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6449
0
                    }
6450
6451
0
                    for (int i = 0; i < n_layer; ++i) {
6452
0
                        auto & layer = layers[i];
6453
6454
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6455
6456
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6457
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6458
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6459
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6460
6461
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6462
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6463
6464
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6465
6466
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6467
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6468
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6469
6470
0
                    }
6471
0
                } break;
6472
0
            case LLM_ARCH_SMOLLM3:
6473
0
                {
6474
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6475
6476
                    // output
6477
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6478
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6479
6480
                    // if output is NULL, init from the input tok embed
6481
0
                    if (output == NULL) {
6482
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6483
0
                    }
6484
6485
0
                    for (int i = 0; i < n_layer; ++i) {
6486
0
                        auto & layer = layers[i];
6487
6488
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6489
6490
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6491
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6492
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6493
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6494
6495
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6496
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6497
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6498
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6499
0
                    }
6500
0
                } break;
6501
0
            case LLM_ARCH_OPENAI_MOE:
6502
0
                {
6503
0
                    const int64_t n_ff_exp = hparams.n_ff_exp;
6504
6505
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6506
6507
                    // output
6508
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6509
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6510
6511
0
                    for (int i = 0; i < n_layer; ++i) {
6512
0
                        auto & layer = layers[i];
6513
6514
0
                        layer.attn_norm      = create_tensor(tn(LLM_TENSOR_ATTN_NORM,      "weight", i), {n_embd}, 0);
6515
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
6516
6517
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_head * n_rot}, 0);
6518
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
6519
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
6520
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_rot, n_embd}, 0);
6521
6522
0
                        layer.attn_sinks = create_tensor(tn(LLM_TENSOR_ATTN_SINKS, "weight", i), {n_head}, 0);
6523
6524
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {  n_embd, n_expert}, 0);
6525
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6526
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
6527
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6528
6529
                        // bias
6530
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_head * n_rot}, 0);
6531
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_head_kv * n_rot}, 0);
6532
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_head_kv * n_rot}, 0);
6533
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
6534
6535
0
                        layer.ffn_gate_inp_b  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "bias", i), {n_expert}, 0);
6536
0
                        layer.ffn_gate_exps_b = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
6537
0
                        layer.ffn_down_exps_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), {  n_embd, n_expert}, 0);
6538
0
                        layer.ffn_up_exps_b   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "bias", i), {n_ff_exp, n_expert}, 0);
6539
0
                    }
6540
0
                } break;
6541
0
            case LLM_ARCH_LFM2:
6542
0
            case LLM_ARCH_LFM2MOE:
6543
0
                {
6544
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6545
6546
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM_LFM2, "weight"), {n_embd}, 0);
6547
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,           "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6548
6549
0
                    if (output == NULL) {
6550
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6551
0
                    }
6552
6553
0
                    for (int i = 0; i < n_layer; ++i) {
6554
0
                        auto & layer = layers[i];
6555
6556
0
                        const bool is_moe_layer = i >= static_cast<int>(hparams.n_layer_dense_lead);
6557
6558
                        // ffn/moe is the same for transformer and conv layers
6559
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6560
0
                        if (is_moe_layer) {
6561
0
                            GGML_ASSERT(n_expert && n_expert_used);
6562
0
                            layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i),  {n_embd, n_expert}, 0);
6563
0
                            layer.ffn_gate_exps   = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, hparams.n_ff_exp, n_expert}, 0);
6564
0
                            layer.ffn_down_exps   = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {hparams.n_ff_exp,   n_embd, n_expert}, 0);
6565
0
                            layer.ffn_up_exps     = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i),   {n_embd, hparams.n_ff_exp, n_expert}, 0);
6566
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
6567
0
                        } else {  // dense
6568
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6569
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6570
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6571
0
                        }
6572
6573
                        // for operator_norm
6574
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6575
6576
0
                        if (!hparams.is_recurrent(i)) {
6577
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6578
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6579
0
                            GGML_ASSERT(n_embd_v_gqa == n_embd_k_gqa);
6580
6581
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
6582
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, hparams.n_embd_k_gqa(i)}, 0);
6583
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, hparams.n_embd_v_gqa(i)}, 0);
6584
6585
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
6586
0
                        } else {
6587
0
                            layer.shortconv.conv     = create_tensor(tn(LLM_TENSOR_SHORTCONV_CONV,    "weight", i), {hparams.n_shortconv_l_cache, n_embd}, 0);
6588
0
                            layer.shortconv.in_proj  = create_tensor(tn(LLM_TENSOR_SHORTCONV_INPROJ,  "weight", i), {n_embd, 3 * n_embd}, 0);
6589
0
                            layer.shortconv.out_proj = create_tensor(tn(LLM_TENSOR_SHORTCONV_OUTPROJ, "weight", i), {n_embd, n_embd}, 0);
6590
0
                        }
6591
0
                    }
6592
6593
                    // for LFM2-ColBert-350M
6594
0
                    dense_2_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_2_OUT, "weight"), {n_embd, hparams.get_n_embd_out()}, TENSOR_NOT_REQUIRED);
6595
0
                } break;
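The LFM2/LFM2MOE case above makes two per-layer choices: layers at or past n_layer_dense_lead get MoE FFN tensors instead of dense ones, and recurrent layers get shortconv tensors instead of attention projections. A toy enumeration of those decisions follows; the layer counts and the recurrent pattern are assumptions for illustration only (the real split comes from hparams.is_recurrent(i)):

    #include <cstdio>

    int main() {
        const int n_layer            = 8; // assumed
        const int n_layer_dense_lead = 2; // assumed

        for (int i = 0; i < n_layer; ++i) {
            const bool is_moe_layer = i >= n_layer_dense_lead; // same test as above
            const bool is_recurrent = (i % 3 != 2);            // assumed pattern, stand-in for hparams.is_recurrent(i)

            std::printf("layer %d: ffn=%s, mixer=%s\n", i,
                        is_moe_layer ? "moe" : "dense",
                        is_recurrent ? "shortconv" : "attention");
        }
        return 0;
    }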
6596
0
            case LLM_ARCH_SMALLTHINKER:
6597
0
                {
6598
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
6599
6600
                    // output
6601
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
6602
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6603
6604
                    // if output is NULL, init from the input tok embed
6605
0
                    if (output == NULL) {
6606
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6607
0
                    }
6608
6609
0
                    for (int i = 0; i < n_layer; ++i) {
6610
0
                        auto & layer = layers[i];
6611
6612
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
6613
6614
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6615
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
6616
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
6617
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6618
6619
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
6620
6621
0
                        GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for SMALLTHINKER");
6622
0
                        GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for SMALLTHINKER");
6623
6624
                        // MoE branch
6625
0
                        const int64_t n_ff_exp = hparams.n_ff_exp;
6626
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, 0);
6627
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6628
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, 0);
6629
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6630
0
                    }
6631
0
                } break;
6632
0
            case LLM_ARCH_GROVEMOE:
6633
0
                {
6634
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6635
6636
                    // output
6637
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6638
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6639
                    // if output is NULL, init from the input tok embed
6640
0
                    if (output == NULL) {
6641
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6642
0
                    }
6643
6644
0
                    GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for GROVEMOE");
6645
0
                    GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for GROVEMOE");
6646
0
                    GGML_ASSERT(hparams.n_group_experts > 0 && "n_group_experts must be > 0 for GROVEMOE");
6647
6648
0
                    for (int i = 0; i < n_layer; ++i) {
6649
0
                        auto & layer = layers[i];
6650
6651
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6652
6653
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6654
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
6655
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
6656
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6657
6658
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6659
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6660
6661
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6662
6663
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6664
6665
                        // MoE branch
6666
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
6667
0
                        const int64_t n_ff_chexp = hparams.n_ff_chexp ? hparams.n_ff_chexp : n_embd_head_k;
6668
0
                        const int64_t n_chunk_expert = n_expert / hparams.n_group_experts;
6669
6670
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6671
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
6672
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6673
6674
0
                        layer.ffn_gate_chexps = create_tensor(tn(LLM_TENSOR_FFN_GATE_CHEXPS, "weight", i), {  n_embd, n_ff_chexp, n_chunk_expert}, 0);
6675
0
                        layer.ffn_down_chexps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_CHEXPS, "weight", i), {n_ff_chexp,   n_embd, n_chunk_expert}, 0);
6676
0
                        layer.ffn_up_chexps   = create_tensor(tn(LLM_TENSOR_FFN_UP_CHEXPS,   "weight", i), {  n_embd, n_ff_chexp, n_chunk_expert}, 0);
6677
0
                    }
6678
0
                } break;
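In the GROVEMOE case above, the per-expert FFN width falls back to n_ff / n_expert_used when n_ff_exp is unset, and the chunk experts are grouped as n_expert / n_group_experts. A short sketch of that fallback arithmetic with assumed hyperparameters:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t n_ff            = 8192; // assumed
        const int64_t n_expert        = 64;   // assumed
        const int64_t n_expert_used   = 8;    // assumed
        const int64_t n_group_experts = 8;    // assumed
        const int64_t n_ff_exp_meta   = 0;    // assume the value is absent from hparams, so the fallback applies

        const int64_t n_ff_exp       = n_ff_exp_meta ? n_ff_exp_meta : n_ff / n_expert_used; // 1024
        const int64_t n_chunk_expert = n_expert / n_group_experts;                           // 8

        std::printf("n_ff_exp = %lld, n_chunk_expert = %lld\n",
                    (long long) n_ff_exp, (long long) n_chunk_expert);
        return 0;
    }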
6679
0
            case LLM_ARCH_APERTUS:
6680
0
                {
6681
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
6682
6683
                    // output
6684
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
6685
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), { n_embd, n_vocab }, 0);
6686
6687
0
                    for (int i = 0; i < n_layer; ++i) {
6688
0
                        auto & layer = layers[i];
6689
6690
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
6691
6692
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
6693
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6694
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6695
0
                        } else {
6696
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6697
0
                        }
6698
6699
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6700
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), { n_embd, n_embd_gqa }, 0);
6701
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), { n_embd, n_embd_gqa }, 0);
6702
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6703
6704
                        // optional bias tensors
6705
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), { n_embd },     TENSOR_NOT_REQUIRED);
6706
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), { n_embd_gqa }, TENSOR_NOT_REQUIRED);
6707
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), { n_embd_gqa }, TENSOR_NOT_REQUIRED);
6708
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd },     TENSOR_NOT_REQUIRED);
6709
6710
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
6711
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
6712
0
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
6713
6714
                        // Q and K layernorms for Apertus
6715
0
                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, 0);
6716
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), { n_embd_head_k }, TENSOR_NOT_REQUIRED);
6717
0
                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, 0);
6718
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), { n_embd_head_k }, TENSOR_NOT_REQUIRED);
6719
0
                    }
6720
0
                } break;
6721
0
            case LLM_ARCH_MINIMAX_M2:
6722
0
                {
6723
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6724
6725
                    // output
6726
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6727
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6728
6729
0
                    for (int i = 0; i < n_layer; ++i) {
6730
0
                        auto & layer = layers[i];
6731
6732
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6733
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
6734
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
6735
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6736
6737
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6738
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k * n_head}, 0);
6739
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_k_gqa}, 0);
6740
6741
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6742
6743
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6744
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
6745
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
6746
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
6747
0
                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
6748
0
                    }
6749
0
                } break;
6750
0
            case LLM_ARCH_COGVLM:
6751
0
                {
6752
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6753
6754
                    // output
6755
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6756
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6757
6758
                    // if output is NULL, init from the input tok embed
6759
0
                    if (output == NULL) {
6760
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6761
0
                    }
6762
6763
0
                    for (int i = 0; i < n_layer; ++i) {
6764
0
                        auto & layer = layers[i];
6765
6766
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6767
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd_head_k * n_head * 3}, 0);
6768
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6769
6770
0
                        layer.visexp_attn_wqkv = create_tensor(tn(LLM_TENSOR_VISEXP_ATTN_QKV, "weight", i), {n_embd, n_embd_head_k * n_head * 3}, 0);
6771
0
                        layer.visexp_attn_wo = create_tensor(tn(LLM_TENSOR_VISEXP_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6772
6773
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6774
6775
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6776
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6777
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6778
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6779
6780
0
                        layer.visexp_ffn_gate = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6781
0
                        layer.visexp_ffn_down = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6782
0
                        layer.visexp_ffn_up   = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6783
0
                    }
6784
0
                } break;
6785
0
            case LLM_ARCH_PANGU_EMBED:
6786
0
                {
6787
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6788
6789
                    // output
6790
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6791
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6792
6793
                    // if output is NULL, init from the input tok embed
6794
0
                    if (output == NULL) {
6795
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6796
0
                    }
6797
6798
0
                    for (int i = 0; i < n_layer; ++i) {
6799
0
                        auto & layer = layers[i];
6800
6801
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6802
6803
                        // weight tensors
6804
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6805
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6806
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6807
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6808
6809
                        // bias tensors
6810
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd_head_k * n_head}, 0);
6811
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, 0);
6812
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, 0);
6813
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
6814
6815
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6816
6817
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
6818
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6819
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6820
0
                        } else {
6821
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6822
0
                        }
6823
6824
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6825
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6826
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6827
0
                    }
6828
0
                } break;
6829
0
            case LLM_ARCH_QWEN3NEXT:
6830
0
                {
6831
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
6832
6833
                    // output
6834
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
6835
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED);
6836
6837
                    // if output is NULL, init from the input tok embed
6838
0
                    if (output == NULL) {
6839
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
6840
0
                    }
6841
6842
0
                    const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
6843
6844
                    // Calculate dimensions from hyperparameters
6845
0
                    const int64_t head_k_dim = hparams.ssm_d_state;
6846
0
                    const int64_t head_v_dim = hparams.ssm_d_state;
6847
0
                    const int64_t n_k_heads  = hparams.ssm_n_group;
6848
0
                    const int64_t n_v_heads  = hparams.ssm_dt_rank;
6849
0
                    const int64_t key_dim    = head_k_dim * n_k_heads;
6850
0
                    const int64_t value_dim  = head_v_dim * n_v_heads;
6851
0
                    const int64_t conv_dim   = key_dim * 2 + value_dim;
6852
6853
                    // Calculate projection sizes
6854
0
                    const int64_t qkvz_dim = key_dim * 2 + value_dim * 2;
6855
0
                    const int64_t ba_dim   = n_v_heads * 2;
6856
6857
0
                    for (int i = 0; i < n_layer; ++i) {
6858
0
                        auto & layer = layers[i];
6859
6860
0
                        layer.attn_norm      = create_tensor(tn(LLM_TENSOR_ATTN_NORM,      "weight", i), { n_embd }, 0);
6861
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), { n_embd }, 0);
6862
6863
0
                        if (!hparams.is_recurrent(i)) {
6864
                            // Attention layers
6865
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), { n_embd, n_embd_head_k * n_head * 2 }, 0);
6866
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), { n_embd, n_embd_k_gqa }, 0);
6867
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), { n_embd, n_embd_v_gqa }, 0);
6868
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6869
6870
                            // Q/K normalization for attention layers
6871
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, 0);
6872
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, 0);
6873
0
                        } else {
6874
                            // Linear attention (gated delta net) specific tensors
6875
                            // Create tensors with calculated dimensions
6876
                            // note: ssm_in is used by legacy GGUF
6877
0
                            layer.ssm_in         = create_tensor(tn(LLM_TENSOR_SSM_IN,         "weight", i), { n_embd, qkvz_dim }, TENSOR_NOT_REQUIRED);
6878
0
                            layer.wqkv           = create_tensor(tn(LLM_TENSOR_ATTN_QKV,       "weight", i), { n_embd, key_dim * 2 + value_dim }, TENSOR_NOT_REQUIRED);
6879
0
                            layer.wqkv_gate      = create_tensor(tn(LLM_TENSOR_ATTN_GATE,      "weight", i), { n_embd, value_dim }, TENSOR_NOT_REQUIRED);
6880
0
                            layer.ssm_conv1d     = create_tensor(tn(LLM_TENSOR_SSM_CONV1D,     "weight", i), { hparams.ssm_d_conv, conv_dim }, 0);
6881
0
                            layer.ssm_dt         = create_tensor(tn(LLM_TENSOR_SSM_DT,         "bias",   i), { hparams.ssm_dt_rank }, 0);
6882
0
                            layer.ssm_a          = create_tensor(tn(LLM_TENSOR_SSM_A_NOSCAN,             i), { hparams.ssm_dt_rank }, 0);
6883
0
                            layer.ssm_beta_alpha = create_tensor(tn(LLM_TENSOR_SSM_BETA_ALPHA, "weight", i), { n_embd, ba_dim }, 0);
6884
0
                            layer.ssm_norm       = create_tensor(tn(LLM_TENSOR_SSM_NORM,       "weight", i), { head_v_dim }, 0);
6885
0
                            layer.ssm_out        = create_tensor(tn(LLM_TENSOR_SSM_OUT,        "weight", i), { value_dim, n_embd }, 0);
6886
0
                        }
6887
6888
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), { n_embd, n_expert }, 0);
6889
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6890
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, 0);
6891
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6892
6893
                        // Shared experts
6894
0
                        layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), { n_embd }, 0);
6895
0
                        layer.ffn_gate_shexp     = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP,     "weight", i), { n_embd, hparams.n_ff_shexp }, 0);
6896
0
                        layer.ffn_up_shexp       = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,       "weight", i), { n_embd, hparams.n_ff_shexp }, 0);
6897
0
                        layer.ffn_down_shexp     = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP,     "weight", i), { hparams.n_ff_shexp, n_embd }, 0);
6898
0
                    }
6899
0
                } break;
6900
0
            case LLM_ARCH_MIMO2:
6901
0
                {
6902
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6903
6904
                    // output
6905
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6906
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6907
6908
0
                    for (int i = 0; i < n_layer; ++i) {
6909
0
                        auto & layer = layers[i];
6910
0
                        uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i);
6911
0
                        uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i);
6912
0
                        uint32_t n_head = hparams.n_head(i);
6913
6914
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6915
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, 0);
6916
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, 0);
6917
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_v * n_head, n_embd }, 0);
6918
6919
0
                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_ATTN_NORM,  "weight", i), {n_embd}, 0);
6920
0
                        layer.attn_sinks = create_tensor(tn(LLM_TENSOR_ATTN_SINKS, "weight", i), {n_head}, TENSOR_NOT_REQUIRED);
6921
6922
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6923
6924
                        // non-MoE branch
6925
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
6926
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, TENSOR_NOT_REQUIRED);
6927
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
6928
6929
                        // MoE branch
6930
0
                        int64_t n_ff_exp = hparams.n_ff_exp;
6931
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, TENSOR_NOT_REQUIRED);
6932
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp,   n_expert}, TENSOR_NOT_REQUIRED);
6933
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, TENSOR_NOT_REQUIRED);
6934
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff_exp,   n_expert}, TENSOR_NOT_REQUIRED);
6935
0
                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
6936
0
                    }
6937
0
                } break;
6938
0
            case LLM_ARCH_MAINCODER:
6939
0
                {
6940
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6941
6942
                    // output
6943
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6944
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6945
                    // if output is NULL, init from the input tok embed
6946
0
                    if (output == NULL) {
6947
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6948
0
                    }
6949
6950
0
                    for (int i = 0; i < n_layer; ++i) {
6951
0
                        auto & layer = layers[i];
6952
6953
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6954
6955
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6956
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
6957
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
6958
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6959
6960
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6961
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6962
6963
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6964
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6965
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6966
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6967
0
                    }
6968
0
                } break;
6969
0
            default:
6970
0
                throw std::runtime_error("unknown architecture");
6971
0
        }
6972
6973
0
        if (n_moved_tensors > 0) {
6974
0
            LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n",
6975
0
                __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1,
6976
0
                ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft));
6977
0
        }
6978
0
    }
6979
6980
0
    ml.done_getting_tensors();
6981
6982
0
    ml.init_mappings(true, use_mlock ? &pimpl->mlock_mmaps : nullptr);
6983
0
    pimpl->mappings.reserve(ml.mappings.size());
6984
6985
    // create the backend buffers
6986
0
    std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_buf_maps;
6987
0
    ctx_buf_maps.reserve(ctx_map.size());
6988
6989
    // Ensure we have enough capacity for the maximum number of backend buffers we will potentially create
6990
0
    const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
6991
0
    pimpl->ctxs_bufs.reserve(n_max_backend_buffer);
6992
6993
0
    for (auto & [buft, ctx_ptr] : ctx_map) {
6994
0
        ggml_context * ctx = ctx_ptr.get();
6995
6996
        // skip contexts without tensors
6997
0
        if (ggml_get_first_tensor(ctx) == nullptr) {
6998
0
            continue;
6999
0
        }
7000
7001
0
        llama_buf_map buf_map;
7002
0
        buf_map.reserve(n_max_backend_buffer);
7003
7004
        // check if it is possible to use buffer_from_host_ptr with this buffer type
7005
0
        ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
7006
0
        if (!dev) {
7007
            // FIXME: workaround for CPU backend buft having a NULL device
7008
0
            dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
7009
0
            if (!dev) {
7010
0
                throw std::runtime_error(format("%s: no CPU backend found", __func__));
7011
0
            }
7012
0
        }
7013
0
        ggml_backend_dev_props props;
7014
0
        ggml_backend_dev_get_props(dev, &props);
7015
0
        bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
7016
0
        bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
7017
7018
0
        std::vector<ggml_backend_buffer_ptr> bufs;
7019
0
        if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
7020
0
            GGML_ASSERT(!ml.no_alloc);
7021
0
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
7022
                // only the mmap region containing the tensors in the model is mapped to the backend buffer
7023
                // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer,
7024
                //     then we could just use metal for all layers
7025
                // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
7026
0
                void * addr = nullptr;
7027
0
                size_t first, last; // NOLINT
7028
0
                ml.get_mapping_range(&first, &last, &addr, idx, ctx);
7029
0
                if (first >= last) {
7030
0
                    continue;
7031
0
                }
7032
0
                const size_t max_size = ggml_get_max_tensor_size(ctx);
7033
0
                ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
7034
0
                if (buf == nullptr) {
7035
0
                    throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
7036
0
                }
7037
0
                bufs.emplace_back(buf);
7038
0
                buf_map.emplace(idx, buf);
7039
0
            }
7040
0
        } else {
7041
0
            ggml_backend_buffer_t buf;
7042
0
            if (ml.no_alloc) {
7043
0
                buf = ggml_backend_buft_alloc_buffer(buft, /*size =*/ 0); // dummy buffer
7044
0
                for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
7045
0
                    t->buffer = buf; // set dummy buffer for weights so that the backend scheduler won't try to allocate them
7046
0
                }
7047
0
            } else {
7048
0
                buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); // real buffer
7049
0
            }
7050
0
            if (buf == nullptr) {
7051
0
                throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
7052
0
            }
7053
0
            if (use_mlock && ggml_backend_buffer_is_host(buf)) {
7054
0
                pimpl->mlock_bufs.emplace_back(new llama_mlock);
7055
0
                auto & mlock_buf = pimpl->mlock_bufs.back();
7056
0
                mlock_buf->init   (ggml_backend_buffer_get_base(buf));
7057
0
                mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
7058
0
            }
7059
0
            bufs.emplace_back(buf);
7060
0
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
7061
0
                buf_map.emplace(idx, buf);
7062
0
            }
7063
0
        }
7064
0
        pimpl->ctxs_bufs.emplace_back(std::move(ctx_ptr), std::move(bufs));
7065
7066
0
        for (auto & buf : buf_map) {
7067
            // indicate that this buffer contains weights
7068
            // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
7069
0
            ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
7070
0
        }
7071
7072
0
        ctx_buf_maps.emplace_back(ctx, buf_map);
7073
0
    }
7074
7075
0
    if (llama_supports_gpu_offload()) {
7076
0
        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
7077
7078
0
        int n_repeating = n_gpu;
7079
0
        if (n_repeating > 0) {
7080
0
            LLAMA_LOG_INFO("%s: offloading output layer to GPU\n", __func__);
7081
0
            n_repeating--;
7082
0
        }
7083
0
        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_repeating);
7084
7085
0
        const int max_backend_supported_layers = hparams.n_layer + 1;
7086
0
        const int max_offloadable_layers       = hparams.n_layer + 1;
7087
7088
0
        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
7089
0
    }
7090
7091
    // print memory requirements per buffer type
7092
0
    for (auto & [_, bufs] : pimpl->ctxs_bufs) {
7093
0
        for (auto & buf: bufs) {
7094
0
            LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n",
7095
0
                __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
7096
0
        }
7097
0
    }
7098
7099
    // populate tensors_by_name
7100
0
    for (auto & [ctx, _] : pimpl->ctxs_bufs) {
7101
0
        for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
7102
0
            tensors_by_name.emplace_back(ggml_get_name(cur), cur);
7103
0
        }
7104
0
    }
7105
7106
0
    if (ml.no_alloc) {
7107
0
        return true;
7108
0
    }
7109
7110
    // load tensor data
7111
0
    for (auto & [ctx, buf_map] : ctx_buf_maps) {
7112
0
        if (!ml.load_all_data(ctx, buf_map, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
7113
0
            return false;
7114
0
        }
7115
0
    }
7116
7117
0
    if (use_mmap_buffer) {
7118
0
        for (auto & mapping : ml.mappings) {
7119
0
            pimpl->mappings.emplace_back(std::move(mapping));
7120
0
        }
7121
0
    }
7122
7123
0
    return true;
7124
0
}
7125
7126
0
std::string llama_model::arch_name() const {
7127
0
    return llm_arch_name(arch);
7128
0
}
7129
7130
0
std::string llama_model::type_name() const {
7131
0
    return llm_type_name(type);
7132
0
}
7133
7134
0
std::string llama_model::desc() const {
7135
0
    return pimpl->desc_str;
7136
0
}
7137
7138
0
size_t llama_model::size() const {
7139
0
    return pimpl->n_bytes;
7140
0
}
7141
7142
0
size_t llama_model::n_tensors() const {
7143
0
    return tensors_by_name.size();
7144
0
}
7145
7146
0
size_t llama_model::n_devices() const {
7147
0
    return devices.size();
7148
0
}
7149
7150
0
uint32_t llama_model::n_gpu_layers() const {
7151
0
    return params.n_gpu_layers >= 0 ? params.n_gpu_layers : hparams.n_layer + 1;
7152
0
}
7153
7154
0
llama_split_mode llama_model::split_mode() const {
7155
0
    return params.split_mode;
7156
0
}
7157
7158
0
std::map<ggml_backend_buffer_type_t, size_t> llama_model::memory_breakdown() const {
7159
0
    std::map<ggml_backend_buffer_type_t, size_t> ret;
7160
0
    for (const auto & [ctx, bufs] : pimpl->ctxs_bufs) {
7161
0
        if (hparams.no_alloc) {
7162
0
            GGML_ASSERT(bufs.size() == 1);
7163
0
            ggml_backend_buffer_t buf = bufs[0].get();
7164
0
            GGML_ASSERT(ggml_backend_buffer_get_base(buf) == nullptr);
7165
0
            ggml_backend_buffer_type_t buft = ggml_backend_buffer_get_type(buf);
7166
0
            ret[buft] += ggml_backend_alloc_ctx_tensors_from_buft_size(ctx.get(), buft);
7167
0
        } else {
7168
0
            for (const auto & buf : bufs) {
7169
                // GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) != nullptr); // multi_buffer does not have a defined base
7170
0
                ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get());
7171
0
            }
7172
0
        }
7173
0
    }
7174
0
    return ret;
7175
0
}
7176
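Note: memory_breakdown above aggregates buffer sizes per backend buffer type (falling back to the would-be allocation size when no_alloc is set). Below is a minimal standalone sketch of that aggregation shape only; it is not part of llama-model.cpp, and fake_buffer, breakdown and the buffer-type strings ("CUDA0", "CPU_Mapped") are illustrative stand-ins for the real ggml_backend_buffer handles and names.

#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Placeholder for a backend buffer: just a type tag and a size in bytes.
struct fake_buffer {
    std::string type;
    size_t      size;
};

// Aggregate sizes per buffer type, mirroring the shape of llama_model::memory_breakdown().
static std::map<std::string, size_t> breakdown(const std::vector<fake_buffer> & bufs) {
    std::map<std::string, size_t> ret;
    for (const auto & buf : bufs) {
        ret[buf.type] += buf.size;
    }
    return ret;
}

int main() {
    const std::vector<fake_buffer> bufs = {
        { "CUDA0",      512u * 1024 * 1024 },
        { "CUDA0",      256u * 1024 * 1024 },
        { "CPU_Mapped", 128u * 1024 * 1024 },
    };
    for (const auto & [type, size] : breakdown(bufs)) {
        std::cout << type << ": " << size / 1024.0 / 1024.0 << " MiB\n";
    }
}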
7177
0
uint64_t llama_model::n_elements() const {
7178
0
    return pimpl->n_elements;
7179
0
}
7180
7181
0
void llama_model::print_info() const {
7182
0
    const std::string rope_scaling_type = llama_rope_scaling_type_name(hparams.rope_scaling_type_train);
7183
7184
0
    auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
7185
0
        bool is_var = false;
7186
7187
0
        std::vector<uint32_t> v;
7188
0
        for (uint32_t i = 0; i < n; ++i) {
7189
0
            v.push_back(f(i));
7190
0
            if (v[i] != v[0]) {
7191
0
                is_var = true;
7192
0
            }
7193
0
        }
7194
7195
0
        std::stringstream ss;
7196
7197
0
        if (is_var) {
7198
0
            ss << "[";
7199
0
            for (uint32_t i = 0; i < n; ++i) {
7200
0
                ss << v[i];
7201
0
                if (i < n - 1) {
7202
0
                    ss << ", ";
7203
0
                }
7204
0
            }
7205
0
            ss << "]";
7206
0
        } else {
7207
0
            ss << v[0];
7208
0
        }
7209
7210
0
        return ss.str();
7211
0
    };
7212
7213
    // hparams
7214
0
    LLAMA_LOG_INFO("%s: arch                  = %s\n",     __func__, arch_name().c_str());
7215
0
    LLAMA_LOG_INFO("%s: vocab_only            = %d\n",     __func__, hparams.vocab_only);
7216
0
    LLAMA_LOG_INFO("%s: no_alloc              = %d\n",     __func__, hparams.no_alloc);
7217
7218
0
    if (!hparams.vocab_only) {
7219
0
        LLAMA_LOG_INFO("%s: n_ctx_train           = %u\n",     __func__, hparams.n_ctx_train);
7220
0
        LLAMA_LOG_INFO("%s: n_embd                = %u\n",     __func__, hparams.n_embd);
7221
0
        LLAMA_LOG_INFO("%s: n_embd_inp            = %u\n",     __func__, hparams.n_embd_inp());
7222
0
        LLAMA_LOG_INFO("%s: n_layer               = %u\n",     __func__, hparams.n_layer);
7223
0
        LLAMA_LOG_INFO("%s: n_head                = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head(il);    }, hparams.n_layer).c_str());
7224
0
        LLAMA_LOG_INFO("%s: n_head_kv             = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
7225
0
        LLAMA_LOG_INFO("%s: n_rot                 = %u\n",     __func__, hparams.n_rot);
7226
0
        LLAMA_LOG_INFO("%s: n_swa                 = %u\n",     __func__, hparams.n_swa);
7227
0
        LLAMA_LOG_INFO("%s: is_swa_any            = %u\n",     __func__, hparams.is_swa_any());
7228
0
        LLAMA_LOG_INFO("%s: n_embd_head_k         = %u\n",     __func__, hparams.n_embd_head_k);
7229
0
        LLAMA_LOG_INFO("%s: n_embd_head_v         = %u\n",     __func__, hparams.n_embd_head_v);
7230
0
        LLAMA_LOG_INFO("%s: n_gqa                 = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il);        }, hparams.n_layer).c_str());
7231
0
        LLAMA_LOG_INFO("%s: n_embd_k_gqa          = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
7232
0
        LLAMA_LOG_INFO("%s: n_embd_v_gqa          = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
7233
0
        LLAMA_LOG_INFO("%s: f_norm_eps            = %.1e\n",   __func__, hparams.f_norm_eps);
7234
0
        LLAMA_LOG_INFO("%s: f_norm_rms_eps        = %.1e\n",   __func__, hparams.f_norm_rms_eps);
7235
0
        LLAMA_LOG_INFO("%s: f_clamp_kqv           = %.1e\n",   __func__, hparams.f_clamp_kqv);
7236
0
        LLAMA_LOG_INFO("%s: f_max_alibi_bias      = %.1e\n",   __func__, hparams.f_max_alibi_bias);
7237
0
        LLAMA_LOG_INFO("%s: f_logit_scale         = %.1e\n",   __func__, hparams.f_logit_scale);
7238
0
        LLAMA_LOG_INFO("%s: f_attn_scale          = %.1e\n",   __func__, hparams.f_attention_scale);
7239
0
        LLAMA_LOG_INFO("%s: n_ff                  = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
7240
0
        LLAMA_LOG_INFO("%s: n_expert              = %u\n",     __func__, hparams.n_expert);
7241
0
        LLAMA_LOG_INFO("%s: n_expert_used         = %u\n",     __func__, hparams.n_expert_used);
7242
0
        LLAMA_LOG_INFO("%s: n_expert_groups       = %d\n",     __func__, hparams.n_expert_groups);
7243
0
        LLAMA_LOG_INFO("%s: n_group_used          = %d\n",     __func__, hparams.n_group_used);
7244
0
        LLAMA_LOG_INFO("%s: causal attn           = %d\n",     __func__, hparams.causal_attn);
7245
0
        LLAMA_LOG_INFO("%s: pooling type          = %d\n",     __func__, hparams.pooling_type);
7246
0
        LLAMA_LOG_INFO("%s: rope type             = %d\n",     __func__, hparams.rope_type);
7247
0
        LLAMA_LOG_INFO("%s: rope scaling          = %s\n",     __func__, rope_scaling_type.c_str());
7248
0
        LLAMA_LOG_INFO("%s: freq_base_train       = %.1f\n",   __func__, hparams.rope_freq_base_train);
7249
0
        LLAMA_LOG_INFO("%s: freq_scale_train      = %g\n",     __func__, hparams.rope_freq_scale_train);
7250
0
        if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7251
0
            LLAMA_LOG_INFO("%s: freq_base_swa         = %.1f\n",   __func__, hparams.rope_freq_base_train_swa);
7252
0
            LLAMA_LOG_INFO("%s: freq_scale_swa        = %g\n",     __func__, hparams.rope_freq_scale_train_swa);
7253
0
        }
7254
0
        LLAMA_LOG_INFO("%s: n_ctx_orig_yarn       = %u\n",     __func__, hparams.n_ctx_orig_yarn);
7255
0
        LLAMA_LOG_INFO("%s: rope_yarn_log_mul     = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
7256
0
        LLAMA_LOG_INFO("%s: rope_finetuned        = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
7257
        // MRoPE (Multi-axis Rotary Position Embedding) sections
7258
0
        if (const auto & s = hparams.rope_sections; s[0] || s[1] || s[2] || s[3]) {
7259
0
            LLAMA_LOG_INFO("%s: mrope sections        = [%d, %d, %d, %d]\n", __func__, s[0], s[1], s[2], s[3]);
7260
0
        }
7261
0
        if (!classifier_labels.empty()) {
7262
0
            LLAMA_LOG_INFO("%s: n_cls_out             = %u\n", __func__, hparams.n_cls_out);
7263
7264
0
            size_t i = 0;
7265
0
            for (auto label : classifier_labels) {
7266
0
                LLAMA_LOG_INFO("%s: cls_label[%2zu]         = %s\n", __func__, i++, label.c_str());
7267
0
            }
7268
0
        }
7269
0
    }
7270
7271
0
    if (arch == LLM_ARCH_MAMBA ||
7272
0
        arch == LLM_ARCH_MAMBA2 ||
7273
0
        arch == LLM_ARCH_JAMBA ||
7274
0
        arch == LLM_ARCH_FALCON_H1 ||
7275
0
        arch == LLM_ARCH_PLAMO2 ||
7276
0
        arch == LLM_ARCH_GRANITE_HYBRID ||
7277
0
        arch == LLM_ARCH_QWEN3NEXT ||
7278
0
        arch == LLM_ARCH_NEMOTRON_H ||
7279
0
        arch == LLM_ARCH_NEMOTRON_H_MOE) {
7280
0
        LLAMA_LOG_INFO("%s: ssm_d_conv            = %u\n",     __func__, hparams.ssm_d_conv);
7281
0
        LLAMA_LOG_INFO("%s: ssm_d_inner           = %u\n",     __func__, hparams.ssm_d_inner);
7282
0
        LLAMA_LOG_INFO("%s: ssm_d_state           = %u\n",     __func__, hparams.ssm_d_state);
7283
0
        LLAMA_LOG_INFO("%s: ssm_dt_rank           = %u\n",     __func__, hparams.ssm_dt_rank);
7284
0
        LLAMA_LOG_INFO("%s: ssm_n_group           = %u\n",     __func__, hparams.ssm_n_group);
7285
0
        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms        = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
7286
0
    }
7287
7288
0
    LLAMA_LOG_INFO("%s: model type            = %s\n",     __func__, type_name().c_str());
7289
0
    if (pimpl->n_elements >= 1e12) {
7290
0
        LLAMA_LOG_INFO("%s: model params          = %.2f T\n", __func__, pimpl->n_elements*1e-12);
7291
0
    } else if (pimpl->n_elements >= 1e9) {
7292
0
        LLAMA_LOG_INFO("%s: model params          = %.2f B\n", __func__, pimpl->n_elements*1e-9);
7293
0
    } else if (pimpl->n_elements >= 1e6) {
7294
0
        LLAMA_LOG_INFO("%s: model params          = %.2f M\n", __func__, pimpl->n_elements*1e-6);
7295
0
    } else {
7296
0
        LLAMA_LOG_INFO("%s: model params          = %.2f K\n", __func__, pimpl->n_elements*1e-3);
7297
0
    }
7298
7299
    // general kv
7300
0
    LLAMA_LOG_INFO("%s: general.name          = %s\n",    __func__, name.c_str());
7301
7302
0
    if (arch == LLM_ARCH_DEEPSEEK) {
7303
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead    = %d\n",     __func__, hparams.n_layer_dense_lead);
7304
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7305
0
        LLAMA_LOG_INFO("%s: n_expert_shared       = %d\n",     __func__, hparams.n_expert_shared);
7306
0
        LLAMA_LOG_INFO("%s: expert_weights_scale  = %.1f\n",   __func__, hparams.expert_weights_scale);
7307
0
    }
7308
7309
0
    if (arch == LLM_ARCH_DEEPSEEK2) {
7310
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead    = %d\n",     __func__, hparams.n_layer_dense_lead);
7311
0
        LLAMA_LOG_INFO("%s: n_lora_q              = %d\n",     __func__, hparams.n_lora_q);
7312
0
        LLAMA_LOG_INFO("%s: n_lora_kv             = %d\n",     __func__, hparams.n_lora_kv);
7313
0
        LLAMA_LOG_INFO("%s: n_embd_head_k_mla     = %d\n",     __func__, hparams.n_embd_head_k_mla);
7314
0
        LLAMA_LOG_INFO("%s: n_embd_head_v_mla     = %d\n",     __func__, hparams.n_embd_head_v_mla);
7315
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7316
0
        LLAMA_LOG_INFO("%s: n_expert_shared       = %d\n",     __func__, hparams.n_expert_shared);
7317
0
        LLAMA_LOG_INFO("%s: expert_weights_scale  = %.1f\n",   __func__, hparams.expert_weights_scale);
7318
0
        LLAMA_LOG_INFO("%s: expert_weights_norm   = %d\n",     __func__, hparams.expert_weights_norm);
7319
0
        LLAMA_LOG_INFO("%s: expert_gating_func    = %s\n",     __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
7320
0
    }
7321
7322
0
    if (arch == LLM_ARCH_QWEN2MOE) {
7323
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7324
0
        LLAMA_LOG_INFO("%s: n_ff_shexp            = %d\n",     __func__, hparams.n_ff_shexp);
7325
0
    }
7326
7327
0
    if (arch == LLM_ARCH_QWEN3MOE || arch == LLM_ARCH_OPENAI_MOE || arch == LLM_ARCH_QWEN3VLMOE || arch == LLM_ARCH_RND1) {
7328
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7329
0
    }
7330
7331
0
    if (arch == LLM_ARCH_MINICPM ||
7332
0
        arch == LLM_ARCH_GRANITE ||
7333
0
        arch == LLM_ARCH_GRANITE_MOE ||
7334
0
        arch == LLM_ARCH_GRANITE_HYBRID ||
7335
0
        arch == LLM_ARCH_NEMOTRON_H_MOE) {
7336
0
        LLAMA_LOG_INFO("%s: f_embedding_scale     = %f\n", __func__, hparams.f_embedding_scale);
7337
0
        LLAMA_LOG_INFO("%s: f_residual_scale      = %f\n", __func__, hparams.f_residual_scale);
7338
0
        LLAMA_LOG_INFO("%s: f_attention_scale     = %f\n", __func__, hparams.f_attention_scale);
7339
0
        LLAMA_LOG_INFO("%s: n_ff_shexp            = %d\n", __func__, hparams.n_ff_shexp);
7340
0
    }
7341
7342
0
    if (arch == LLM_ARCH_BAILINGMOE) {
7343
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead    = %d\n",     __func__, hparams.n_layer_dense_lead);
7344
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7345
0
        LLAMA_LOG_INFO("%s: n_expert_shared       = %d\n",     __func__, hparams.n_expert_shared);
7346
0
        LLAMA_LOG_INFO("%s: expert_weights_scale  = %.1f\n",   __func__, hparams.expert_weights_scale);
7347
0
        LLAMA_LOG_INFO("%s: expert_weights_norm   = %d\n",     __func__, hparams.expert_weights_norm);
7348
0
    }
7349
7350
0
    if (arch == LLM_ARCH_BAILINGMOE2) {
7351
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead    = %d\n",     __func__, hparams.n_layer_dense_lead);
7352
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7353
0
        LLAMA_LOG_INFO("%s: n_ff_shexp            = %d\n",     __func__, hparams.n_ff_shexp);
7354
0
        LLAMA_LOG_INFO("%s: n_expert_shared       = %d\n",     __func__, hparams.n_expert_shared);
7355
0
        LLAMA_LOG_INFO("%s: expert_weights_scale  = %.1f\n",   __func__, hparams.expert_weights_scale);
7356
0
        LLAMA_LOG_INFO("%s: expert_weights_norm   = %d\n",     __func__, hparams.expert_weights_norm);
7357
0
        LLAMA_LOG_INFO("%s: expert_gating_func    = %s\n",     __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
7358
0
        LLAMA_LOG_INFO("%s: nextn_predict_layers  = %d\n",     __func__, hparams.nextn_predict_layers);
7359
0
    }
7360
7361
0
    if (arch == LLM_ARCH_SMALLTHINKER || arch == LLM_ARCH_LFM2MOE) {
7362
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7363
0
        LLAMA_LOG_INFO("%s: expert_gating_func    = %s\n",     __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
7364
0
    }
7365
7366
0
    if (arch == LLM_ARCH_GROVEMOE) {
7367
0
        LLAMA_LOG_INFO("%s: n_ff_exp              = %d\n",     __func__, hparams.n_ff_exp);
7368
0
        LLAMA_LOG_INFO("%s: n_ff_chexp            = %d\n",     __func__, hparams.n_ff_chexp);
7369
0
        LLAMA_LOG_INFO("%s: n_group_experts       = %d\n",     __func__, hparams.n_group_experts);
7370
0
        LLAMA_LOG_INFO("%s: expert_group_scale    = %.2f\n",   __func__, hparams.expert_group_scale);
7371
0
    }
7372
7373
0
    vocab.print_info();
7374
0
}
7375
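Note: the print_f helper inside print_info above compresses a per-layer hyperparameter into a single value when it is constant across layers, or a bracketed list when it varies. A minimal standalone sketch of that formatting behavior follows; print_per_layer is an illustrative name and the snippet is not part of llama.cpp.

#include <cstdint>
#include <functional>
#include <iostream>
#include <sstream>
#include <vector>

// Render f(0..n-1) as a single value when constant, or as "[v0, v1, ...]" when it varies,
// mirroring the print_f lambda used by llama_model::print_info().
static std::string print_per_layer(const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
    std::vector<uint32_t> v;
    bool is_var = false;
    for (uint32_t i = 0; i < n; ++i) {
        v.push_back(f(i));
        if (v[i] != v[0]) {
            is_var = true;
        }
    }
    std::stringstream ss;
    if (!is_var) {
        ss << v[0];
    } else {
        ss << "[";
        for (uint32_t i = 0; i < n; ++i) {
            ss << v[i] << (i + 1 < n ? ", " : "");
        }
        ss << "]";
    }
    return ss.str();
}

int main() {
    // constant across layers -> "8"
    std::cout << print_per_layer([](uint32_t) { return 8u; }, 4) << "\n";
    // varies per layer -> "[0, 1, 2, 3]"
    std::cout << print_per_layer([](uint32_t il) { return il; }, 4) << "\n";
}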
7376
0
ggml_backend_dev_t llama_model::dev_layer(int il) const {
7377
0
    return pimpl->dev_layer.at(il).dev;
7378
0
}
7379
7380
0
ggml_backend_dev_t llama_model::dev_output() const {
7381
0
    return pimpl->dev_output.dev;
7382
0
}
7383
7384
template<typename F>
7385
0
static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
7386
0
    ggml_init_params params = {
7387
0
        /*.mem_size   =*/ ggml_tensor_overhead()*8,
7388
0
        /*.mem_buffer =*/ NULL,
7389
0
        /*.no_alloc   =*/ true,
7390
0
    };
7391
7392
0
    ggml_context_ptr ctx { ggml_init(params) };
7393
0
    if (!ctx) {
7394
0
        throw std::runtime_error(format("failed to create ggml context"));
7395
0
    }
7396
7397
0
    ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
7398
0
    ggml_tensor * op_tensor = fn(ctx.get());
7399
0
    for (int i = 0; i < GGML_MAX_SRC; i++) {
7400
0
        if (op_tensor->src[i] != nullptr) {
7401
0
            assert(op_tensor->src[i]->buffer == nullptr);
7402
0
            op_tensor->src[i]->buffer = buf.get();
7403
0
        }
7404
0
    }
7405
7406
0
    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
7407
7408
0
    return op_supported;
7409
0
}
7410
7411
template<typename F>
7412
0
static ggml_backend_buffer_type_t select_buft(const buft_list_t & buft_list, const F & fn) {
7413
0
    for (const auto & cur : buft_list) {
7414
0
        ggml_backend_dev_t cur_dev = cur.first;
7415
0
        ggml_backend_buffer_type_t cur_buft = cur.second;
7416
0
        if (buft_supported(cur_buft, cur_dev, fn)) {
7417
0
            return cur_buft;
7418
0
        }
7419
0
    }
7420
7421
0
    throw std::runtime_error(format("no suitable buffer type found"));
7422
0
}
7423
7424
0
ggml_backend_buffer_type_t llama_model::select_buft(int il) const {
7425
0
    return ::select_buft(
7426
0
            *pimpl->dev_layer.at(il).buft_list,
7427
0
            [&](ggml_context * ctx) {
7428
0
                ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
7429
0
                ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
7430
0
                return ggml_add(ctx, cur, layer_dir);
7431
0
            });
7432
0
}
7433
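Note: the two select_buft overloads above express a first-match search over a priority-ordered (device, buffer type) list, where a probe operation built by the caller decides whether a device qualifies. A minimal standalone sketch of that pattern follows; fake_dev, fake_buft and select_first_supported are placeholder names, not ggml or llama.cpp types.

#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Placeholder stand-ins for ggml_backend_dev_t / ggml_backend_buffer_type_t.
struct fake_dev  { bool supports_add; };
struct fake_buft { std::string name;  };

using fake_buft_list = std::vector<std::pair<fake_dev, fake_buft>>;

// Return the first buffer type whose device passes the probe, mirroring ::select_buft().
static fake_buft select_first_supported(const fake_buft_list & list,
                                        const std::function<bool(const fake_dev &)> & probe) {
    for (const auto & [dev, buft] : list) {
        if (probe(dev)) {
            return buft;
        }
    }
    throw std::runtime_error("no suitable buffer type found");
}

int main() {
    const fake_buft_list list = {
        { { /*supports_add =*/ false }, { "gpu-special" } },
        { { /*supports_add =*/ true  }, { "gpu-default" } },
        { { /*supports_add =*/ true  }, { "cpu"         } },
    };
    // Picks "gpu-default": the first entry whose device supports the probed op.
    const fake_buft chosen = select_first_supported(list, [](const fake_dev & d) { return d.supports_add; });
    std::cout << "chosen buffer type: " << chosen.name << "\n";
}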
7434
0
bool llama_model::has_tensor_overrides() const {
7435
0
    return pimpl->has_tensor_overrides;
7436
0
}
7437
7438
0
const ggml_tensor * llama_model::get_tensor(const char * name) const {
7439
0
    auto it = std::find_if(tensors_by_name.begin(), tensors_by_name.end(),
7440
0
            [name](const std::pair<std::string, ggml_tensor *> & it) {
7441
0
                return it.first == name;
7442
0
            });
7443
0
    if (it == tensors_by_name.end()) {
7444
0
        return nullptr;
7445
0
    }
7446
7447
0
    return it->second;
7448
0
}
7449
7450
0
float llama_model::get_rope_freq_base (const llama_cparams & cparams, int il) const {
7451
0
    return hparams.is_swa(il) ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base;
7452
0
}
7453
7454
0
float llama_model::get_rope_freq_scale(const llama_cparams & cparams, int il) const {
7455
0
    return hparams.is_swa(il) ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale;
7456
0
}
7457
7458
0
ggml_tensor * llama_model::get_rope_factors(const llama_cparams & cparams, int il) const {
7459
0
    const uint32_t n_ctx_seq = cparams.n_ctx_seq;
7460
7461
    // choose long/short freq factors based on the context size
7462
0
    if (layers[il].rope_freqs != nullptr) {
7463
0
        return layers[il].rope_freqs;
7464
0
    }
7465
7466
0
    if (n_ctx_seq > hparams.n_ctx_orig_yarn) {
7467
0
        return layers[il].rope_long;
7468
0
    }
7469
7470
0
    return layers[il].rope_short;
7471
0
}
7472
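Note: get_rope_factors above applies a simple precedence: an explicit per-layer rope_freqs tensor wins outright; otherwise the LongRoPE long factors are chosen when the per-sequence context exceeds the original YaRN training context, and the short factors otherwise. A minimal standalone sketch of that precedence follows, with plain float pointers standing in for ggml tensors; rope_layer and pick_rope_factors are illustrative names, not part of llama-model.cpp.

#include <cassert>
#include <cstdint>

// Placeholder for a per-layer set of RoPE factor tensors.
struct rope_layer {
    const float * rope_freqs = nullptr; // explicit per-layer frequency factors, if present
    const float * rope_long  = nullptr; // LongRoPE "long" factors
    const float * rope_short = nullptr; // LongRoPE "short" factors
};

// Mirror of the selection order in llama_model::get_rope_factors():
// rope_freqs overrides everything; otherwise pick long vs. short based on the context size.
static const float * pick_rope_factors(const rope_layer & layer,
                                       uint32_t n_ctx_seq, uint32_t n_ctx_orig_yarn) {
    if (layer.rope_freqs != nullptr) {
        return layer.rope_freqs;
    }
    if (n_ctx_seq > n_ctx_orig_yarn) {
        return layer.rope_long;
    }
    return layer.rope_short;
}

int main() {
    const float long_f[2]  = {1.0f, 2.0f};
    const float short_f[2] = {1.0f, 1.5f};

    rope_layer layer;
    layer.rope_long  = long_f;
    layer.rope_short = short_f;

    // 8192-token context vs. a 4096-token original YaRN context -> long factors
    assert(pick_rope_factors(layer, 8192, 4096) == long_f);
    // within the original context -> short factors
    assert(pick_rope_factors(layer, 2048, 4096) == short_f);
}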
7473
0
llama_memory_i * llama_model::create_memory(const llama_memory_params & params, const llama_cparams & cparams) const {
7474
0
    llama_memory_i * res;
7475
7476
0
    switch (arch) {
7477
        // Models that need specific instantiation should be handled in the
7478
        // switch statement
7479
0
        case LLM_ARCH_BERT:
7480
0
        case LLM_ARCH_JINA_BERT_V2:
7481
0
        case LLM_ARCH_JINA_BERT_V3:
7482
0
        case LLM_ARCH_NOMIC_BERT:
7483
0
        case LLM_ARCH_NOMIC_BERT_MOE:
7484
0
        case LLM_ARCH_NEO_BERT:
7485
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
7486
0
        case LLM_ARCH_MODERN_BERT:
7487
0
        case LLM_ARCH_GEMMA_EMBEDDING:
7488
0
        case LLM_ARCH_DREAM:
7489
0
        case LLM_ARCH_LLADA:
7490
0
        case LLM_ARCH_LLADA_MOE:
7491
0
        case LLM_ARCH_RND1:
7492
0
            {
7493
0
                res = nullptr;
7494
0
            } break;
7495
        // Models that need standard caching should rely on recurrent/hybrid
7496
        // checks
7497
0
        default:
7498
0
            {
7499
0
                if (llm_arch_is_recurrent(arch)) {
7500
0
                    res = new llama_memory_recurrent(
7501
0
                            *this,
7502
0
                            GGML_TYPE_F32,
7503
0
                            GGML_TYPE_F32,
7504
0
                            cparams.offload_kqv,
7505
0
                            std::max((uint32_t) 1, cparams.n_seq_max),
7506
0
                            cparams.n_seq_max,
7507
0
                            nullptr);
7508
0
                } else if (llm_arch_is_hybrid(arch)) {
7509
7510
                    // The main difference between hybrid architectures is the
7511
                    // layer filters, so pick the right one here
7512
0
                    llama_memory_hybrid::layer_filter_cb filter_attn = nullptr;
7513
0
                    llama_memory_hybrid::layer_filter_cb filter_recr = nullptr;
7514
0
                    if (arch == LLM_ARCH_FALCON_H1) {
7515
0
                        filter_attn = [&](int32_t) { return true; };
7516
0
                        filter_recr = [&](int32_t) { return true; };
7517
0
                    } else if (arch == LLM_ARCH_NEMOTRON_H || arch == LLM_ARCH_NEMOTRON_H_MOE) {
7518
0
                        filter_attn = [&](int32_t il) {
7519
0
                            return !hparams.is_recurrent(il) && hparams.n_ff(il) == 0;
7520
0
                        };
7521
0
                        filter_recr = [&](int32_t il) {
7522
0
                            return hparams.is_recurrent(il) && hparams.n_ff(il) == 0;
7523
0
                        };
7524
0
                    }
7525
7526
0
                    res = new llama_memory_hybrid(
7527
0
                        /* model             */ *this,
7528
0
                        /* attn_type_k       */ params.type_k,
7529
0
                        /* attn_type_v       */ params.type_v,
7530
0
                        /* attn_v_trans      */ !cparams.flash_attn,
7531
0
                        /* attn_kv_size      */ cparams.n_ctx,
7532
0
                        /* attn_n_pad        */ 1,
7533
0
                        /* attn_n_swa        */ hparams.n_swa,
7534
0
                        /* attn_swa_type     */ hparams.swa_type,
7535
0
                        /* recurrent_type_k  */ GGML_TYPE_F32,
7536
0
                        /* recurrent_type_v  */ GGML_TYPE_F32,
7537
0
                        /* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max),
7538
0
                        /* n_seq_max         */ cparams.n_seq_max,
7539
0
                        /* offload           */ cparams.offload_kqv,
7540
0
                        /* unified           */ cparams.kv_unified,
7541
0
                        /* filter_attn       */ std::move(filter_attn),
7542
0
                        /* filter_recr       */ std::move(filter_recr));
7543
0
                } else {
7544
0
                    llama_memory_i::layer_reuse_cb reuse = nullptr;
7545
7546
0
                    if (arch == LLM_ARCH_GEMMA3N) {
7547
0
                        reuse = [&](int32_t il) {
7548
0
                            if (il >= (int32_t) hparams.n_layer_kv_from_start) {
7549
0
                                return (int32_t) hparams.n_layer_kv_from_start - (hparams.is_swa(il) ? 2 : 1);
7550
0
                            }
7551
7552
0
                            return -1;
7553
0
                        };
7554
0
                    }
7555
7556
0
                    if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7557
0
                        GGML_ASSERT(hparams.is_swa_any());
7558
7559
0
                        res = new llama_kv_cache_iswa(
7560
0
                                *this,
7561
0
                                params.type_k,
7562
0
                                params.type_v,
7563
0
                                !cparams.flash_attn,
7564
0
                                cparams.offload_kqv,
7565
0
                                params.swa_full,
7566
0
                                cparams.kv_unified,
7567
0
                                cparams.n_ctx_seq,
7568
0
                                cparams.n_seq_max,
7569
0
                                cparams.n_ubatch,
7570
0
                                1,
7571
0
                                nullptr,
7572
0
                                reuse);
7573
0
                    } else {
7574
0
                        GGML_ASSERT(!hparams.is_swa_any());
7575
7576
0
                        res = new llama_kv_cache(
7577
0
                                *this,
7578
0
                                params.type_k,
7579
0
                                params.type_v,
7580
0
                                !cparams.flash_attn,
7581
0
                                cparams.offload_kqv,
7582
0
                                cparams.kv_unified,
7583
0
                                cparams.n_ctx_seq,
7584
0
                                cparams.n_seq_max,
7585
0
                                1,
7586
0
                                hparams.n_swa,
7587
0
                                hparams.swa_type,
7588
0
                                nullptr,
7589
0
                                nullptr);
7590
0
                    }
7591
0
                }
7592
0
            }
7593
0
    }
7594
7595
0
    return res;
7596
0
}
7597
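Note: for hybrid architectures, create_memory above splits layers between the attention cache and the recurrent state cache with per-layer filter callbacks (FALCON_H1 accepts every layer for both, while the Nemotron-H variants additionally require hparams.n_ff(il) == 0). A minimal standalone sketch of how such predicates partition a layer range follows; layers_matching and the even/odd recurrence flag are hypothetical illustrations, not the llama.cpp memory classes.

#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

using layer_filter_cb = std::function<bool(int32_t)>;

// Collect the layer indices accepted by a filter, mirroring how the hybrid memory
// assigns layers to its attention cache and its recurrent state cache.
static std::vector<int32_t> layers_matching(int32_t n_layer, const layer_filter_cb & filter) {
    std::vector<int32_t> out;
    for (int32_t il = 0; il < n_layer; ++il) {
        if (filter(il)) {
            out.push_back(il);
        }
    }
    return out;
}

int main() {
    const int32_t n_layer = 6;

    // Hypothetical per-layer flag: even layers recurrent, odd layers attention.
    auto is_recurrent = [](int32_t il) { return il % 2 == 0; };

    layer_filter_cb filter_attn = [&](int32_t il) { return !is_recurrent(il); };
    layer_filter_cb filter_recr = [&](int32_t il) { return  is_recurrent(il); };

    for (int32_t il : layers_matching(n_layer, filter_attn)) {
        std::cout << "attn layer " << il << "\n";   // 1, 3, 5
    }
    for (int32_t il : layers_matching(n_layer, filter_recr)) {
        std::cout << "recr layer " << il << "\n";   // 0, 2, 4
    }
}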
7598
0
ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
7599
0
    std::unique_ptr<llm_graph_context> llm;
7600
7601
0
    switch (arch) {
7602
0
        case LLM_ARCH_LLAMA:
7603
0
            {
7604
0
                llm = std::make_unique<llm_build_llama<false>>(*this, params);
7605
0
            } break;
7606
0
        case LLM_ARCH_LLAMA4:
7607
0
            {
7608
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_NONE) {
7609
0
                    llm = std::make_unique<llm_build_llama<false>>(*this, params);
7610
0
                } else {
7611
0
                    llm = std::make_unique<llm_build_llama_iswa>(*this, params);
7612
0
                }
7613
0
            } break;
7614
0
        case LLM_ARCH_LLAMA_EMBED:
7615
0
            {
7616
0
                llm = std::make_unique<llm_build_llama<true>>(*this, params);
7617
0
            } break;
7618
0
        case LLM_ARCH_MAINCODER:
7619
0
            {
7620
0
                llm = std::make_unique<llm_build_maincoder>(*this, params);
7621
0
            } break;
7622
0
        case LLM_ARCH_DECI:
7623
0
            {
7624
0
                llm = std::make_unique<llm_build_deci>(*this, params);
7625
0
            } break;
7626
0
        case LLM_ARCH_BAICHUAN:
7627
0
            {
7628
0
                llm = std::make_unique<llm_build_baichuan>(*this, params);
7629
0
            } break;
7630
0
        case LLM_ARCH_FALCON:
7631
0
            {
7632
0
                llm = std::make_unique<llm_build_falcon>(*this, params);
7633
0
            } break;
7634
0
        case LLM_ARCH_GROK:
7635
0
            {
7636
0
                llm = std::make_unique<llm_build_grok>(*this, params);
7637
0
            } break;
7638
0
        case LLM_ARCH_STARCODER:
7639
0
            {
7640
0
                llm = std::make_unique<llm_build_starcoder>(*this, params);
7641
0
            } break;
7642
0
        case LLM_ARCH_REFACT:
7643
0
            {
7644
0
                llm = std::make_unique<llm_build_refact>(*this, params);
7645
0
            } break;
7646
0
        case LLM_ARCH_BERT:
7647
0
        case LLM_ARCH_JINA_BERT_V2:
7648
0
        case LLM_ARCH_JINA_BERT_V3:
7649
0
        case LLM_ARCH_NOMIC_BERT:
7650
0
        case LLM_ARCH_NOMIC_BERT_MOE:
7651
0
            {
7652
0
                llm = std::make_unique<llm_build_bert>(*this, params);
7653
0
            } break;
7654
0
        case LLM_ARCH_MODERN_BERT:
7655
0
            {
7656
0
                llm = std::make_unique<llm_build_modern_bert>(*this, params);
7657
0
            } break;
7658
0
        case LLM_ARCH_NEO_BERT:
7659
0
            {
7660
0
                llm = std::make_unique<llm_build_neo_bert>(*this, params);
7661
0
            } break;
7662
0
        case LLM_ARCH_BLOOM:
7663
0
            {
7664
0
                llm = std::make_unique<llm_build_bloom>(*this, params);
7665
0
            } break;
7666
0
        case LLM_ARCH_MPT:
7667
0
            {
7668
0
                llm = std::make_unique<llm_build_mpt>(*this, params);
7669
0
            } break;
7670
0
        case LLM_ARCH_STABLELM:
7671
0
            {
7672
0
                llm = std::make_unique<llm_build_stablelm>(*this, params);
7673
0
            } break;
7674
0
        case LLM_ARCH_QWEN:
7675
0
            {
7676
0
                llm = std::make_unique<llm_build_qwen>(*this, params);
7677
0
            } break;
7678
0
        case LLM_ARCH_QWEN2:
7679
0
            {
7680
0
                llm = std::make_unique<llm_build_qwen2>(*this, params);
7681
0
            } break;
7682
0
        case LLM_ARCH_DREAM:
7683
0
            {
7684
0
                llm = std::make_unique<llm_build_dream>(*this, params);
7685
0
            }
7686
0
            break;
7687
0
        case LLM_ARCH_LLADA:
7688
0
            {
7689
0
                llm = std::make_unique<llm_build_llada>(*this, params);
7690
0
            }
7691
0
            break;
7692
0
        case LLM_ARCH_LLADA_MOE:
7693
0
            {
7694
0
                llm = std::make_unique<llm_build_llada_moe>(*this, params);
7695
0
            }
7696
0
            break;
7697
0
        case LLM_ARCH_RND1:
7698
0
            {
7699
0
                llm = std::make_unique<llm_build_rnd1>(*this, params);
7700
0
            }
7701
0
            break;
7702
0
        case LLM_ARCH_QWEN2VL:
7703
0
            {
7704
0
                llm = std::make_unique<llm_build_qwen2vl>(*this, params);
7705
0
            } break;
7706
0
        case LLM_ARCH_QWEN2MOE:
7707
0
            {
7708
0
                llm = std::make_unique<llm_build_qwen2moe>(*this, params);
7709
0
            } break;
7710
0
        case LLM_ARCH_QWEN3:
7711
0
            {
7712
0
                llm = std::make_unique<llm_build_qwen3>(*this, params);
7713
0
            } break;
7714
0
        case LLM_ARCH_QWEN3MOE:
7715
0
            {
7716
0
                llm = std::make_unique<llm_build_qwen3moe>(*this, params);
7717
0
            } break;
7718
0
        case LLM_ARCH_QWEN3VL:
7719
0
            {
7720
0
                llm = std::make_unique<llm_build_qwen3vl>(*this, params);
7721
0
            } break;
7722
0
        case LLM_ARCH_QWEN3VLMOE:
7723
0
            {
7724
0
                llm = std::make_unique<llm_build_qwen3vlmoe>(*this, params);
7725
0
            } break;
7726
0
        case LLM_ARCH_PHI2:
7727
0
            {
7728
0
                llm = std::make_unique<llm_build_phi2>(*this, params);
7729
0
            } break;
7730
0
        case LLM_ARCH_PHI3:
7731
0
        case LLM_ARCH_PHIMOE:
7732
0
            {
7733
0
                if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7734
0
                    llm = std::make_unique<llm_build_phi3<true>> (*this, params);
7735
0
                } else {
7736
0
                    llm = std::make_unique<llm_build_phi3<false>>(*this, params);
7737
0
                }
7738
0
            } break;
7739
0
        case LLM_ARCH_PLAMO:
7740
0
            {
7741
0
                llm = std::make_unique<llm_build_plamo>(*this, params);
7742
0
            } break;
7743
0
        case LLM_ARCH_PLAMO2:
7744
0
            {
7745
0
                llm = std::make_unique<llm_build_plamo2>(*this, params);
7746
0
            } break;
7747
0
        case LLM_ARCH_PLAMO3:
7748
0
            {
7749
0
                if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7750
0
                    llm = std::make_unique<llm_build_plamo3<true>> (*this, params);
7751
0
                } else {
7752
0
                    llm = std::make_unique<llm_build_plamo3<false>>(*this, params);
7753
0
                }
7754
0
            } break;
7755
0
        case LLM_ARCH_GPT2:
7756
0
            {
7757
0
                llm = std::make_unique<llm_build_gpt2>(*this, params);
7758
0
            } break;
7759
0
        case LLM_ARCH_CODESHELL:
7760
0
            {
7761
0
                llm = std::make_unique<llm_build_codeshell>(*this, params);
7762
0
            } break;
7763
0
        case LLM_ARCH_ORION:
7764
0
            {
7765
0
                llm = std::make_unique<llm_build_orion>(*this, params);
7766
0
            } break;
7767
0
        case LLM_ARCH_INTERNLM2:
7768
0
            {
7769
0
                llm = std::make_unique<llm_build_internlm2>(*this, params);
7770
0
            } break;
7771
0
        case LLM_ARCH_MINICPM3:
7772
0
            {
7773
0
                llm = std::make_unique<llm_build_minicpm3>(*this, params);
7774
0
            } break;
7775
0
        case LLM_ARCH_GEMMA:
7776
0
            {
7777
0
                llm = std::make_unique<llm_build_gemma>(*this, params);
7778
0
            } break;
7779
0
        case LLM_ARCH_GEMMA2:
7780
0
            {
7781
0
                llm = std::make_unique<llm_build_gemma2_iswa>(*this, params);
7782
0
            } break;
7783
0
        case LLM_ARCH_GEMMA3:
7784
0
            {
7785
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
7786
0
                    llm = std::make_unique<llm_build_gemma3<true>>(*this, params);
7787
0
                } else {
7788
0
                    llm = std::make_unique<llm_build_gemma3<false>>(*this, params);
7789
0
                }
7790
0
            } break;
7791
0
        case LLM_ARCH_GEMMA3N:
7792
0
            {
7793
0
                llm = std::make_unique<llm_build_gemma3n_iswa>(*this, params);
7794
0
            } break;
7795
0
        case LLM_ARCH_GEMMA_EMBEDDING:
7796
0
            {
7797
0
                llm = std::make_unique<llm_build_gemma_embedding>(*this, params);
7798
0
            } break;
7799
0
        case LLM_ARCH_STARCODER2:
7800
0
            {
7801
0
                llm = std::make_unique<llm_build_starcoder2>(*this, params);
7802
0
            } break;
7803
0
        case LLM_ARCH_MAMBA:
7804
0
        case LLM_ARCH_MAMBA2:
7805
0
            {
7806
0
                llm = std::make_unique<llm_build_mamba>(*this, params);
7807
0
            } break;
7808
0
        case LLM_ARCH_JAMBA:
7809
0
            {
7810
0
                llm = std::make_unique<llm_build_jamba>(*this, params);
7811
0
            } break;
7812
0
        case LLM_ARCH_XVERSE:
7813
0
            {
7814
0
                llm = std::make_unique<llm_build_xverse>(*this, params);
7815
0
            } break;
7816
0
        case LLM_ARCH_COMMAND_R:
7817
0
            {
7818
0
                llm = std::make_unique<llm_build_command_r>(*this, params);
7819
0
            } break;
7820
0
        case LLM_ARCH_COHERE2:
7821
0
            {
7822
0
                llm = std::make_unique<llm_build_cohere2_iswa>(*this, params);
7823
0
            } break;
7824
0
        case LLM_ARCH_DBRX:
7825
0
            {
7826
0
                llm = std::make_unique<llm_build_dbrx>(*this, params);
7827
0
            } break;
7828
0
        case LLM_ARCH_OLMO:
7829
0
            {
7830
0
                llm = std::make_unique<llm_build_olmo>(*this, params);
7831
0
            } break;
7832
0
        case LLM_ARCH_OLMO2:
7833
0
            {
7834
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
7835
0
                    llm = std::make_unique<llm_build_olmo2<true>>(*this, params);
7836
0
                } else {
7837
0
                    llm = std::make_unique<llm_build_olmo2<false>>(*this, params);
7838
0
                }
7839
0
            } break;
7840
0
        case LLM_ARCH_OLMOE:
7841
0
            {
7842
0
                llm = std::make_unique<llm_build_olmoe>(*this, params);
7843
0
            } break;
7844
0
        case LLM_ARCH_OPENELM:
7845
0
            {
7846
0
                llm = std::make_unique<llm_build_openelm>(*this, params);
7847
0
            } break;
7848
0
        case LLM_ARCH_GPTNEOX:
7849
0
            {
7850
0
                llm = std::make_unique<llm_build_gptneox>(*this, params);
7851
0
            } break;
7852
0
        case LLM_ARCH_ARCTIC:
7853
0
            {
7854
0
                llm = std::make_unique<llm_build_arctic>(*this, params);
7855
0
            } break;
7856
0
        case LLM_ARCH_DEEPSEEK:
7857
0
            {
7858
0
                llm = std::make_unique<llm_build_deepseek>(*this, params);
7859
0
            } break;
7860
0
        case LLM_ARCH_DEEPSEEK2:
7861
0
            {
7862
0
                llm = std::make_unique<llm_build_deepseek2>(*this, params);
7863
0
            } break;
7864
0
        case LLM_ARCH_CHATGLM:
7865
0
            {
7866
0
                llm = std::make_unique<llm_build_chatglm>(*this, params);
7867
0
            } break;
7868
0
        case LLM_ARCH_GLM4:
7869
0
            {
7870
0
                llm = std::make_unique<llm_build_glm4>(*this, params);
7871
0
            } break;
7872
0
        case LLM_ARCH_GLM4_MOE:
7873
0
            {
7874
0
                llm = std::make_unique<llm_build_glm4_moe>(*this, params);
7875
0
            } break;
7876
0
        case LLM_ARCH_BITNET:
7877
0
            {
7878
0
                llm = std::make_unique<llm_build_bitnet>(*this, params);
7879
0
            } break;
7880
0
        case LLM_ARCH_T5:
7881
0
            {
7882
0
                switch (params.gtype) {
7883
0
                    case LLM_GRAPH_TYPE_ENCODER:
7884
0
                        llm = std::make_unique<llm_build_t5_enc>(*this, params);
7885
0
                        break;
7886
0
                    case LLM_GRAPH_TYPE_DEFAULT:
7887
0
                    case LLM_GRAPH_TYPE_DECODER:
7888
0
                        llm = std::make_unique<llm_build_t5_dec>(*this, params);
7889
0
                        break;
7890
0
                    default:
7891
0
                        GGML_ABORT("invalid graph type");
7892
0
                };
7893
0
            } break;
7894
0
        case LLM_ARCH_T5ENCODER:
7895
0
            {
7896
0
                llm = std::make_unique<llm_build_t5_enc>(*this, params);
7897
0
            }
7898
0
            break;
7899
0
        case LLM_ARCH_JAIS:
7900
0
            {
7901
0
                llm = std::make_unique<llm_build_jais>(*this, params);
7902
0
            } break;
7903
0
        case LLM_ARCH_NEMOTRON:
7904
0
            {
7905
0
                llm = std::make_unique<llm_build_nemotron>(*this, params);
7906
0
            } break;
7907
0
        case LLM_ARCH_NEMOTRON_H:
7908
0
        case LLM_ARCH_NEMOTRON_H_MOE:
7909
0
            {
7910
0
                llm = std::make_unique<llm_build_nemotron_h>(*this, params);
7911
0
            } break;
7912
0
        case LLM_ARCH_EXAONE:
7913
0
            {
7914
0
                llm = std::make_unique<llm_build_exaone>(*this, params);
7915
0
            } break;
7916
0
        case LLM_ARCH_EXAONE4:
7917
0
            {
7918
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
7919
0
                    llm = std::make_unique<llm_build_exaone4<true>>(*this, params);
7920
0
                } else {
7921
0
                    llm = std::make_unique<llm_build_exaone4<false>>(*this, params);
7922
0
                }
7923
0
            } break;
7924
0
        case LLM_ARCH_EXAONE_MOE:
7925
0
            {
7926
0
                llm = std::make_unique<llm_build_exaone_moe>(*this, params);
7927
0
            } break;
7928
0
        case LLM_ARCH_RWKV6:
7929
0
            {
7930
0
                llm = std::make_unique<llm_build_rwkv6>(*this, params);
7931
0
            } break;
7932
0
        case LLM_ARCH_RWKV6QWEN2:
7933
0
            {
7934
0
                llm = std::make_unique<llm_build_rwkv6qwen2>(*this, params);
7935
0
            } break;
7936
0
        case LLM_ARCH_RWKV7:
7937
0
            {
7938
0
                llm = std::make_unique<llm_build_rwkv7>(*this, params);
7939
0
            } break;
7940
0
        case LLM_ARCH_ARWKV7:
7941
0
            {
7942
0
                llm = std::make_unique<llm_build_arwkv7>(*this, params);
7943
0
            } break;
7944
0
        case LLM_ARCH_GRANITE:
7945
0
        case LLM_ARCH_GRANITE_MOE:
7946
0
        case LLM_ARCH_MINICPM:
7947
0
            {
7948
0
                llm = std::make_unique<llm_build_granite>(*this, params);
7949
0
            } break;
7950
0
        case LLM_ARCH_GRANITE_HYBRID:
7951
0
            {
7952
0
                llm = std::make_unique<llm_build_granite_hybrid>(*this, params);
7953
0
            } break;
7954
0
        case LLM_ARCH_CHAMELEON:
7955
0
            {
7956
0
                llm = std::make_unique<llm_build_chameleon>(*this, params);
7957
0
            } break;
7958
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
7959
0
            {
7960
0
                llm = std::make_unique<llm_build_wavtokenizer_dec>(*this, params);
7961
0
            } break;
7962
0
        case LLM_ARCH_PLM:
7963
0
            {
7964
0
                llm = std::make_unique<llm_build_plm>(*this, params);
7965
0
            } break;
7966
0
        case LLM_ARCH_BAILINGMOE:
7967
0
            {
7968
0
                llm = std::make_unique<llm_build_bailingmoe>(*this, params);
7969
0
            } break;
7970
0
        case LLM_ARCH_BAILINGMOE2:
7971
0
            {
7972
0
                llm = std::make_unique<llm_build_bailingmoe2>(*this, params);
7973
0
            } break;
7974
0
        case LLM_ARCH_SEED_OSS:
7975
0
            {
7976
0
                llm = std::make_unique<llm_build_seed_oss>(*this, params);
7977
0
            } break;
7978
0
        case LLM_ARCH_DOTS1:
7979
0
            {
7980
0
                llm = std::make_unique<llm_build_dots1>(*this, params);
7981
0
            } break;
7982
0
        case LLM_ARCH_ARCEE:
7983
0
            {
7984
0
                llm = std::make_unique<llm_build_arcee>(*this, params);
7985
0
            } break;
7986
0
        case LLM_ARCH_AFMOE:
7987
0
            {
7988
0
                llm = std::make_unique<llm_build_afmoe>(*this, params);
7989
0
            } break;
7990
0
        case LLM_ARCH_ERNIE4_5:
7991
0
            {
7992
0
                llm = std::make_unique<llm_build_ernie4_5>(*this, params);
7993
0
            } break;
7994
0
        case LLM_ARCH_ERNIE4_5_MOE:
7995
0
            {
7996
0
                llm = std::make_unique<llm_build_ernie4_5_moe>(*this, params);
7997
0
            } break;
7998
0
        case LLM_ARCH_HUNYUAN_MOE:
7999
0
            {
8000
0
                llm = std::make_unique<llm_build_hunyuan_moe>(*this, params);
8001
0
            } break;
8002
0
        case LLM_ARCH_HUNYUAN_DENSE:
8003
0
            {
8004
0
                llm = std::make_unique<llm_build_hunyuan_dense>(*this, params);
8005
0
            } break;
8006
0
        case LLM_ARCH_SMOLLM3:
8007
0
            {
8008
0
                llm = std::make_unique<llm_build_smollm3>(*this, params);
8009
0
            } break;
8010
0
        case LLM_ARCH_OPENAI_MOE:
8011
0
            {
8012
0
                llm = std::make_unique<llm_build_openai_moe_iswa>(*this, params);
8013
0
            } break;
8014
0
        case LLM_ARCH_FALCON_H1:
8015
0
            {
8016
0
                llm = std::make_unique<llm_build_falcon_h1>(*this, params);
8017
0
            } break;
8018
0
        case LLM_ARCH_LFM2:
8019
0
        case LLM_ARCH_LFM2MOE:
8020
0
            {
8021
0
                llm = std::make_unique<llm_build_lfm2>(*this, params);
8022
0
            } break;
8023
0
        case LLM_ARCH_SMALLTHINKER:
8024
0
            {
8025
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
8026
0
                    llm = std::make_unique<llm_build_smallthinker<true>> (*this, params);
8027
0
                } else {
8028
0
                    llm = std::make_unique<llm_build_smallthinker<false>>(*this, params);
8029
0
                }
8030
0
            } break;
8031
0
        case LLM_ARCH_GROVEMOE:
8032
0
            {
8033
0
                llm = std::make_unique<llm_build_grovemoe>(*this, params);
8034
0
            } break;
8035
0
        case LLM_ARCH_APERTUS:
8036
0
            {
8037
0
                llm = std::make_unique<llm_build_apertus>(*this, params);
8038
0
            } break;
8039
0
        case LLM_ARCH_MINIMAX_M2:
8040
0
            {
8041
0
                llm = std::make_unique<llm_build_minimax_m2>(*this, params);
8042
0
            } break;
8043
0
        case LLM_ARCH_COGVLM:
8044
0
            {
8045
0
                llm = std::make_unique<llm_build_cogvlm>(*this, params);
8046
0
            } break;
8047
0
        case LLM_ARCH_PANGU_EMBED:
8048
0
            {
8049
0
                llm = std::make_unique<llm_build_pangu_embedded>(*this, params);
8050
0
            } break;
8051
0
        case LLM_ARCH_QWEN3NEXT:
8052
0
            {
8053
0
                llm = std::make_unique<llm_build_qwen3next>(*this, params);
8054
0
            } break;
8055
0
        case LLM_ARCH_MISTRAL3:
8056
0
            {
8057
0
                llm = std::make_unique<llm_build_mistral3>(*this, params);
8058
0
            } break;
8059
0
        case LLM_ARCH_MIMO2:
8060
0
            {
8061
0
                llm = std::make_unique<llm_build_mimo2_iswa>(*this, params);
8062
0
            } break;
8063
0
        default:
8064
0
            GGML_ABORT("fatal error");
8065
0
    }
8066
8067
    // add on pooling layer
8068
0
    llm->build_pooling(cls, cls_b, cls_out, cls_out_b);
8069
8070
    // add backend sampling layers (if any)
8071
0
    llm->build_sampling();
8072
8073
    // if the gguf model was converted with --sentence-transformers-dense-modules
8074
    // there will be two additional dense projection layers
8075
    // dense linear projections are applied after pooling
8076
    // TODO: move reranking logic here and generalize
8077
0
    llm->build_dense_out(dense_2_out_layers, dense_3_out_layers);
8078
8079
0
    llm->res->set_outputs();
8080
8081
0
    return llm->res->get_gf();
8082
0
}
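
The switch above is a factory dispatch: each architecture case constructs its graph-builder subclass with std::make_unique and then breaks. A self-contained toy of that pattern, using placeholder names only (none of these types exist in llama.cpp), might look like this:

#include <memory>

// illustration only: stand-ins for the real builder hierarchy
struct toy_builder            { virtual ~toy_builder() = default; };
struct toy_builder_arch_a : toy_builder {};
struct toy_builder_arch_b : toy_builder {};

enum toy_arch { TOY_ARCH_A, TOY_ARCH_B };

static std::unique_ptr<toy_builder> toy_make_builder(toy_arch arch) {
    std::unique_ptr<toy_builder> b;
    switch (arch) {
        case TOY_ARCH_A:
            {
                b = std::make_unique<toy_builder_arch_a>();
            } break;
        case TOY_ARCH_B:
            {
                b = std::make_unique<toy_builder_arch_b>();
            } break;
    }
    return b;
}
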
8083
8084
8085
//
8086
// interface implementation
8087
//
8088
8089
0
llama_model_params llama_model_default_params() {
8090
0
    llama_model_params result = {
8091
0
        /*.devices                     =*/ nullptr,
8092
0
        /*.tensor_buft_overrides       =*/ nullptr,
8093
0
        /*.n_gpu_layers                =*/ -1,
8094
0
        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
8095
0
        /*.main_gpu                    =*/ 0,
8096
0
        /*.tensor_split                =*/ nullptr,
8097
0
        /*.progress_callback           =*/ nullptr,
8098
0
        /*.progress_callback_user_data =*/ nullptr,
8099
0
        /*.kv_overrides                =*/ nullptr,
8100
0
        /*.vocab_only                  =*/ false,
8101
0
        /*.use_mmap                    =*/ true,
8102
0
        /*.use_direct_io               =*/ true,
8103
0
        /*.use_mlock                   =*/ false,
8104
0
        /*.check_tensors               =*/ false,
8105
0
        /*.use_extra_bufts             =*/ true,
8106
0
        /*.no_host                     =*/ false,
8107
0
        /*.no_alloc                    =*/ false,
8108
0
    };
8109
8110
0
    return result;
8111
0
}
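
A minimal sketch (assuming "llama.h" is on the include path) of taking the defaults returned above and overriding a couple of the fields shown; how the struct is then handed to the model loader is outside this file:

#include "llama.h"

static llama_model_params my_model_params() {
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 32;    // offload 32 layers instead of the default -1
    mparams.use_mlock    = true;  // pin the weights in memory (default is false)
    return mparams;
}
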
8112
8113
0
const llama_vocab * llama_model_get_vocab(const llama_model * model) {
8114
0
    return &model->vocab;
8115
0
}
8116
8117
0
void llama_free_model(llama_model * model) {
8118
0
    llama_model_free(model);
8119
0
}
8120
8121
0
void llama_model_free(llama_model * model) {
8122
0
    delete model;
8123
0
}
8124
8125
0
int32_t llama_model_n_ctx_train(const llama_model * model) {
8126
0
    return model->hparams.n_ctx_train;
8127
0
}
8128
8129
0
int32_t llama_model_n_embd(const llama_model * model) {
8130
0
    return model->hparams.n_embd;
8131
0
}
8132
8133
0
int32_t llama_model_n_embd_inp(const llama_model * model) {
8134
0
    return model->hparams.n_embd_inp();
8135
0
}
8136
8137
0
int32_t llama_model_n_embd_out(const llama_model * model) {
8138
0
    return model->hparams.get_n_embd_out();
8139
0
}
8140
8141
0
int32_t llama_model_n_layer(const llama_model * model) {
8142
0
    return model->hparams.n_layer;
8143
0
}
8144
8145
0
int32_t llama_model_n_head(const llama_model * model) {
8146
0
    return model->hparams.n_head();
8147
0
}
8148
8149
0
int32_t llama_model_n_head_kv(const llama_model * model) {
8150
0
    return model->hparams.n_head_kv();
8151
0
}
8152
8153
0
int32_t llama_model_n_swa(const llama_model * model) {
8154
0
    return model->hparams.n_swa;
8155
0
}
8156
8157
0
uint32_t llama_model_n_cls_out(const struct llama_model * model) {
8158
0
    return model->hparams.n_cls_out;
8159
0
}
8160
8161
0
const char * llama_model_cls_label(const struct llama_model * model, uint32_t i) {
8162
0
    if (i < model->classifier_labels.size()) {
8163
0
        return model->classifier_labels[i].c_str();
8164
0
    }
8165
8166
0
    return nullptr;
8167
0
}
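
A minimal usage sketch for the getters above (assuming "llama.h" is on the include path and `model` was loaded elsewhere):

#include "llama.h"
#include <cstdio>

static void print_model_dims(const llama_model * model) {
    std::printf("n_ctx_train = %d\n", llama_model_n_ctx_train(model));
    std::printf("n_embd      = %d\n", llama_model_n_embd(model));
    std::printf("n_layer     = %d\n", llama_model_n_layer(model));
    std::printf("n_head      = %d\n", llama_model_n_head(model));
    std::printf("n_head_kv   = %d\n", llama_model_n_head_kv(model));
}
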
8168
8169
// deprecated
8170
0
int32_t llama_n_ctx_train(const llama_model * model) {
8171
0
    return llama_model_n_ctx_train(model);
8172
0
}
8173
8174
// deprecated
8175
0
int32_t llama_n_embd(const llama_model * model) {
8176
0
    return llama_model_n_embd(model);
8177
0
}
8178
8179
// deprecated
8180
0
int32_t llama_n_layer(const llama_model * model) {
8181
0
    return llama_model_n_layer(model);
8182
0
}
8183
8184
// deprecated
8185
0
int32_t llama_n_head(const llama_model * model) {
8186
0
    return llama_model_n_head(model);
8187
0
}
8188
8189
0
llama_rope_type llama_model_rope_type(const llama_model * model) {
8190
0
    switch (model->arch) {
8191
        // these models do not use RoPE
8192
0
        case LLM_ARCH_CLIP:
8193
0
        case LLM_ARCH_GPT2:
8194
0
        case LLM_ARCH_GPTJ:
8195
0
        case LLM_ARCH_MPT:
8196
0
        case LLM_ARCH_REFACT:
8197
0
        case LLM_ARCH_BLOOM:
8198
0
        case LLM_ARCH_MAMBA:
8199
0
        case LLM_ARCH_MAMBA2:
8200
0
        case LLM_ARCH_JAMBA:
8201
0
        case LLM_ARCH_JINA_BERT_V2:
8202
0
        case LLM_ARCH_T5:
8203
0
        case LLM_ARCH_T5ENCODER:
8204
0
        case LLM_ARCH_JAIS:
8205
0
        case LLM_ARCH_RWKV6:
8206
0
        case LLM_ARCH_RWKV6QWEN2:
8207
0
        case LLM_ARCH_RWKV7:
8208
0
        case LLM_ARCH_ARWKV7:
8209
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
8210
0
        case LLM_ARCH_NEMOTRON_H:
8211
0
        case LLM_ARCH_NEMOTRON_H_MOE:
8212
0
            return LLAMA_ROPE_TYPE_NONE;
8213
8214
        // use what we call a normal RoPE, operating on pairs of consecutive head values
8215
0
        case LLM_ARCH_LLAMA:
8216
0
        case LLM_ARCH_LLADA:
8217
0
        case LLM_ARCH_LLAMA4:
8218
0
        case LLM_ARCH_DECI:
8219
0
        case LLM_ARCH_BAICHUAN:
8220
0
        case LLM_ARCH_STARCODER:
8221
0
        case LLM_ARCH_INTERNLM2:
8222
0
        case LLM_ARCH_MINICPM:
8223
0
        case LLM_ARCH_XVERSE:
8224
0
        case LLM_ARCH_COMMAND_R:
8225
0
        case LLM_ARCH_COHERE2:
8226
0
        case LLM_ARCH_OLMO:
8227
0
        case LLM_ARCH_ARCTIC:
8228
0
        case LLM_ARCH_DEEPSEEK:
8229
0
        case LLM_ARCH_DEEPSEEK2:
8230
0
        case LLM_ARCH_PLM:
8231
0
        case LLM_ARCH_CHATGLM:
8232
0
        case LLM_ARCH_GRANITE:
8233
0
        case LLM_ARCH_GRANITE_MOE:
8234
0
        case LLM_ARCH_GRANITE_HYBRID:
8235
0
        case LLM_ARCH_CHAMELEON:
8236
0
        case LLM_ARCH_BAILINGMOE:
8237
0
        case LLM_ARCH_NEO_BERT:
8238
0
        case LLM_ARCH_SMOLLM3:
8239
0
        case LLM_ARCH_ARCEE:
8240
0
        case LLM_ARCH_ERNIE4_5:
8241
0
        case LLM_ARCH_ERNIE4_5_MOE:
8242
0
        case LLM_ARCH_MISTRAL3:
8243
0
        case LLM_ARCH_LLAMA_EMBED:
8244
0
        case LLM_ARCH_MAINCODER:
8245
0
            return LLAMA_ROPE_TYPE_NORM;
8246
8247
        // the pairs of head values are offset by n_rot/2
8248
0
        case LLM_ARCH_FALCON:
8249
0
        case LLM_ARCH_FALCON_H1:
8250
0
        case LLM_ARCH_GROK:
8251
0
        case LLM_ARCH_DBRX:
8252
0
        case LLM_ARCH_BERT:
8253
0
        case LLM_ARCH_JINA_BERT_V3:
8254
0
        case LLM_ARCH_MODERN_BERT:
8255
0
        case LLM_ARCH_NOMIC_BERT:
8256
0
        case LLM_ARCH_NOMIC_BERT_MOE:
8257
0
        case LLM_ARCH_STABLELM:
8258
0
        case LLM_ARCH_BITNET:
8259
0
        case LLM_ARCH_QWEN:
8260
0
        case LLM_ARCH_QWEN2:
8261
0
        case LLM_ARCH_DREAM:
8262
0
        case LLM_ARCH_QWEN2MOE:
8263
0
        case LLM_ARCH_QWEN3:
8264
0
        case LLM_ARCH_QWEN3MOE:
8265
0
        case LLM_ARCH_LLADA_MOE:
8266
0
        case LLM_ARCH_RND1:
8267
0
        case LLM_ARCH_OLMO2:
8268
0
        case LLM_ARCH_OLMOE:
8269
0
        case LLM_ARCH_PHI2:
8270
0
        case LLM_ARCH_PHI3:
8271
0
        case LLM_ARCH_PHIMOE:
8272
0
        case LLM_ARCH_PLAMO:
8273
0
        case LLM_ARCH_PLAMO2:
8274
0
        case LLM_ARCH_PLAMO3:
8275
0
        case LLM_ARCH_GEMMA:
8276
0
        case LLM_ARCH_GEMMA2:
8277
0
        case LLM_ARCH_GEMMA3:
8278
0
        case LLM_ARCH_GEMMA3N:
8279
0
        case LLM_ARCH_GEMMA_EMBEDDING:
8280
0
        case LLM_ARCH_STARCODER2:
8281
0
        case LLM_ARCH_OPENELM:
8282
0
        case LLM_ARCH_GPTNEOX:
8283
0
        case LLM_ARCH_CODESHELL:
8284
0
        case LLM_ARCH_ORION:
8285
0
        case LLM_ARCH_NEMOTRON:
8286
0
        case LLM_ARCH_EXAONE:
8287
0
        case LLM_ARCH_EXAONE4:
8288
0
        case LLM_ARCH_EXAONE_MOE:
8289
0
        case LLM_ARCH_MINICPM3:
8290
0
        case LLM_ARCH_BAILINGMOE2:
8291
0
        case LLM_ARCH_DOTS1:
8292
0
        case LLM_ARCH_HUNYUAN_MOE:
8293
0
        case LLM_ARCH_OPENAI_MOE:
8294
0
        case LLM_ARCH_HUNYUAN_DENSE:
8295
0
        case LLM_ARCH_LFM2:
8296
0
        case LLM_ARCH_LFM2MOE:
8297
0
        case LLM_ARCH_SMALLTHINKER:
8298
0
        case LLM_ARCH_SEED_OSS:
8299
0
        case LLM_ARCH_GROVEMOE:
8300
0
        case LLM_ARCH_APERTUS:
8301
0
        case LLM_ARCH_MINIMAX_M2:
8302
0
        case LLM_ARCH_COGVLM:
8303
0
        case LLM_ARCH_PANGU_EMBED:
8304
0
        case LLM_ARCH_AFMOE:
8305
0
        case LLM_ARCH_QWEN3NEXT:
8306
0
        case LLM_ARCH_MIMO2:
8307
0
            return LLAMA_ROPE_TYPE_NEOX;
8308
8309
0
        case LLM_ARCH_QWEN2VL:
8310
0
            return LLAMA_ROPE_TYPE_MROPE;
8311
0
        case LLM_ARCH_QWEN3VL:
8312
0
        case LLM_ARCH_QWEN3VLMOE:
8313
0
            return LLAMA_ROPE_TYPE_IMROPE;
8314
8315
0
        case LLM_ARCH_GLM4:
8316
0
            return model->hparams.use_mrope() ? LLAMA_ROPE_TYPE_MROPE : LLAMA_ROPE_TYPE_NORM;
8317
0
        case LLM_ARCH_GLM4_MOE:
8318
0
            return model->hparams.use_mrope() ? LLAMA_ROPE_TYPE_MROPE : LLAMA_ROPE_TYPE_NEOX;
8319
8320
        // all model arches should be listed explicitly here
8321
0
        case LLM_ARCH_UNKNOWN:
8322
0
            GGML_ABORT("unknown architecture");
8323
0
    }
8324
8325
0
    return LLAMA_ROPE_TYPE_NONE;
8326
0
}
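
A small sketch that branches on the RoPE family reported above; the enum values are the ones this function returns:

#include "llama.h"

static bool model_uses_rope(const llama_model * model) {
    switch (llama_model_rope_type(model)) {
        case LLAMA_ROPE_TYPE_NONE:
            return false;  // e.g. Mamba/RWKV-style architectures listed above
        default:
            return true;   // NORM, NEOX, MROPE and IMROPE variants
    }
}
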
8327
8328
0
float llama_model_rope_freq_scale_train(const llama_model * model) {
8329
0
    return model->hparams.rope_freq_scale_train;
8330
0
}
8331
8332
0
int32_t llama_model_meta_val_str(const llama_model * model, const char * key, char * buf, size_t buf_size) {
8333
0
    const auto & it = model->gguf_kv.find(key);
8334
0
    if (it == model->gguf_kv.end()) {
8335
0
        if (buf_size > 0) {
8336
0
            buf[0] = '\0';
8337
0
        }
8338
0
        return -1;
8339
0
    }
8340
0
    return snprintf(buf, buf_size, "%s", it->second.c_str());
8341
0
}
8342
8343
0
int32_t llama_model_meta_count(const llama_model * model) {
8344
0
    return (int)model->gguf_kv.size();
8345
0
}
8346
8347
0
const char * llama_model_meta_key_str(llama_model_meta_key key) {
8348
0
    switch (key) {
8349
0
        case LLAMA_MODEL_META_KEY_SAMPLING_SEQUENCE:        return "general.sampling.sequence";
8350
0
        case LLAMA_MODEL_META_KEY_SAMPLING_TOP_K:           return "general.sampling.top_k";
8351
0
        case LLAMA_MODEL_META_KEY_SAMPLING_TOP_P:           return "general.sampling.top_p";
8352
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIN_P:           return "general.sampling.min_p";
8353
0
        case LLAMA_MODEL_META_KEY_SAMPLING_XTC_PROBABILITY: return "general.sampling.xtc_probability";
8354
0
        case LLAMA_MODEL_META_KEY_SAMPLING_XTC_THRESHOLD:   return "general.sampling.xtc_threshold";
8355
0
        case LLAMA_MODEL_META_KEY_SAMPLING_TEMP:            return "general.sampling.temp";
8356
0
        case LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_LAST_N:  return "general.sampling.penalty_last_n";
8357
0
        case LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_REPEAT:  return "general.sampling.penalty_repeat";
8358
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT:        return "general.sampling.mirostat";
8359
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_TAU:    return "general.sampling.mirostat_tau";
8360
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_ETA:    return "general.sampling.mirostat_eta";
8361
0
        default:                                            return nullptr;
8362
0
    }
8363
0
}
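
A minimal sketch combining the key helper above with llama_model_meta_val_str to read one of the model's default sampling parameters, if present:

#include "llama.h"
#include <cstdio>

static void print_default_temp(const llama_model * model) {
    char val[64];
    const char * key = llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_TEMP);
    if (key && llama_model_meta_val_str(model, key, val, sizeof(val)) >= 0) {
        std::printf("%s = %s\n", key, val);  // metadata values are stored as strings
    }
}
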
8364
8365
0
int32_t llama_model_meta_key_by_index(const llama_model * model, int i, char * buf, size_t buf_size) {
8366
0
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
8367
0
        if (buf_size > 0) {
8368
0
            buf[0] = '\0';
8369
0
        }
8370
0
        return -1;
8371
0
    }
8372
0
    auto it = model->gguf_kv.begin();
8373
0
    std::advance(it, i);
8374
0
    return snprintf(buf, buf_size, "%s", it->first.c_str());
8375
0
}
8376
8377
0
int32_t llama_model_meta_val_str_by_index(const llama_model * model, int32_t i, char * buf, size_t buf_size) {
8378
0
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
8379
0
        if (buf_size > 0) {
8380
0
            buf[0] = '\0';
8381
0
        }
8382
0
        return -1;
8383
0
    }
8384
0
    auto it = model->gguf_kv.begin();
8385
0
    std::advance(it, i);
8386
0
    return snprintf(buf, buf_size, "%s", it->second.c_str());
8387
0
}
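
A minimal sketch enumerating all GGUF metadata key/value pairs with the index-based accessors above (values longer than the buffers are truncated by snprintf):

#include "llama.h"
#include <cstdio>

static void dump_metadata(const llama_model * model) {
    const int32_t n = llama_model_meta_count(model);
    for (int32_t i = 0; i < n; ++i) {
        char key[256];
        char val[256];
        if (llama_model_meta_key_by_index    (model, i, key, sizeof(key)) >= 0 &&
            llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) >= 0) {
            std::printf("%s = %s\n", key, val);
        }
    }
}
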
8388
8389
0
int32_t llama_model_desc(const llama_model * model, char * buf, size_t buf_size) {
8390
0
    return snprintf(buf, buf_size, "%s", model->desc().c_str());
8391
0
}
8392
8393
0
uint64_t llama_model_size(const llama_model * model) {
8394
0
    return model->size();
8395
0
}
8396
8397
0
const char * llama_model_chat_template(const llama_model * model, const char * name) {
8398
0
    const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE)
8399
0
        : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE);
8400
0
    const auto & it = model->gguf_kv.find(key);
8401
0
    if (it == model->gguf_kv.end()) {
8402
        // one-off fix for very popular models (so we are not flooded with issues)
8403
        // do not extend this list unless absolutely necessary
8404
        // Mistral-Small-2503 does not have a built-in chat template
8405
0
        llama_vocab_pre_type pre_type = model->vocab.get_pre_type();
8406
0
        if (!name && pre_type == LLAMA_VOCAB_PRE_TYPE_TEKKEN && model->layers.size() == 40) {
8407
0
            return "mistral-v7-tekken";
8408
0
        }
8409
8410
0
        return nullptr;
8411
0
    }
8412
8413
0
    return it->second.c_str();
8414
0
}
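
A minimal sketch of fetching the default chat template; a null return means the model ships none (modulo the Tekken fallback handled above):

#include "llama.h"
#include <cstdio>

static void print_chat_template(const llama_model * model) {
    const char * tmpl = llama_model_chat_template(model, /*name=*/nullptr);
    if (tmpl) {
        std::printf("chat template:\n%s\n", tmpl);
    } else {
        std::printf("no built-in chat template\n");
    }
}
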
8415
8416
0
uint64_t llama_model_n_params(const llama_model * model) {
8417
0
    return model->n_elements();
8418
0
}
8419
8420
0
bool llama_model_has_encoder(const llama_model * model) {
8421
0
    switch (model->arch) {
8422
0
        case LLM_ARCH_T5:        return true;
8423
0
        case LLM_ARCH_T5ENCODER: return true;
8424
0
        default:                 return false;
8425
0
    }
8426
0
}
8427
8428
0
bool llama_model_has_decoder(const llama_model * model) {
8429
0
    switch (model->arch) {
8430
0
        case LLM_ARCH_T5ENCODER: return false;
8431
0
        default:                 return true;
8432
0
    }
8433
0
}
8434
8435
0
llama_token llama_model_decoder_start_token(const llama_model * model) {
8436
0
    return model->hparams.dec_start_token_id;
8437
0
}
8438
8439
0
bool llama_model_is_recurrent(const llama_model * model) {
8440
0
    return llm_arch_is_recurrent(model->arch);
8441
0
}
8442
8443
0
bool llama_model_is_hybrid(const llama_model * model) {
8444
0
    return llm_arch_is_hybrid(model->arch);
8445
0
}
8446
8447
0
bool llama_model_is_diffusion(const llama_model * model) {
8448
0
    return llm_arch_is_diffusion(model->arch);
8449
0
}
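
A minimal sketch querying the architecture traits exposed above before deciding how to drive the model:

#include "llama.h"
#include <cstdio>

static void print_arch_traits(const llama_model * model) {
    std::printf("encoder:   %d\n", llama_model_has_encoder(model));
    std::printf("decoder:   %d\n", llama_model_has_decoder(model));
    std::printf("recurrent: %d\n", llama_model_is_recurrent(model));
    std::printf("hybrid:    %d\n", llama_model_is_hybrid(model));
    std::printf("diffusion: %d\n", llama_model_is_diffusion(model));
    if (llama_model_has_encoder(model) && llama_model_has_decoder(model)) {
        std::printf("decoder start token: %d\n", llama_model_decoder_start_token(model));
    }
}
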
8450
8451
0
const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
8452
0
    return model->tensors_by_name;
8453
0
}