Coverage Report

Created: 2026-01-09 06:17

/src/llama.cpp/src/llama-model.cpp
Line | Count | Source
1
#include "llama-model.h"
2
3
#include "llama-impl.h"
4
#include "llama-mmap.h"
5
#include "llama-cparams.h"
6
#include "llama-model-loader.h"
7
8
#include "llama-kv-cache.h"
9
#include "llama-kv-cache-iswa.h"
10
#include "llama-memory-hybrid.h"
11
#include "llama-memory-recurrent.h"
12
13
#include "ggml-cpp.h"
14
15
#include "models/models.h"
16
17
#include <algorithm>
18
#include <cassert>
19
#include <cfloat>
20
#include <cstring>
21
#include <cmath>
22
#include <functional>
23
#include <map>
24
#include <regex>
25
#include <sstream>
26
#include <stdexcept>
27
28
0
const char * llm_type_name(llm_type type) {
29
0
    switch (type) {
30
0
        case LLM_TYPE_14M:           return "14M";
31
0
        case LLM_TYPE_17M:           return "17M";
32
0
        case LLM_TYPE_22M:           return "22M";
33
0
        case LLM_TYPE_33M:           return "33M";
34
0
        case LLM_TYPE_47M:           return "47M";
35
0
        case LLM_TYPE_60M:           return "60M";
36
0
        case LLM_TYPE_70M:           return "70M";
37
0
        case LLM_TYPE_80M:           return "80M";
38
0
        case LLM_TYPE_109M:          return "109M";
39
0
        case LLM_TYPE_137M:          return "137M";
40
0
        case LLM_TYPE_140M:          return "140M";
41
0
        case LLM_TYPE_149M:          return "149M";
42
0
        case LLM_TYPE_160M:          return "160M";
43
0
        case LLM_TYPE_190M:          return "190M";
44
0
        case LLM_TYPE_220M:          return "220M";
45
0
        case LLM_TYPE_250M:          return "250M";
46
0
        case LLM_TYPE_256M:          return "256M";
47
0
        case LLM_TYPE_270M:          return "270M";
48
0
        case LLM_TYPE_335M:          return "335M";
49
0
        case LLM_TYPE_350M:          return "350M";
50
0
        case LLM_TYPE_360M:          return "360M";
51
0
        case LLM_TYPE_395M:          return "395M";
52
0
        case LLM_TYPE_410M:          return "410M";
53
0
        case LLM_TYPE_450M:          return "450M";
54
0
        case LLM_TYPE_475M:          return "475M";
55
0
        case LLM_TYPE_558M:          return "558M";
56
0
        case LLM_TYPE_700M:          return "700M";
57
0
        case LLM_TYPE_770M:          return "770M";
58
0
        case LLM_TYPE_780M:          return "780M";
59
0
        case LLM_TYPE_950M:          return "950M";
60
0
        case LLM_TYPE_0_3B:          return "0.3B";
61
0
        case LLM_TYPE_0_5B:          return "0.5B";
62
0
        case LLM_TYPE_0_6B:          return "0.6B";
63
0
        case LLM_TYPE_1B:            return "1B";
64
0
        case LLM_TYPE_1_2B:          return "1.2B";
65
0
        case LLM_TYPE_1_3B:          return "1.3B";
66
0
        case LLM_TYPE_1_4B:          return "1.4B";
67
0
        case LLM_TYPE_1_5B:          return "1.5B";
68
0
        case LLM_TYPE_1_6B:          return "1.6B";
69
0
        case LLM_TYPE_1_7B:          return "1.7B";
70
0
        case LLM_TYPE_1_8B:          return "1.8B";
71
0
        case LLM_TYPE_2B:            return "2B";
72
0
        case LLM_TYPE_2_6B:          return "2.6B";
73
0
        case LLM_TYPE_2_8B:          return "2.8B";
74
0
        case LLM_TYPE_2_9B:          return "2.9B";
75
0
        case LLM_TYPE_3B:            return "3B";
76
0
        case LLM_TYPE_4B:            return "4B";
77
0
        case LLM_TYPE_6B:            return "6B";
78
0
        case LLM_TYPE_6_9B:          return "6.9B";
79
0
        case LLM_TYPE_7B:            return "7B";
80
0
        case LLM_TYPE_8B:            return "8B";
81
0
        case LLM_TYPE_9B:            return "9B";
82
0
        case LLM_TYPE_11B:           return "11B";
83
0
        case LLM_TYPE_12B:           return "12B";
84
0
        case LLM_TYPE_13B:           return "13B";
85
0
        case LLM_TYPE_14B:           return "14B";
86
0
        case LLM_TYPE_15B:           return "15B";
87
0
        case LLM_TYPE_16B:           return "16B";
88
0
        case LLM_TYPE_20B:           return "20B";
89
0
        case LLM_TYPE_26B:           return "26B";
90
0
        case LLM_TYPE_27B:           return "27B";
91
0
        case LLM_TYPE_30B:           return "30B";
92
0
        case LLM_TYPE_32B:           return "32B";
93
0
        case LLM_TYPE_34B:           return "34B";
94
0
        case LLM_TYPE_35B:           return "35B";
95
0
        case LLM_TYPE_36B:           return "36B";
96
0
        case LLM_TYPE_40B:           return "40B";
97
0
        case LLM_TYPE_65B:           return "65B";
98
0
        case LLM_TYPE_70B:           return "70B";
99
0
        case LLM_TYPE_120B:          return "120B";
100
0
        case LLM_TYPE_142B:          return "142B";
101
0
        case LLM_TYPE_236B:          return "236B";
102
0
        case LLM_TYPE_290B:          return "290B";
103
0
        case LLM_TYPE_314B:          return "314B";
104
0
        case LLM_TYPE_405B:          return "405B";
105
0
        case LLM_TYPE_671B:          return "671B";
106
0
        case LLM_TYPE_SMALL:         return "0.1B";
107
0
        case LLM_TYPE_MEDIUM:        return "0.4B";
108
0
        case LLM_TYPE_LARGE:         return "0.8B";
109
0
        case LLM_TYPE_XL:            return "1.5B";
110
0
        case LLM_TYPE_A1_7B:         return "A1.7B";
111
0
        case LLM_TYPE_A2_7B:         return "A2.7B";
112
0
        case LLM_TYPE_8x7B:          return "8x7B";
113
0
        case LLM_TYPE_8x22B:         return "8x22B";
114
0
        case LLM_TYPE_16x12B:        return "16x12B";
115
0
        case LLM_TYPE_16x3_8B:       return "16x3.8B";
116
0
        case LLM_TYPE_10B_128x3_66B: return "10B+128x3.66B";
117
0
        case LLM_TYPE_57B_A14B:      return "57B.A14B";
118
0
        case LLM_TYPE_17B_16E:       return "17Bx16E (Scout)";
119
0
        case LLM_TYPE_17B_128E:      return "17Bx128E (Maverick)";
120
0
        case LLM_TYPE_A13B:          return "A13B";
121
0
        case LLM_TYPE_7B_A1B:        return "7B.A1B";
122
0
        case LLM_TYPE_8B_A1B:        return "8B.A1B";
123
0
        case LLM_TYPE_16B_A1B:       return "16B.A1B";
124
0
        case LLM_TYPE_21B_A3B:       return "21B.A3B";
125
0
        case LLM_TYPE_30B_A3B:       return "30B.A3B";
126
0
        case LLM_TYPE_31B_A3_5B:     return "31B.A3.5B";
127
0
        case LLM_TYPE_80B_A3B:       return "80B.A3B";
128
0
        case LLM_TYPE_100B_A6B:      return "100B.A6B";
129
0
        case LLM_TYPE_102B_A12B:     return "102B.A12B";
130
0
        case LLM_TYPE_106B_A12B:     return "106B.A12B";
131
0
        case LLM_TYPE_230B_A10B:     return "230B.A10B";
132
0
        case LLM_TYPE_235B_A22B:     return "235B.A22B";
133
0
        case LLM_TYPE_300B_A47B:     return "300B.A47B";
134
0
        case LLM_TYPE_310B_A15B:     return "310B.A15B";
135
0
        case LLM_TYPE_355B_A32B:     return "355B.A32B";
136
0
        case LLM_TYPE_E2B:           return "E2B";
137
0
        case LLM_TYPE_E4B:           return "E4B";
138
0
        default:                     return "?B";
139
0
    }
140
0
}
141
142
0
static const char * llama_expert_gating_func_name(llama_expert_gating_func_type type) {
143
0
    switch (type) {
144
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: return "softmax";
145
0
        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: return "sigmoid";
146
0
        default:                                    return "unknown";
147
0
    }
148
0
}
149
150
static const std::map<llama_rope_scaling_type, const char *> LLAMA_ROPE_SCALING_TYPES = {
151
    { LLAMA_ROPE_SCALING_TYPE_NONE,       "none"       },
152
    { LLAMA_ROPE_SCALING_TYPE_LINEAR,     "linear"     },
153
    { LLAMA_ROPE_SCALING_TYPE_YARN,       "yarn"       },
154
    { LLAMA_ROPE_SCALING_TYPE_LONGROPE,   "longrope"   },
155
};
156
157
0
std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type) {
158
0
    return LLAMA_ROPE_SCALING_TYPES.at(rope_scaling_type);
159
0
}
160
161
0
static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
162
0
    for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
163
0
        if (kv.second == name) {
164
0
            return (llama_rope_scaling_type) kv.first;
165
0
        }
166
0
    }
167
168
0
    return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
169
0
}
170
171
// checks if the weight tensor can be used with the specified buffer type and device
172
0
static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) {
173
0
    GGML_ASSERT(w != nullptr);
174
175
0
    if (op == GGML_OP_NONE) {
176
0
        return true;
177
0
    }
178
179
0
    ggml_init_params params = {
180
0
        /*.mem_size   =*/ ggml_tensor_overhead()*8,
181
0
        /*.mem_buffer =*/ NULL,
182
0
        /*.no_alloc   =*/ true,
183
0
    };
184
0
    ggml_context_ptr ctx_ptr { ggml_init(params) };
185
0
    if (!ctx_ptr) {
186
0
        throw std::runtime_error(format("failed to create ggml context"));
187
0
    }
188
0
    ggml_context * ctx = ctx_ptr.get();
189
190
0
    ggml_tensor * op_tensor = nullptr;
191
192
0
    switch (op) {
193
0
        case GGML_OP_GET_ROWS:
194
0
            {
195
0
                ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
196
0
                op_tensor = ggml_get_rows(ctx, w, b);
197
0
            } break;
198
0
        case GGML_OP_MUL_MAT:
199
0
            {
200
0
                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], 512, w->ne[2], w->ne[3]);
201
0
                op_tensor = ggml_mul_mat(ctx, w, b);
202
0
            } break;
203
0
        case GGML_OP_MUL_MAT_ID:
204
0
            {
205
0
                int n_expert_used = hparams.n_expert_used;
206
0
                ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512);
207
0
                ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512);
208
0
                op_tensor = ggml_mul_mat_id(ctx, w, b, ids);
209
0
            } break;
210
0
        case GGML_OP_ADD:
211
0
            {
212
0
                ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
213
0
                op_tensor = ggml_add(ctx, a, w);
214
0
            } break;
215
0
        case GGML_OP_ADD_ID:
216
0
            {
217
0
                int n_expert_used = hparams.n_expert_used;
218
0
                ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512);
219
0
                ggml_tensor * c = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512);
220
0
                op_tensor = ggml_add_id(ctx, a, w, c);
221
0
            } break;
222
0
        case GGML_OP_MUL:
223
0
            {
224
0
                ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
225
0
                op_tensor = ggml_mul(ctx, a, w);
226
0
            } break;
227
0
        case GGML_OP_DIV:
228
0
            {
229
0
                ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, w->ne[0]);
230
0
                op_tensor = ggml_div(ctx, a, w);
231
0
            } break;
232
0
        case GGML_OP_ROPE:
233
0
            {
234
0
                int n_embd_head = hparams.n_embd_head_v;
235
0
                int n_head = hparams.n_head();
236
0
                ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd_head, n_head, 512);
237
0
                ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
238
0
                op_tensor = ggml_rope_ext(
239
0
                    ctx, a, b, w,
240
0
                    0, 0, 0, 0, 0,
241
0
                    0, 0, 0, 0
242
0
                );
243
244
0
            } break;
245
0
        case GGML_OP_SSM_CONV:
246
0
            {
247
0
                const int64_t n_seq_tokens = 512;
248
0
                const int64_t n_seqs       = 3;
249
0
                ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0] - 1 + n_seq_tokens, w->ne[1], n_seqs);
250
0
                op_tensor = ggml_ssm_conv(ctx, conv_x, w);
251
0
            } break;
252
0
        case GGML_OP_SSM_SCAN:
253
0
            {
254
                // w is ssm_a, which is used to distinguish Mamba-1 and Mamba-2
255
0
                const int64_t d_state      = w->ne[0] == 1 ? hparams.ssm_d_state : w->ne[0];
256
0
                const int64_t n_head       = w->ne[1];
257
0
                const int64_t head_dim     = hparams.ssm_d_inner / n_head;
258
0
                const int64_t n_group      = hparams.ssm_n_group ? hparams.ssm_n_group : 1;
259
0
                const int64_t n_seq_tokens = 512;
260
0
                const int64_t n_seqs       = 3;
261
0
                ggml_tensor * s   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, head_dim, n_head, n_seqs);
262
0
                ggml_tensor * x   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, head_dim, n_head, n_seq_tokens, n_seqs);
263
0
                ggml_tensor * dt  = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_head, n_seq_tokens, n_seqs);
264
0
                ggml_tensor * B   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs);
265
0
                ggml_tensor * C   = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs);
266
0
                ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_seqs);
267
0
                op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C, ids);
268
0
            } break;
269
0
        case GGML_OP_RWKV_WKV6:
270
0
            {
271
                // FIXME
272
0
                const int64_t S = 123;
273
0
                const int64_t H = 123;
274
0
                const int64_t n_tokens = 123;
275
0
                const int64_t n_seqs = 123;
276
0
                ggml_tensor  * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
277
0
                ggml_tensor  * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
278
0
                ggml_tensor  * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
279
0
                ggml_tensor  * tf = w;
280
0
                ggml_tensor  * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
281
0
                ggml_tensor  * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H);
282
0
                op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state);
283
0
            } break;
284
0
        case GGML_OP_IM2COL:
285
0
            {
286
0
                const int n_embd_inp = hparams.n_embd_inp();
287
0
                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd_inp, w->ne[1], 1, 1);
288
0
                op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16);
289
0
            } break;
290
0
        case GGML_OP_SCALE:
291
0
            {
292
0
                op_tensor = ggml_scale(ctx, w, 1.0f);
293
0
            } break;
294
0
        default:
295
0
            GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name);
296
0
    }
297
298
    // create a temporary dummy buffer for the weight so that supports_op can check the buffer type
299
0
    GGML_ASSERT(w->buffer == nullptr);
300
0
    w->buffer = ggml_backend_buft_alloc_buffer(buft, 0);
301
0
    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
302
0
    ggml_backend_buffer_free(w->buffer);
303
0
    w->buffer = nullptr;
304
305
0
    return op_supported;
306
0
}
307
308
// lists of buffer types used for each layer
309
using buft_list_t = std::vector<std::pair<ggml_backend_dev_t, ggml_backend_buffer_type_t>>;
310
311
// find the first buffer type in the list that can use the tensor
312
0
static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hparams, ggml_tensor * tensor, ggml_op op, const buft_list_t & buft_list) {
313
0
    GGML_ASSERT(!buft_list.empty());
314
0
    for (const auto & cur : buft_list) {
315
0
        ggml_backend_dev_t cur_dev = cur.first;
316
0
        ggml_backend_buffer_type_t cur_buft = cur.second;
317
0
        if (weight_buft_supported(hparams, tensor, op, cur_buft, cur_dev)) {
318
0
            return cur_buft;
319
0
        }
320
0
    }
321
322
0
    return nullptr;
323
0
}
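A minimal usage sketch (illustration only, not part of the measured llama-model.cpp source): a caller holding a priority-ordered buft_list asks select_weight_buft for the first (device, buffer type) pair that passes the weight_buft_supported probe, and must handle the nullptr result itself. The wrapper name and the origin of w/op are assumptions.

// hypothetical wrapper; `w` and `op` are assumed to come from the model loader
static ggml_backend_buffer_type_t place_weight_or_throw(
        const llama_hparams & hparams, ggml_tensor * w, ggml_op op, const buft_list_t & buft_list) {
    ggml_backend_buffer_type_t buft = select_weight_buft(hparams, w, op, buft_list);
    if (buft == nullptr) {
        // no (device, buffer type) pair in the list passed the weight_buft_supported probe
        throw std::runtime_error(format("no suitable buffer type for tensor %s", w->name));
    }
    return buft;
}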
324
325
// CPU: ACCEL -> GPU host -> CPU extra -> CPU
326
0
static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts, bool no_host) {
327
0
    buft_list_t buft_list;
328
329
    // add ACCEL buffer types
330
0
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
331
0
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
332
0
        if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
333
0
            auto * buft = ggml_backend_dev_buffer_type(dev);
334
            // skip the plain CPU buffer type here; it is added last as the final fallback
335
0
            if (buft != ggml_backend_cpu_buffer_type()) {
336
0
                buft_list.emplace_back(dev, buft);
337
0
            }
338
0
        }
339
0
    }
340
341
    // add a host buffer type
342
    // storing the tensors in a host buffer is useful when the processing of large batches
343
    // is offloaded to a GPU device, since it reduces the time spent on data transfers
344
    // generally, this will be done using the first device in the list
345
    // a better approach would be to handle this on a weight-by-weight basis using the offload_op
346
    // function of the device to determine if it would benefit from being stored in a host buffer
347
0
    if (!no_host) {
348
0
        for (auto * dev : devices) {
349
0
            ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
350
0
            if (buft) {
351
0
                buft_list.emplace_back(dev, buft);
352
0
                break;
353
0
            }
354
0
        }
355
0
    }
356
357
    // add extra buffer types
358
0
    if (use_extra_bufts) {
359
0
        auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
360
0
        if (cpu_dev == nullptr) {
361
0
            throw std::runtime_error(format("%s: no CPU backend found", __func__));
362
0
        }
363
364
0
        auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
365
0
        auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
366
0
            ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
367
0
        if (ggml_backend_dev_get_extra_bufts_fn) {
368
0
            ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
369
0
            while (extra_bufts && *extra_bufts) {
370
0
                buft_list.emplace_back(cpu_dev, *extra_bufts);
371
0
                ++extra_bufts;
372
0
            }
373
0
        }
374
0
    }
375
376
    // add the CPU buffer type
377
0
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
378
0
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
379
0
        if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
380
0
            buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
381
0
        }
382
0
    }
383
384
0
    return buft_list;
385
0
}
386
387
// GPU: split if LLAMA_SPLIT_MODE_ROW -> GPU
388
0
static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, llama_split_mode split_mode, const float * tensor_split) {
389
0
    buft_list_t buft_list;
390
391
    // add the device split buffer type if requested and available
392
0
    if (split_mode == LLAMA_SPLIT_MODE_ROW) {
393
0
        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
394
0
        auto ggml_backend_split_buffer_type_fn = (ggml_backend_split_buffer_type_t)
395
0
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_split_buffer_type");
396
0
        if (ggml_backend_split_buffer_type_fn) {
397
0
            size_t dev_index = [&]() {
398
0
                auto * reg = ggml_backend_dev_backend_reg(dev);
399
0
                for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) {
400
0
                    if (ggml_backend_reg_dev_get(reg, i) == dev) {
401
0
                        return i;
402
0
                    }
403
0
                }
404
0
                throw std::runtime_error(format("device %s not found in its backend reg", ggml_backend_dev_name(dev)));
405
0
            }();
406
0
            auto * buft = ggml_backend_split_buffer_type_fn(dev_index, tensor_split);
407
0
            if (buft != nullptr) {
408
0
                buft_list.emplace_back(dev, buft);
409
0
            }
410
0
        }
411
0
    }
412
413
    // add the device default buffer type
414
0
    buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
415
416
    // add the device extra buffer type (if any)
417
0
    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
418
0
    auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
419
0
        ggml_backend_reg_get_proc_address(reg, "ggml_backend_dev_get_extra_bufts");
420
421
0
    if (ggml_backend_dev_get_extra_bufts_fn) {
422
0
        ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(dev);
423
0
        while (extra_bufts && *extra_bufts) {
424
0
            buft_list.emplace_back(dev, *extra_bufts);
425
0
            ++extra_bufts;
426
0
        }
427
0
    }
428
429
0
    return buft_list;
430
0
}
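A sketch of how the two builders above might be combined, mirroring the cpu_buft_list / gpu_buft_list members of llama_model::impl declared below; the helper name is hypothetical, and the devices, split_mode, tensor_split, use_extra_bufts and no_host values are assumed to come from llama_model_params.

// hypothetical glue code: one CPU list plus one list per GPU device
static void build_buft_lists(
        const std::vector<ggml_backend_dev_t> & devices,
        llama_split_mode split_mode, const float * tensor_split,
        bool use_extra_bufts, bool no_host,
        buft_list_t & cpu_buft_list,
        std::map<ggml_backend_dev_t, buft_list_t> & gpu_buft_list) {
    // CPU: ACCEL -> GPU host -> CPU extra -> CPU
    cpu_buft_list = make_cpu_buft_list(devices, use_extra_bufts, no_host);

    // GPU: optional split buffer type -> device default -> device extras
    for (ggml_backend_dev_t dev : devices) {
        gpu_buft_list[dev] = make_gpu_buft_list(dev, split_mode, tensor_split);
    }
}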
431
432
struct llama_model::impl {
433
0
    impl() = default;
434
0
    ~impl() = default;
435
436
    uint64_t n_elements = 0;
437
438
    size_t n_bytes = 0;
439
440
    std::string desc_str;
441
442
    // model memory mapped files
443
    llama_mmaps mappings;
444
445
    // objects representing data potentially being locked in memory
446
    llama_mlocks mlock_bufs;
447
    llama_mlocks mlock_mmaps;
448
449
    // contexts where the model tensors metadata is stored as well as the corresponding buffers:
450
    std::vector<std::pair<ggml_context_ptr, std::vector<ggml_backend_buffer_ptr>>> ctxs_bufs;
451
452
    buft_list_t cpu_buft_list;
453
    std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
454
455
    struct layer_dev {
456
        ggml_backend_dev_t dev;
457
        buft_list_t * buft_list;
458
    };
459
460
    layer_dev dev_input = {};
461
    layer_dev dev_output = {};
462
    std::vector<layer_dev> dev_layer;
463
464
    bool has_tensor_overrides;
465
};
466
467
0
llama_model::llama_model(const llama_model_params & params) : params(params), pimpl(std::make_unique<impl>()) {
468
0
    pimpl->has_tensor_overrides = params.tensor_buft_overrides && params.tensor_buft_overrides[0].pattern;
469
0
}
470
471
0
llama_model::~llama_model() = default;
472
473
0
void llama_model::load_stats(llama_model_loader & ml) {
474
0
    pimpl->n_elements = ml.n_elements;
475
0
    pimpl->n_bytes = ml.n_bytes;
476
0
}
477
478
0
void llama_model::load_arch(llama_model_loader & ml) {
479
0
    arch = ml.get_arch();
480
0
    if (arch == LLM_ARCH_UNKNOWN) {
481
0
        throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
482
0
    }
483
0
}
484
485
0
void llama_model::load_hparams(llama_model_loader & ml) {
486
0
    const gguf_context * ctx = ml.meta.get();
487
488
    // get metadata as string
489
0
    for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
490
0
        gguf_type type = gguf_get_kv_type(ctx, i);
491
0
        if (type == GGUF_TYPE_ARRAY) {
492
0
            continue;
493
0
        }
494
0
        const char * name = gguf_get_key(ctx, i);
495
0
        const std::string value = gguf_kv_to_str(ctx, i);
496
0
        gguf_kv.emplace(name, value);
497
0
    }
498
499
    // get general kv
500
0
    ml.get_key(LLM_KV_GENERAL_NAME, name, false);
501
502
    // everything past this point is not vocab-related
503
    // for CLIP models, we only need to load tensors, no hparams
504
0
    if (hparams.vocab_only || ml.get_arch() == LLM_ARCH_CLIP) {
505
0
        return;
506
0
    }
507
508
0
    ml.get_key(LLM_KV_CONTEXT_LENGTH,          hparams.n_ctx_train);
509
0
    ml.get_key(LLM_KV_EMBEDDING_LENGTH,        hparams.n_embd);
510
0
    ml.get_key(LLM_KV_EMBEDDING_LENGTH_OUT,    hparams.n_embd_out, false);
511
0
    ml.get_key(LLM_KV_BLOCK_COUNT,             hparams.n_layer);
512
0
    ml.get_key(LLM_KV_EXPERT_COUNT,            hparams.n_expert,        false);
513
0
    ml.get_key(LLM_KV_EXPERT_USED_COUNT,       hparams.n_expert_used,   false);
514
0
    ml.get_key(LLM_KV_EXPERT_GROUP_COUNT,      hparams.n_expert_groups, false);
515
0
    ml.get_key(LLM_KV_EXPERT_GROUP_USED_COUNT, hparams.n_group_used,    false);
516
517
0
    if (arch == LLM_ARCH_WAVTOKENIZER_DEC) {
518
0
        ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
519
520
0
        ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd);
521
0
        ml.get_key(LLM_KV_POSNET_BLOCK_COUNT,      hparams.posnet.n_layer);
522
523
0
        ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd);
524
0
        ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT,      hparams.convnext.n_layer);
525
0
    }
526
527
0
    GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
528
0
    GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
529
0
    if (hparams.n_expert > 0) {
530
0
        GGML_ASSERT(hparams.n_expert_used > 0);
531
0
        GGML_ASSERT(hparams.n_expert_groups < hparams.n_expert);
532
0
        if (hparams.n_expert_groups > 1) {
533
0
            GGML_ASSERT(hparams.n_expert % hparams.n_expert_groups == 0);
534
0
            GGML_ASSERT(hparams.n_group_used > 0);
535
0
            GGML_ASSERT(hparams.n_group_used < hparams.n_expert_groups);
536
0
        }
537
0
    } else {
538
0
        GGML_ASSERT(hparams.n_expert_used == 0);
539
0
        GGML_ASSERT(hparams.n_expert_groups == 0);
540
0
    }
541
542
0
    std::fill(hparams.n_head_arr.begin(),    hparams.n_head_arr.end(),    0);
543
0
    std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
544
0
    std::fill(hparams.n_ff_arr.begin(),      hparams.n_ff_arr.end(),      0);
545
0
    std::fill(
546
0
        hparams.recurrent_layer_arr.begin(),
547
0
        hparams.recurrent_layer_arr.end(),
548
0
        llm_arch_is_recurrent(ml.get_arch()));
549
550
0
    std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
551
0
    std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
552
553
0
    std::fill(hparams.xielu_alpha_n.begin(), hparams.xielu_alpha_n.end(), 0.0f);
554
0
    std::fill(hparams.xielu_alpha_p.begin(), hparams.xielu_alpha_p.end(), 0.0f);
555
0
    std::fill(hparams.xielu_beta.begin(), hparams.xielu_beta.end(), 0.0f);
556
0
    std::fill(hparams.xielu_eps.begin(), hparams.xielu_eps.end(), 0.0f);
557
558
0
    ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff_arr,   hparams.n_layer, false);
559
0
    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
560
561
    // n_head_kv is optional, default to n_head
562
0
    hparams.n_head_kv_arr = hparams.n_head_arr;
563
564
0
    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
565
566
0
    bool rope_finetuned = false;
567
0
    ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
568
0
    hparams.rope_finetuned = rope_finetuned;
569
570
0
    hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
571
0
    ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
572
573
    // rope_freq_base (optional)
574
0
    hparams.rope_freq_base_train = 10000.0f;
575
0
    ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
576
577
0
    std::string rope_scaling("linear");
578
0
    ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
579
0
    hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
580
0
    GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
581
582
    // TODO: Handle SWA metadata similarly when models start implementing it
583
    // rope_freq_scale (inverse of the kv) is optional
584
0
    float ropescale = 0.0f;
585
0
    if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
586
        // try the old key name
587
0
        ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
588
0
    }
589
0
    hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
590
591
0
    ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
592
593
    // non-transformer models do not have attention heads
594
0
    if (hparams.n_head() > 0) {
595
        // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
596
        // gpt-j n_rot = rotary_dim
597
598
0
        hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
599
0
        ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
600
601
0
        hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
602
0
        ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
603
604
        // sanity check for n_rot (optional)
605
0
        hparams.n_rot = hparams.n_embd_head_k;
606
607
0
        ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
608
609
0
        if (arch == LLM_ARCH_LLAMA || arch == LLM_ARCH_DECI || arch == LLM_ARCH_FALCON || arch == LLM_ARCH_LLAMA_EMBED) {
610
0
            if (hparams.n_rot != hparams.n_embd_head_k) {
611
0
                throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
612
0
            }
613
0
        }
614
0
    } else {
615
0
        hparams.n_rot = 0;
616
0
        hparams.n_embd_head_k = 0;
617
0
        hparams.n_embd_head_v = 0;
618
0
    }
619
620
    // for differentiating model types
621
0
    uint32_t n_vocab = 0;
622
0
    ml.get_key(LLM_KV_VOCAB_SIZE, n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, n_vocab, false);
623
624
    // for classifier models
625
0
    ml.get_arr(LLM_KV_CLASSIFIER_OUTPUT_LABELS, classifier_labels, false);
626
0
    if (!classifier_labels.empty()) {
627
0
        hparams.n_cls_out = classifier_labels.size();
628
0
    }
629
630
    // arch-specific KVs
631
0
    switch (arch) {
632
0
        case LLM_ARCH_LLAMA:
633
0
        case LLM_ARCH_LLAMA_EMBED:
634
0
            {
635
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
636
637
0
                if (hparams.n_expert == 8) {
638
0
                    switch (hparams.n_layer) {
639
0
                        case 32: type = LLM_TYPE_8x7B; break;
640
0
                        case 56: type = LLM_TYPE_8x22B; break;
641
0
                        default: type = LLM_TYPE_UNKNOWN;
642
0
                    }
643
0
                } else {
644
0
                    switch (hparams.n_layer) {
645
0
                        case 16: type = LLM_TYPE_1B; break; // Llama 3.2 1B
646
0
                        case 22: type = LLM_TYPE_1B; break;
647
0
                        case 26: type = LLM_TYPE_3B; break;
648
0
                        case 28: type = LLM_TYPE_3B; break; // Llama 3.2 3B
649
0
                        case 30: type = LLM_TYPE_256M; break; // smoldocling 256M
650
                        // granite uses a vocab with len 49152
651
0
                        case 32: type = n_vocab == 49152 ? LLM_TYPE_3B : (n_vocab < 40000 ? LLM_TYPE_7B : LLM_TYPE_8B); break;
652
0
                        case 36: type = LLM_TYPE_8B; break; // granite
653
0
                        case 40: type = LLM_TYPE_13B; break;
654
0
                        case 48: type = LLM_TYPE_34B; break;
655
0
                        case 60: type = LLM_TYPE_30B; break;
656
0
                        case 80: type = hparams.n_head() == hparams.n_head_kv() ? LLM_TYPE_65B : LLM_TYPE_70B; break;
657
0
                        default: type = LLM_TYPE_UNKNOWN;
658
0
                    }
659
0
                }
660
0
            } break;
661
0
        case LLM_ARCH_LLAMA4:
662
0
            {
663
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
664
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
665
0
                ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP,   hparams.n_moe_layer_step);
666
667
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
668
0
                if (found_swa && hparams.n_swa == 0) {
669
0
                    hparams.swa_type             = LLAMA_SWA_TYPE_NONE;
670
0
                    hparams.n_no_rope_layer_step = hparams.n_layer; // always use rope
671
0
                } else {
672
0
                    hparams.swa_type                = LLAMA_SWA_TYPE_CHUNKED;
673
0
                    hparams.n_swa                   = 8192;
674
0
                    hparams.n_attn_temp_floor_scale = 8192;
675
0
                    hparams.f_attn_temp_scale       = 0.1f;
676
0
                    hparams.f_attn_temp_offset      = 1.0f;
677
0
                    hparams.set_swa_pattern(4);   // pattern: 3 chunked - 1 full
678
679
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
680
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
681
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
682
0
                }
683
684
0
                switch (hparams.n_expert) {
685
0
                    case 0: {
686
                        // MobileLLM (no MoE)
687
0
                        switch (hparams.n_embd) {
688
0
                            case 2048: type = LLM_TYPE_140M; break;
689
0
                            case 4096: type = LLM_TYPE_360M; break;
690
0
                            case 6144: type = LLM_TYPE_950M; break;
691
0
                            default:   type = LLM_TYPE_UNKNOWN;
692
0
                        }
693
0
                    } break;
694
0
                    case 16:  type = LLM_TYPE_17B_16E; break;
695
0
                    case 128: type = LLM_TYPE_17B_128E; break;
696
0
                    default:  type = LLM_TYPE_UNKNOWN;
697
0
                }
698
699
0
                hparams.use_kq_norm = type != LLM_TYPE_17B_128E;
700
0
            } break;
701
0
        case LLM_ARCH_ARCEE:
702
0
            {
703
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
704
705
                // Arcee uses the same structure as Llama
706
0
                switch (hparams.n_layer) {
707
0
                    case 36: type = LLM_TYPE_4B; break;
708
0
                    default: type = LLM_TYPE_UNKNOWN;
709
0
                }
710
0
            } break;
711
0
        case LLM_ARCH_AFMOE:
712
0
            {
713
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
714
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
715
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
716
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
717
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
718
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale, false);
719
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
720
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa, false);
721
722
                // Set up interleaved sliding window attention (ISWA)
723
                // Pattern: 3 sliding - 1 full (global_attn_every_n_layers = 4)
724
0
                if (hparams.n_swa > 0) {
725
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
726
0
                    hparams.set_swa_pattern(4);
727
728
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
729
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
730
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
731
0
                } else {
732
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
733
0
                }
734
735
                // Default to sigmoid if not set
736
0
                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
737
0
                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID;
738
0
                }
739
740
0
                switch (hparams.n_layer) {
741
0
                    case 56: type = LLM_TYPE_6B; break;
742
0
                    case 32: type = LLM_TYPE_26B; break;
743
0
                    default: type = LLM_TYPE_UNKNOWN;
744
0
                }
745
0
            } break;
746
0
        case LLM_ARCH_DECI:
747
0
            {
748
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
749
0
                switch (hparams.n_layer) {
750
0
                    case 32: type = LLM_TYPE_7B; break;
751
0
                    case 80: type = LLM_TYPE_70B; break;
752
0
                    case 162: type = LLM_TYPE_405B; break;
753
0
                    default: type = LLM_TYPE_UNKNOWN;
754
0
                }
755
0
            } break;
756
0
        case LLM_ARCH_MINICPM:
757
0
            {
758
                // Backward-compatible defaults for older MiniCPM GGUFs
759
0
                hparams.f_embedding_scale = 12.0f;
760
0
                hparams.f_residual_scale  = 1.4f / sqrtf(float(hparams.n_layer));
761
0
                hparams.f_logit_scale     = hparams.n_embd ? (256.0f / float(hparams.n_embd)) : 1.0f;
762
763
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
764
765
                // Optional KV reads, override defaults if present in newer GGUF exports
766
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale, /*required=*/false);
767
0
                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale, /*required=*/false);
768
0
                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale, /*required=*/false);
769
770
                // MiniCPM uses rope by default, unlike Granite which uses it as a switch
771
0
                hparams.rope_finetuned = true;
772
773
0
                switch (hparams.n_layer) {
774
0
                    case 52: type = LLM_TYPE_1B; break;
775
0
                    case 40: type = LLM_TYPE_2B; break;
776
0
                    default: type = LLM_TYPE_UNKNOWN;
777
0
                }
778
0
            } break;
779
0
        case LLM_ARCH_MINICPM3:
780
0
            {
781
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
782
0
                ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK,       hparams.n_lora_q);
783
0
                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK,      hparams.n_lora_kv);
784
785
0
                switch (hparams.n_layer) {
786
0
                    case 62: type = LLM_TYPE_4B; break;
787
0
                    default: type = LLM_TYPE_UNKNOWN;
788
0
                }
789
0
            } break;
790
0
        case LLM_ARCH_GROK:
791
0
            {
792
                // defaults for old GGUFs
793
0
                hparams.yarn_beta_fast = 8.0f;
794
0
                hparams.f_logit_scale = 0.5773502691896257f;
795
0
                hparams.f_embedding_scale = 78.38367176906169f;
796
0
                hparams.f_attn_out_scale = 0.08838834764831845f;
797
0
                hparams.f_attn_logit_softcapping = 30.0f;
798
0
                hparams.f_router_logit_softcapping = 30.0f;
799
                // no final_logit_softcapping in grok-1
800
0
                hparams.f_final_logit_softcapping = 0.0f;
801
802
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,  hparams.f_norm_rms_eps);
803
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,   hparams.n_ff_exp, false);
804
0
                ml.get_key(LLM_KV_LOGIT_SCALE,                  hparams.f_logit_scale, false);
805
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE,              hparams.f_embedding_scale, false);
806
0
                ml.get_key(LLM_KV_ATTENTION_OUTPUT_SCALE,       hparams.f_attn_out_scale, false);
807
0
                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING,       hparams.f_attn_logit_softcapping, false);
808
0
                ml.get_key(LLM_KV_ROUTER_LOGIT_SOFTCAPPING,     hparams.f_router_logit_softcapping, false);
809
0
                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING,      hparams.f_final_logit_softcapping, false);
810
811
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH,  hparams.attn_temp_length, false);
812
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_EXT_FACTOR,  hparams.yarn_ext_factor, false);
813
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_ATTN_FACTOR, hparams.yarn_attn_factor, false);
814
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_FAST,   hparams.yarn_beta_fast, false);
815
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_SLOW,   hparams.yarn_beta_slow, false);
816
817
0
                switch (hparams.n_layer) {
818
0
                    case 64: type = LLM_TYPE_314B; break;
819
0
                    default: type = LLM_TYPE_UNKNOWN;
820
0
                }
821
0
            } break;
822
0
        case LLM_ARCH_FALCON:
823
0
            {
824
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
825
826
0
                switch (hparams.n_layer) {
827
0
                    case 32: type = LLM_TYPE_7B; break;
828
0
                    case 60: type = LLM_TYPE_40B; break;
829
0
                    default: type = LLM_TYPE_UNKNOWN;
830
0
                }
831
0
            } break;
832
0
        case LLM_ARCH_BAICHUAN:
833
0
            {
834
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
835
0
                switch (hparams.n_layer) {
836
0
                    case 32: type = LLM_TYPE_7B; break;
837
0
                    case 40: type = LLM_TYPE_13B; break;
838
0
                    default: type = LLM_TYPE_UNKNOWN;
839
0
                }
840
841
0
                if (type == LLM_TYPE_13B) {
842
                    // TODO: become GGUF KV parameter
843
0
                    hparams.f_max_alibi_bias = 8.0f;
844
0
                }
845
0
            } break;
846
0
        case LLM_ARCH_STARCODER:
847
0
            {
848
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
849
0
                switch (hparams.n_layer) {
850
0
                    case 24: type = LLM_TYPE_1B; break;
851
0
                    case 36: type = LLM_TYPE_3B; break;
852
0
                    case 42: type = LLM_TYPE_7B; break;
853
0
                    case 40: type = LLM_TYPE_15B; break;
854
0
                    default: type = LLM_TYPE_UNKNOWN;
855
0
                }
856
0
            } break;
857
0
        case LLM_ARCH_REFACT:
858
0
            {
859
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
860
0
                switch (hparams.n_layer) {
861
0
                    case 32: type = LLM_TYPE_1B; break;
862
0
                    default: type = LLM_TYPE_UNKNOWN;
863
0
                }
864
865
                // TODO: become GGUF KV parameter
866
0
                hparams.f_max_alibi_bias = 8.0f;
867
0
            } break;
868
0
        case LLM_ARCH_BERT:
869
0
            {
870
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
871
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
872
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
873
874
0
                switch (hparams.n_layer) {
875
0
                    case 3:
876
0
                        type = LLM_TYPE_17M; break; // bge-micro
877
0
                    case 6:
878
0
                        type = LLM_TYPE_22M; break; // MiniLM-L6
879
0
                    case 12:
880
0
                        switch (hparams.n_embd) {
881
0
                            case 384: type = LLM_TYPE_33M; break; // MiniLM-L12, bge-small
882
0
                            case 768: type = LLM_TYPE_109M; break; // bge-base
883
0
                            default: type = LLM_TYPE_UNKNOWN;
884
0
                        } break;
885
0
                    case 24:
886
0
                        type = LLM_TYPE_335M; break; // bge-large
887
0
                    default: type = LLM_TYPE_UNKNOWN;
888
0
                }
889
0
            } break;
890
0
        case LLM_ARCH_MODERN_BERT:
891
0
            {
892
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
893
0
                if (found_swa && hparams.n_swa > 0) {
894
0
                    uint32_t swa_period = 3;
895
0
                    hparams.swa_type = LLAMA_SWA_TYPE_SYMMETRIC;
896
897
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa);
898
0
                    ml.get_key_or_arr(LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN, swa_period, false);
899
0
                    hparams.set_swa_pattern(swa_period);
900
0
                } else {
901
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
902
0
                }
903
904
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
905
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,        hparams.causal_attn);
906
0
                ml.get_key(LLM_KV_POOLING_TYPE,            hparams.pooling_type, false);
907
908
0
                switch (hparams.n_layer) {
909
0
                    case 12:
910
0
                        type = LLM_TYPE_47M; break; // granite-embedding-small
911
0
                    case 22:
912
0
                        type = LLM_TYPE_149M; break; // modern-bert-base
913
0
                    case 28:
914
0
                        type = LLM_TYPE_395M; break; // modern-bert-large
915
0
                    default: type = LLM_TYPE_UNKNOWN;
916
0
                }
917
0
            } break;
918
0
        case LLM_ARCH_JINA_BERT_V2:
919
0
            {
920
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
921
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
922
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
923
0
                hparams.f_max_alibi_bias = 8.0f;
924
925
0
                switch (hparams.n_layer) {
926
0
                    case 4:  type = LLM_TYPE_33M;  break; // jina-embeddings-small
927
0
                    case 12: type = LLM_TYPE_137M; break; // jina-embeddings-base
928
0
                    default: type = LLM_TYPE_UNKNOWN;
929
0
                }
930
0
            } break;
931
0
        case LLM_ARCH_JINA_BERT_V3:
932
0
            {
933
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
934
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
935
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
936
937
0
                switch (hparams.n_layer) {
938
0
                    case 24:
939
0
                        type = LLM_TYPE_558M; break;
940
0
                    default: type = LLM_TYPE_UNKNOWN;
941
0
                }
942
0
            } break;
943
0
        case LLM_ARCH_NOMIC_BERT:
944
0
        case LLM_ARCH_NOMIC_BERT_MOE:
945
0
            {
946
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
947
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
948
0
                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type);
949
0
                ml.get_key(LLM_KV_MOE_EVERY_N_LAYERS,         hparams.moe_every_n_layers, 0);
950
951
0
                if (hparams.n_layer == 12 && hparams.n_embd == 768) {
952
0
                    if (arch == LLM_ARCH_NOMIC_BERT) {
953
0
                        type = LLM_TYPE_137M;
954
0
                    } else if (arch == LLM_ARCH_NOMIC_BERT_MOE && hparams.moe_every_n_layers == 2) {
955
0
                        type = LLM_TYPE_475M;
956
0
                    }
957
0
                }
958
0
            } break;
959
0
        case LLM_ARCH_NEO_BERT:
960
0
            {
961
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
962
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,            hparams.causal_attn);
963
0
                ml.get_key(LLM_KV_POOLING_TYPE,                hparams.pooling_type);
964
965
0
                if (hparams.n_layer == 28) {
966
0
                    type = LLM_TYPE_250M;
967
0
                }
968
0
            } break;
969
0
        case LLM_ARCH_BLOOM:
970
0
            {
971
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
972
973
0
                switch (hparams.n_layer) {
974
0
                    case 24: type = LLM_TYPE_1B; break;
975
0
                    case 30:
976
0
                        switch (hparams.n_embd) {
977
0
                            case 2560: type = LLM_TYPE_3B; break;
978
0
                            case 4096: type = LLM_TYPE_7B; break;
979
0
                            default: type = LLM_TYPE_UNKNOWN;
980
0
                        } break;
981
0
                    default: type = LLM_TYPE_UNKNOWN;
982
0
                }
983
984
                // TODO: become GGUF KV parameter
985
0
                hparams.f_max_alibi_bias = 8.0f;
986
0
            } break;
987
0
        case LLM_ARCH_MPT:
988
0
            {
989
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
990
0
                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv, false);
991
0
                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
992
993
0
                switch (hparams.n_layer) {
994
0
                    case 32: type = LLM_TYPE_7B; break;
995
0
                    case 48: type = LLM_TYPE_30B; break;
996
0
                    default: type = LLM_TYPE_UNKNOWN;
997
0
                }
998
0
            } break;
999
0
        case LLM_ARCH_STABLELM:
1000
0
            {
1001
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1002
1003
0
                switch (hparams.n_layer) {
1004
0
                    case 24: type = LLM_TYPE_1B; break;
1005
0
                    case 32: type = LLM_TYPE_3B; break;
1006
0
                    case 40: type = LLM_TYPE_12B; break;
1007
0
                    default: type = LLM_TYPE_UNKNOWN;
1008
0
               }
1009
0
            } break;
1010
0
        case LLM_ARCH_QWEN:
1011
0
            {
1012
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1013
1014
0
                switch (hparams.n_layer) {
1015
0
                    case 32: type = LLM_TYPE_7B; break;
1016
0
                    case 40: type = LLM_TYPE_13B; break;
1017
0
                    default: type = LLM_TYPE_UNKNOWN;
1018
0
                }
1019
0
            } break;
1020
0
        case LLM_ARCH_QWEN2VL:
1021
0
            {
1022
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
1023
0
            }
1024
            // fall through
1025
0
        case LLM_ARCH_QWEN2:
1026
0
            {
1027
0
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
1028
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1029
0
                switch (hparams.n_layer) {
1030
0
                    case 24: type = hparams.n_embd == 1024 ? LLM_TYPE_0_5B : LLM_TYPE_1B; break;
1031
0
                    case 28: type = hparams.n_embd == 1536 ? LLM_TYPE_1_5B : LLM_TYPE_7B; break;
1032
0
                    case 32: type = LLM_TYPE_7B; break;
1033
0
                    case 36: type = LLM_TYPE_3B; break;
1034
0
                    case 40: type = hparams.n_head() == 20 ? LLM_TYPE_4B : LLM_TYPE_13B; break;
1035
0
                    case 48: type = LLM_TYPE_14B; break;
1036
0
                    case 64: type = LLM_TYPE_32B; break;
1037
0
                    case 80: type = LLM_TYPE_70B; break;
1038
0
                    default: type = LLM_TYPE_UNKNOWN;
1039
0
                }
1040
0
            } break;
1041
0
        case LLM_ARCH_DREAM:
1042
0
            {
1043
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1044
                // Dream models are primarily 7B with 28 layers
1045
0
                switch (hparams.n_layer) {
1046
0
                    case 28:
1047
0
                        type = LLM_TYPE_7B;
1048
0
                        break;
1049
0
                    default:
1050
0
                        type = LLM_TYPE_UNKNOWN;
1051
0
                }
1052
                // Set non-causal attention for diffusion models
1053
0
                hparams.causal_attn = false;
1054
0
            }
1055
0
            break;
1056
0
        case LLM_ARCH_LLADA:
1057
0
            {
1058
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1059
                // LLaDA-8B has 32 layers, similar to LLaMA but for diffusion
1060
0
                switch (hparams.n_layer) {
1061
0
                    case 32:
1062
0
                        type = LLM_TYPE_8B;
1063
0
                        break;
1064
0
                    default:
1065
0
                        type = LLM_TYPE_UNKNOWN;
1066
0
                }
1067
                // Set non-causal attention for diffusion models
1068
0
                hparams.causal_attn = false;
1069
0
            }
1070
0
            break;
1071
0
        case LLM_ARCH_LLADA_MOE:
1072
0
            {
1073
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
1074
1075
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1076
                // diffusion language model uses non-causal attention
1077
0
                hparams.causal_attn = false;
1078
0
                switch (hparams.n_layer) {
1079
0
                    case 16: type = LLM_TYPE_A1_7B; break;
1080
0
                    default: type = LLM_TYPE_UNKNOWN;
1081
0
                }
1082
0
            } break;
1083
0
        case LLM_ARCH_RND1:
1084
0
            {
1085
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
1086
1087
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1088
0
                switch (hparams.n_layer) {
1089
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
1090
0
                    default: type = LLM_TYPE_UNKNOWN;
1091
0
                }
1092
                // Set non-causal attention for diffusion models
1093
0
                hparams.causal_attn = false;
1094
0
            } break;
1095
0
        case LLM_ARCH_QWEN2MOE:
1096
0
            {
1097
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp, false);
1098
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
1099
1100
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1101
0
                switch (hparams.n_layer) {
1102
0
                    case 24: type = LLM_TYPE_A2_7B; break;
1103
0
                    case 28: type = LLM_TYPE_57B_A14B; break;
1104
0
                    default: type = LLM_TYPE_UNKNOWN;
1105
0
                }
1106
0
            } break;
1107
0
        case LLM_ARCH_QWEN3:
1108
0
            {
1109
0
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false);
1110
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1111
0
                switch (hparams.n_layer) {
1112
0
                    case 28: type = hparams.n_embd == 1024 ? LLM_TYPE_0_6B : LLM_TYPE_1_7B; break;
1113
0
                    case 36: type = hparams.n_embd == 2560 ? LLM_TYPE_4B : LLM_TYPE_8B; break;
1114
0
                    case 40: type = LLM_TYPE_14B; break;
1115
0
                    case 64: type = LLM_TYPE_32B; break;
1116
0
                    default: type = LLM_TYPE_UNKNOWN;
1117
0
                }
1118
0
            } break;
1119
0
        case LLM_ARCH_MAINCODER:
1120
0
            {
1121
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1122
0
                switch (hparams.n_layer) {
1123
0
                    case 32: type = LLM_TYPE_1B; break;
1124
0
                    default: type = LLM_TYPE_UNKNOWN;
1125
0
                }
1126
0
            } break;
1127
0
        case LLM_ARCH_QWEN3VL:
1128
0
            {
1129
0
                ml.get_key(LLM_KV_NUM_DEEPSTACK_LAYERS, hparams.n_deepstack_layers, false);
1130
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
1131
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1132
0
                switch (hparams.n_layer) {
1133
0
                    case 28: type = LLM_TYPE_1_7B; break;
1134
0
                    case 36: type = hparams.n_embd == 2560 ? LLM_TYPE_4B : LLM_TYPE_8B; break;
1135
0
                    case 64: type = LLM_TYPE_32B; break;
1136
0
                    default: type = LLM_TYPE_UNKNOWN;
1137
0
                }
1138
0
            } break;
1139
0
        case LLM_ARCH_QWEN3MOE:
1140
0
            {
1141
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp, false);
1142
1143
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1144
0
                switch (hparams.n_layer) {
1145
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
1146
0
                    case 94: type = LLM_TYPE_235B_A22B; break;
1147
0
                    default: type = LLM_TYPE_UNKNOWN;
1148
0
                }
1149
0
            } break;
1150
0
        case LLM_ARCH_QWEN3VLMOE:
1151
0
            {
1152
0
                ml.get_key(LLM_KV_NUM_DEEPSTACK_LAYERS, hparams.n_deepstack_layers, false);
1153
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true);
1154
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
1155
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1156
0
                switch (hparams.n_layer) {
1157
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
1158
0
                    case 94: type = LLM_TYPE_235B_A22B; break;
1159
0
                    default: type = LLM_TYPE_UNKNOWN;
1160
0
                }
1161
0
            } break;
1162
0
        case LLM_ARCH_PHI2:
1163
0
            {
1164
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1165
1166
0
                switch (hparams.n_layer) {
1167
0
                    case 24: type = LLM_TYPE_1B; break;
1168
0
                    case 32: type = LLM_TYPE_3B; break;
1169
0
                    default: type = LLM_TYPE_UNKNOWN;
1170
0
                }
1171
0
            } break;
1172
0
        case LLM_ARCH_PHI3:
1173
0
            {
1174
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1175
1176
0
                switch (hparams.n_layer) {
1177
0
                    case 24: type = LLM_TYPE_1B; break;
1178
0
                    case 32: type = LLM_TYPE_3B; break;
1179
0
                    case 40: type = LLM_TYPE_14B; break;
1180
0
                    default: type = LLM_TYPE_UNKNOWN;
1181
0
                }
1182
1183
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1184
1185
0
                if (found_swa && hparams.n_swa > 0) {
1186
0
                    LLAMA_LOG_WARN("%s: Phi SWA is currently disabled - results might be suboptimal for some models (see %s)\n",
1187
0
                            __func__, "https://github.com/ggml-org/llama.cpp/pull/13676");
1188
1189
                    // TODO: fix conversion scripts to correctly populate `n_swa` and `n_swa_pattern`
1190
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1191
1192
0
                    hparams.n_swa         = 0;
1193
0
                    hparams.set_swa_pattern(1);
1194
0
                }
1195
0
            } break;
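The found_swa guard above relies on the optional form of the key lookup: the trailing false marks the key as not required, and the boolean return value reports whether the key was actually present in the metadata, so the default stays in place when it is missing. A minimal standalone sketch of that pattern, using a hypothetical try_read helper and an illustrative key name rather than the llama.cpp loader API:

    #include <cstdio>
    #include <map>
    #include <string>

    // hypothetical stand-in for an optional metadata lookup that reports presence
    static bool try_read(const std::map<std::string, unsigned> & kv,
                         const std::string & key, unsigned & out) {
        const auto it = kv.find(key);
        if (it == kv.end()) {
            return false; // key absent: leave `out` at its caller-provided default
        }
        out = it->second;
        return true;
    }

    int main() {
        // illustrative key name and value, not taken from a real GGUF file
        const std::map<std::string, unsigned> kv = { { "attention.sliding_window", 2047 } };

        unsigned n_swa = 0; // default when the key is missing
        const bool found_swa = try_read(kv, "attention.sliding_window", n_swa);

        if (found_swa && n_swa > 0) {
            std::printf("sliding window found: %u\n", n_swa);
        }
        return 0;
    }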
1196
0
        case LLM_ARCH_PHIMOE:
1197
0
            {
1198
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1199
1200
0
                switch (hparams.n_layer) {
1201
0
                    case 32: type = LLM_TYPE_16x3_8B; break;
1202
0
                    default: type = LLM_TYPE_UNKNOWN;
1203
0
                }
1204
0
            } break;
1205
0
        case LLM_ARCH_PLAMO:
1206
0
            {
1207
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1208
1209
0
                switch (hparams.n_layer) {
1210
0
                    case 40: type = LLM_TYPE_13B; break;
1211
0
                    default: type = LLM_TYPE_UNKNOWN;
1212
0
                }
1213
0
            } break;
1214
0
        case LLM_ARCH_PLAMO2:
1215
0
            {
1216
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1217
1218
                // Load Mamba SSM parameters
1219
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1220
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1221
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1222
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1223
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
1224
1225
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
1226
0
                    hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0;
1227
0
                }
1228
1229
0
                switch (hparams.n_layer) {
1230
0
                    case 16: type = LLM_TYPE_1B; break;
1231
0
                    case 32:
1232
0
                        if (hparams.n_embd == 2048) {
1233
0
                            type = LLM_TYPE_2B;
1234
0
                        } else if (hparams.n_embd == 4096) {
1235
0
                            type = LLM_TYPE_8B;
1236
0
                        }
1237
0
                        break;
1238
0
                    default: type = LLM_TYPE_UNKNOWN;
1239
0
                }
1240
1241
                // Load attention parameters
1242
0
                ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH,   hparams.n_embd_head_k, false);
1243
0
                ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
1244
0
            } break;
1245
0
        case LLM_ARCH_PLAMO3:
1246
0
            {
1247
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1248
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1249
0
                if (found_swa && hparams.n_swa > 0) {
1250
0
                    uint32_t swa_period = 8;
1251
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1252
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa);
1253
0
                    ml.get_key_or_arr(LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN, swa_period, false);
1254
0
                    hparams.set_swa_pattern(swa_period);
1255
0
                } else {
1256
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1257
0
                }
1258
1259
0
                switch (hparams.n_layer) {
1260
0
                    case 24: type = LLM_TYPE_2B; break;
1261
0
                    default: type = LLM_TYPE_UNKNOWN;
1262
0
                }
1263
0
            } break;
1264
0
        case LLM_ARCH_GPT2:
1265
0
            {
1266
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1267
0
                switch (hparams.n_layer) {
1268
0
                    case 12: type = LLM_TYPE_SMALL; break;
1269
0
                    case 24: type = LLM_TYPE_MEDIUM; break;
1270
0
                    case 36: type = LLM_TYPE_LARGE; break;
1271
0
                    case 48: type = LLM_TYPE_XL; break;
1272
0
                    default: type = LLM_TYPE_UNKNOWN;
1273
0
                }
1274
0
            } break;
1275
0
        case LLM_ARCH_CODESHELL:
1276
0
            {
1277
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1278
0
                switch (hparams.n_layer) {
1279
0
                    case 42: type = LLM_TYPE_7B; break;
1280
0
                    default: type = LLM_TYPE_UNKNOWN;
1281
0
                }
1282
0
            } break;
1283
0
        case LLM_ARCH_ORION:
1284
0
            {
1285
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1286
1287
0
                switch (hparams.n_layer) {
1288
0
                    case 40: type = LLM_TYPE_14B; break;
1289
0
                    default: type = LLM_TYPE_UNKNOWN;
1290
0
                }
1291
0
            } break;
1292
0
        case LLM_ARCH_INTERNLM2:
1293
0
            {
1294
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1295
0
                switch (hparams.n_layer) {
1296
0
                    case 32: type = LLM_TYPE_7B; break;
1297
0
                    case 48: type = LLM_TYPE_20B; break;
1298
0
                    default: type = LLM_TYPE_UNKNOWN;
1299
0
                }
1300
0
            } break;
1301
0
        case LLM_ARCH_GEMMA:
1302
0
            {
1303
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1304
1305
0
                switch (hparams.n_layer) {
1306
0
                    case 18: type = LLM_TYPE_2B; break;
1307
0
                    case 28: type = LLM_TYPE_7B; break;
1308
0
                    default: type = LLM_TYPE_UNKNOWN;
1309
0
                }
1310
0
            } break;
1311
0
        case LLM_ARCH_GEMMA2:
1312
0
            {
1313
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1314
0
                hparams.n_swa = 4096; // default value of gemma 2
1315
0
                hparams.set_swa_pattern(2);
1316
0
                hparams.attn_soft_cap = true;
1317
0
                hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1318
0
                hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
1319
1320
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,          hparams.rope_freq_base_train_swa, false);
1321
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa, false);
1322
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1323
0
                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING,      hparams.f_attn_logit_softcapping, false);
1324
0
                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING,     hparams.f_final_logit_softcapping, false);
1325
1326
0
                switch (hparams.n_layer) {
1327
0
                    case 26: type = LLM_TYPE_2B; break;
1328
0
                    case 42: type = LLM_TYPE_9B; break;
1329
0
                    case 46: type = LLM_TYPE_27B; break;
1330
0
                    default: type = LLM_TYPE_UNKNOWN;
1331
0
                }
1332
1333
                // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L173
1334
0
                hparams.f_attention_scale = type == LLM_TYPE_27B
1335
0
                    ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
1336
0
                    : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
1337
0
            } break;
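The f_attention_scale chosen above selects between two query-scaling conventions: the 27B variant uses 1/sqrt(n_embd / n_head) while the smaller variants use the usual 1/sqrt(n_embd_head_k), following the referenced gemma_pytorch config. A minimal sketch of how the two expressions differ numerically, with assumed Gemma-2-27B-like dimensions (4608 / 32 / 128 are illustrative values, not read from this report):

    #include <cmath>
    #include <cstdio>

    int main() {
        // assumed dimensions, for illustration only
        const float n_embd        = 4608.0f;
        const float n_head        = 32.0f;
        const float n_embd_head_k = 128.0f;

        const float scale_27b     = 1.0f / std::sqrt(n_embd / n_head); // 1/sqrt(144)
        const float scale_default = 1.0f / std::sqrt(n_embd_head_k);   // 1/sqrt(128)

        std::printf("27B scale = %f, default scale = %f\n", scale_27b, scale_default);
        return 0;
    }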
1338
0
        case LLM_ARCH_GEMMA3:
1339
0
            {
1340
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1341
0
                if (found_swa && hparams.n_swa > 0) {
1342
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1343
0
                    hparams.set_swa_pattern(6);
1344
1345
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1346
0
                } else {
1347
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1348
0
                }
1349
1350
0
                hparams.f_final_logit_softcapping = 0.0f;
1351
0
                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
1352
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1353
1354
0
                switch (hparams.n_layer) {
1355
0
                    case 18: type = LLM_TYPE_270M; break;
1356
0
                    case 26: type = LLM_TYPE_1B; break;
1357
0
                    case 32: type = LLM_TYPE_8B; break; // Rnj-1
1358
0
                    case 34: type = LLM_TYPE_4B; break;
1359
0
                    case 48: type = LLM_TYPE_12B; break;
1360
0
                    case 62: type = LLM_TYPE_27B; break;
1361
0
                    default: type = LLM_TYPE_UNKNOWN;
1362
0
                }
1363
1364
                // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L289
1365
0
                hparams.f_attention_scale = type == LLM_TYPE_27B
1366
0
                    ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
1367
0
                    : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
1368
0
            } break;
1369
0
        case LLM_ARCH_GEMMA3N:
1370
0
            {
1371
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1372
0
                hparams.set_swa_pattern(5);
1373
1374
0
                hparams.n_layer_kv_from_start     = 20;
1375
0
                hparams.f_attention_scale         = 1.0f;
1376
1377
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,          hparams.rope_freq_base_train_swa, false);
1378
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa);
1379
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1380
1381
0
                switch (hparams.n_layer) {
1382
0
                    case 30: type = LLM_TYPE_E2B; break;
1383
0
                    case 35: type = LLM_TYPE_E4B; break;
1384
0
                    default: type = LLM_TYPE_UNKNOWN;
1385
0
                }
1386
0
            } break;
1387
0
        case LLM_ARCH_GEMMA_EMBEDDING:
1388
0
            {
1389
0
                hparams.swa_type = LLAMA_SWA_TYPE_SYMMETRIC;
1390
0
                hparams.set_swa_pattern(6);
1391
1392
0
                hparams.causal_attn = false; // embeddings do not use causal attention
1393
1394
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1395
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
1396
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1397
0
                ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type);
1398
1399
                // applied only if the model was converted with --sentence-transformers-dense-modules
1400
0
                ml.get_key(LLM_KV_DENSE_2_FEAT_IN, hparams.dense_2_feat_in, false);
1401
0
                ml.get_key(LLM_KV_DENSE_2_FEAT_OUT, hparams.dense_2_feat_out, false);
1402
0
                ml.get_key(LLM_KV_DENSE_3_FEAT_IN, hparams.dense_3_feat_in, false);
1403
0
                ml.get_key(LLM_KV_DENSE_3_FEAT_OUT, hparams.dense_3_feat_out, false);
1404
1405
0
                GGML_ASSERT((hparams.dense_2_feat_in == 0 || hparams.dense_2_feat_in == hparams.n_embd) && "dense_2_feat_in must be equal to n_embd");
1406
0
                GGML_ASSERT((hparams.dense_3_feat_out == 0 || hparams.dense_3_feat_out == hparams.n_embd) && "dense_3_feat_out must be equal to n_embd");
1407
1408
0
                switch (hparams.n_layer) {
1409
0
                    case 24: type = LLM_TYPE_0_3B; break;
1410
0
                    default: type = LLM_TYPE_UNKNOWN;
1411
0
                }
1412
0
                hparams.f_attention_scale = 1.0f / std::sqrt(float(hparams.n_embd_head_k));
1413
1414
0
            } break;
1415
0
        case LLM_ARCH_STARCODER2:
1416
0
            {
1417
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1418
0
                switch (hparams.n_layer) {
1419
0
                    case 30: type = LLM_TYPE_3B; break;
1420
0
                    case 32: type = LLM_TYPE_7B; break;
1421
0
                    case 40: type = LLM_TYPE_15B; break;
1422
0
                    case 52: type = LLM_TYPE_20B; break; // granite
1423
0
                    case 88: type = LLM_TYPE_34B; break; // granite
1424
0
                    default: type = LLM_TYPE_UNKNOWN;
1425
0
                }
1426
0
            } break;
1427
0
        case LLM_ARCH_MAMBA:
1428
0
            {
1429
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1430
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1431
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1432
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1433
0
                ml.get_key(LLM_KV_SSM_DT_B_C_RMS,     hparams.ssm_dt_b_c_rms, false);
1434
1435
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1436
1437
0
                switch (hparams.n_layer) {
1438
0
                    case 24:
1439
0
                        switch (hparams.n_embd) {
1440
0
                            case 768: type = LLM_TYPE_SMALL; break;
1441
0
                            default: type = LLM_TYPE_UNKNOWN;
1442
0
                        } break;
1443
0
                    case 48:
1444
0
                        switch (hparams.n_embd) {
1445
0
                            case 1024: type = LLM_TYPE_MEDIUM; break;
1446
0
                            case 1536: type = LLM_TYPE_LARGE; break;
1447
0
                            case 2048: type = LLM_TYPE_XL; break;
1448
0
                            default:   type = LLM_TYPE_UNKNOWN;
1449
0
                        } break;
1450
0
                    case 64:
1451
0
                        switch (hparams.n_embd) {
1452
0
                            case 2560: type = LLM_TYPE_3B; break;
1453
0
                            default: type = LLM_TYPE_UNKNOWN;
1454
0
                        } break;
1455
0
                    default: type = LLM_TYPE_UNKNOWN;
1456
0
                }
1457
0
            } break;
1458
0
        case LLM_ARCH_MAMBA2:
1459
0
            {
1460
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1461
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1462
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1463
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1464
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
1465
1466
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1467
1468
0
                switch (hparams.n_layer) {
1469
0
                    case 24:
1470
0
                        switch (hparams.n_embd) {
1471
0
                            case 768: type = LLM_TYPE_SMALL; break;
1472
0
                            default: type = LLM_TYPE_UNKNOWN;
1473
0
                        } break;
1474
0
                    case 48:
1475
0
                        switch (hparams.n_embd) {
1476
0
                            case 1024: type = LLM_TYPE_MEDIUM; break;
1477
0
                            case 1536: type = LLM_TYPE_LARGE; break;
1478
0
                            case 2048: type = LLM_TYPE_XL; break;
1479
0
                            default: type = LLM_TYPE_UNKNOWN;
1480
0
                        } break;
1481
0
                    case 64:
1482
0
                        switch (hparams.n_embd) {
1483
0
                            case 2560: type = LLM_TYPE_3B; break;
1484
0
                            case 4096: type = LLM_TYPE_7B; break;
1485
0
                            default: type = LLM_TYPE_UNKNOWN;
1486
0
                        } break;
1487
0
                    default: type = LLM_TYPE_UNKNOWN;
1488
0
                }
1489
0
            } break;
1490
0
        case LLM_ARCH_JAMBA:
1491
0
            {
1492
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1493
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1494
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1495
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1496
1497
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1498
1499
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
1500
0
                    hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0;
1501
0
                }
1502
1503
0
                switch (hparams.n_layer) {
1504
                    // TODO: Jamba layers are a bit heterogeneous, so naming this is hard.
1505
0
                    case 12: // 900M  8x???M
1506
0
                    case 32: // 51B  16x?B
1507
0
                    default: type = LLM_TYPE_UNKNOWN;
1508
0
                }
1509
0
            } break;
1510
0
        case LLM_ARCH_XVERSE:
1511
0
            {
1512
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1513
0
                switch (hparams.n_layer) {
1514
0
                    case 32: type = LLM_TYPE_7B; break;
1515
0
                    case 40: type = LLM_TYPE_13B; break;
1516
0
                    case 80: type = LLM_TYPE_65B; break;
1517
0
                    default: type = LLM_TYPE_UNKNOWN;
1518
0
                }
1519
0
            } break;
1520
0
        case LLM_ARCH_COMMAND_R:
1521
0
            {
1522
0
                ml.get_key(LLM_KV_LOGIT_SCALE,             hparams.f_logit_scale);
1523
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1524
0
                switch (hparams.n_layer) {
1525
0
                    case 40: type = LLM_TYPE_35B; break;
1526
0
                    default: type = LLM_TYPE_UNKNOWN;
1527
0
                }
1528
0
            } break;
1529
0
        case LLM_ARCH_COHERE2:
1530
0
            {
1531
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1532
0
                hparams.set_swa_pattern(4);
1533
0
                hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1534
0
                hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
1535
1536
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,       hparams.rope_freq_base_train_swa, false);
1537
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
1538
0
                ml.get_key(LLM_KV_LOGIT_SCALE,              hparams.f_logit_scale);
1539
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
1540
0
                switch (hparams.n_layer) {
1541
0
                    case 32: type = LLM_TYPE_8B; break;
1542
0
                    default: type = LLM_TYPE_UNKNOWN;
1543
0
                }
1544
0
            } break;
1545
0
        case LLM_ARCH_DBRX:
1546
0
        {
1547
0
            ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1548
0
            ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,     hparams.f_clamp_kqv);
1549
1550
0
            switch (hparams.n_layer) {
1551
0
                case 40: type = LLM_TYPE_16x12B; break;
1552
0
                default: type = LLM_TYPE_UNKNOWN;
1553
0
            }
1554
0
        } break;
1555
0
        case LLM_ARCH_OLMO:
1556
0
            {
1557
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1558
0
                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,     hparams.f_clamp_kqv, false);
1559
1560
0
                switch (hparams.n_layer) {
1561
0
                    case 22: type = LLM_TYPE_1B; break;
1562
0
                    case 32: type = LLM_TYPE_7B; break;
1563
0
                    case 80: type = LLM_TYPE_70B; break;
1564
0
                    default: type = LLM_TYPE_UNKNOWN;
1565
0
                }
1566
0
            } break;
1567
0
        case LLM_ARCH_OLMO2:
1568
0
            {
1569
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1570
1571
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
1572
0
                if (found_swa && hparams.n_swa > 0) {
1573
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1574
0
                    hparams.set_swa_pattern(4);
1575
1576
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1577
0
                    hparams.rope_freq_scale_train_swa = 1.0; // See olmo2.cpp
1578
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1579
0
                } else {
1580
0
                    hparams.swa_type = LLAMA_SWA_TYPE_NONE;
1581
0
                }
1582
1583
0
                switch (hparams.n_layer) {
1584
0
                    case 16: type = LLM_TYPE_1B; break;
1585
0
                    case 32: type = LLM_TYPE_7B; break;
1586
0
                    case 40: type = LLM_TYPE_13B; break;
1587
0
                    case 64: type = LLM_TYPE_32B; break;
1588
0
                    default: type = LLM_TYPE_UNKNOWN;
1589
0
                }
1590
0
            } break;
1591
0
        case LLM_ARCH_SEED_OSS:
1592
0
            {
1593
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1594
0
                switch (hparams.n_layer) {
1595
0
                    case 64: type = LLM_TYPE_36B; break;
1596
0
                    default: type = LLM_TYPE_UNKNOWN;
1597
0
                }
1598
0
            } break;
1599
0
        case LLM_ARCH_OLMOE:
1600
0
            {
1601
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1602
0
                switch (hparams.n_layer) {
1603
0
                    case 16: type = LLM_TYPE_A1_7B; break;
1604
0
                    default: type = LLM_TYPE_UNKNOWN;
1605
0
                }
1606
0
            } break;
1607
0
        case LLM_ARCH_OPENELM:
1608
0
            {
1609
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1610
1611
0
                switch (hparams.n_layer) {
1612
0
                case 16: type = LLM_TYPE_270M; break;
1613
0
                case 20: type = LLM_TYPE_450M; break;
1614
0
                case 28: type = LLM_TYPE_1B; break;
1615
0
                case 36: type = LLM_TYPE_3B; break;
1616
0
                default: type = LLM_TYPE_UNKNOWN;
1617
0
                }
1618
0
            } break;
1619
0
        case LLM_ARCH_GPTNEOX:
1620
0
            {
1621
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1622
0
                ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL,   hparams.use_par_res);
1623
0
                switch (hparams.n_layer) {
1624
0
                    case 6:
1625
0
                        switch (hparams.n_ff()) {
1626
0
                            case 512:  type = LLM_TYPE_14M; break;
1627
0
                            case 2048: type = LLM_TYPE_70M; break;
1628
0
                            default:   type = LLM_TYPE_UNKNOWN;
1629
0
                        } break;
1630
0
                    case 12:
1631
0
                        switch (hparams.n_ff()) {
1632
0
                            case 3072: type = LLM_TYPE_160M; break;
1633
0
                            default: type = LLM_TYPE_UNKNOWN;
1634
0
                        } break;
1635
0
                    case 16:
1636
0
                        switch (hparams.n_ff()) {
1637
0
                            case 8192: type = LLM_TYPE_1B; break;
1638
0
                            default: type = LLM_TYPE_UNKNOWN;
1639
0
                        } break;
1640
0
                    case 24:
1641
0
                        switch (hparams.n_ff()) {
1642
0
                            case 4096: type = LLM_TYPE_410M; break;
1643
0
                            case 8192: type = LLM_TYPE_1_4B; break;
1644
0
                            default: type = LLM_TYPE_UNKNOWN;
1645
0
                        } break;
1646
0
                    case 32:
1647
0
                        switch (hparams.n_ff()) {
1648
0
                            case 10240: type = LLM_TYPE_2_8B; break;
1649
0
                            case 16384: type = LLM_TYPE_6_9B; break;
1650
0
                            default: type = LLM_TYPE_UNKNOWN;
1651
0
                        } break;
1652
0
                    case 36:
1653
0
                        switch (hparams.n_ff()) {
1654
0
                            case 20480: type = LLM_TYPE_12B; break;
1655
0
                            default: type = LLM_TYPE_UNKNOWN;
1656
0
                        } break;
1657
0
                    case 44:
1658
0
                        switch (hparams.n_ff()) {
1659
0
                            case 24576: type = LLM_TYPE_20B; break;
1660
0
                            default: type = LLM_TYPE_UNKNOWN;
1661
0
                        } break;
1662
0
                    default: type = LLM_TYPE_UNKNOWN;
1663
0
                }
1664
0
            } break;
1665
0
        case LLM_ARCH_ARCTIC:
1666
0
            {
1667
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1668
1669
0
                if (hparams.n_expert == 128) {
1670
0
                    switch (hparams.n_layer) {
1671
0
                        case 35: type = LLM_TYPE_10B_128x3_66B; break;
1672
0
                        default: type = LLM_TYPE_UNKNOWN;
1673
0
                    }
1674
0
                } else {
1675
0
                    type = LLM_TYPE_UNKNOWN;
1676
0
                }
1677
0
            } break;
1678
0
        case LLM_ARCH_DEEPSEEK:
1679
0
            {
1680
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1681
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
1682
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
1683
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
1684
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
1685
1686
0
                switch (hparams.n_ff_exp) {
1687
0
                    case 1408: type = LLM_TYPE_16B; break;
1688
0
                    case 1792: type = LLM_TYPE_20B; break;
1689
0
                    default: type = LLM_TYPE_UNKNOWN;
1690
0
                }
1691
0
            } break;
1692
0
        case LLM_ARCH_DEEPSEEK2:
1693
0
            {
1694
                // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
1695
0
                bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
1696
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1697
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
1698
0
                if (!is_lite) {
1699
0
                    ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
1700
0
                }
1701
0
                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK,     hparams.n_lora_kv);
1702
0
                ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH_MLA,   hparams.n_embd_head_k_mla, false);
1703
0
                ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH_MLA, hparams.n_embd_head_v_mla, false);
1704
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
1705
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,        hparams.n_expert_shared);
1706
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,       hparams.expert_weights_scale, false);
1707
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,        hparams.expert_weights_norm, false);
1708
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,         hparams.expert_gating_func, false);
1709
0
                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
1710
                    // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
1711
                    // that have no expert_gating_func model parameter set
1712
0
                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
1713
0
                }
1714
1715
0
                if (ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, 0.0f)) {
1716
                    // [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]
1717
                    // cancel the factor from the convert script
1718
0
                    hparams.rope_yarn_log_mul /= 0.1f;
1719
0
                }
1720
1721
                // (optional) temperature tuning - used by mistral-large
1722
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE,  hparams.f_attn_temp_scale,       false);
1723
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_LENGTH, hparams.n_attn_temp_floor_scale, false);
1724
1725
0
                hparams.f_attn_temp_offset = 0.0f;
1726
1727
0
                switch (hparams.n_layer) {
1728
0
                    case 27: type = LLM_TYPE_16B; break;
1729
0
                    case 60: type = LLM_TYPE_236B; break;
1730
0
                    case 61: type = LLM_TYPE_671B; break;
1731
0
                    default: type = LLM_TYPE_UNKNOWN;
1732
0
                }
1733
0
            } break;
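The division by 0.1f above undoes a scaling assumed to have been applied by the conversion script when it wrote LLM_KV_ROPE_SCALING_YARN_LOG_MUL (see the [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX] comment), so the runtime ends up with the original multiplier again. A small sketch of that round trip, with a hypothetical original value of 1.0:

    #include <cstdio>

    int main() {
        const float original_mul = 1.0f;                // hypothetical value before conversion
        const float stored_mul   = 0.1f * original_mul; // assumed conversion-time scaling

        float rope_yarn_log_mul = stored_mul;
        rope_yarn_log_mul /= 0.1f;                      // cancel the factor, as in the branch above

        std::printf("stored = %f, recovered = %f\n", stored_mul, rope_yarn_log_mul);
        return 0;
    }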
1734
0
        case LLM_ARCH_PLM:
1735
0
            {
1736
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1737
0
                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
1738
0
                switch (hparams.n_layer) {
1739
0
                    case 32: type = LLM_TYPE_1_8B; break;
1740
0
                    default: type = LLM_TYPE_UNKNOWN;
1741
0
                }
1742
0
            } break;
1743
0
        case LLM_ARCH_CHATGLM:
1744
0
            {
1745
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1746
0
                switch (hparams.n_layer) {
1747
0
                    case 28: {
1748
0
                        if (hparams.n_head(0) == 16) {
1749
0
                            type = LLM_TYPE_1_5B;
1750
0
                        } else {
1751
0
                            type = LLM_TYPE_6B;
1752
0
                        }
1753
0
                    } break;
1754
0
                    case 40: {
1755
0
                        if (hparams.n_head(0) == 24) {
1756
0
                            type = LLM_TYPE_4B;
1757
0
                        } else {
1758
0
                            type = LLM_TYPE_9B;
1759
0
                        }
1760
0
                    } break;
1761
0
                    default: type = LLM_TYPE_UNKNOWN;
1762
0
                }
1763
0
            } break;
1764
0
        case LLM_ARCH_GLM4:
1765
0
            {
1766
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,    hparams.f_norm_rms_eps);
1767
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, false);
1768
0
                switch (hparams.n_layer) {
1769
0
                    case 40: type = LLM_TYPE_9B; break;
1770
0
                    case 61: type = LLM_TYPE_32B; break;
1771
0
                    default: type = LLM_TYPE_UNKNOWN;
1772
0
                }
1773
0
            } break;
1774
0
        case LLM_ARCH_GLM4_MOE:
1775
0
            {
1776
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,     hparams.n_ff_exp);
1777
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,    hparams.f_norm_rms_eps);
1778
0
                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, false);
1779
1780
                // MoE parameters
1781
0
                ml.get_key(LLM_KV_EXPERT_COUNT,                hparams.n_expert);
1782
0
                ml.get_key(LLM_KV_EXPERT_USED_COUNT,           hparams.n_expert_used);
1783
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
1784
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead, false);
1785
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
1786
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
1787
1788
                // Expert gating function (GLM-4.5 uses sigmoid)
1789
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
1790
0
                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
1791
0
                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID;
1792
0
                }
1793
1794
                // NextN/MTP parameters
1795
0
                ml.get_key(LLM_KV_NEXTN_PREDICT_LAYERS,        hparams.nextn_predict_layers, false);
1796
1797
                // TODO: when MTP is implemented, this should probably be updated if needed
1798
0
                hparams.n_layer_kv_from_start = hparams.n_layer - hparams.nextn_predict_layers;
1799
1800
0
                switch (hparams.n_layer) {
1801
0
                    case 47: type = LLM_TYPE_106B_A12B; break; // GLM-4.5-Air (46 layers + 1 NextN layer)
1802
0
                    case 48: type = LLM_TYPE_102B_A12B; break; // Solar Open
1803
0
                    case 93: type = LLM_TYPE_355B_A32B; break; // GLM-4.5 (92 layers + 1 NextN layer)
1804
0
                    default: type = LLM_TYPE_UNKNOWN;
1805
0
                }
1806
0
            } break;
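n_layer_kv_from_start above keeps the trailing NextN/MTP layers out of the per-layer KV bookkeeping (per the TODO in this block). Using the layer counts from the comments here (GLM-4.5-Air: 47 layers including 1 NextN layer), a minimal sketch of that arithmetic:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // counts taken from the comments above; illustrative only
        const uint32_t n_layer              = 47;
        const uint32_t nextn_predict_layers = 1;

        // trailing NextN/MTP layers are excluded until MTP support lands
        const uint32_t n_layer_kv_from_start = n_layer - nextn_predict_layers;

        std::printf("KV cache covers the first %u of %u layers\n", n_layer_kv_from_start, n_layer);
        return 0;
    }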
1807
0
        case LLM_ARCH_BITNET:
1808
0
            {
1809
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1810
1811
0
                switch (hparams.n_layer) {
1812
0
                    case 26: type = LLM_TYPE_3B; break;
1813
0
                    default: type = LLM_TYPE_UNKNOWN;
1814
0
                }
1815
0
            } break;
1816
0
        case LLM_ARCH_T5:
1817
0
            {
1818
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,      hparams.f_norm_rms_eps);
1819
0
                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
1820
1821
0
                uint32_t dec_start_token_id;
1822
0
                if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
1823
0
                    hparams.dec_start_token_id = dec_start_token_id;
1824
0
                }
1825
1826
0
                hparams.dec_n_layer = hparams.n_layer;
1827
0
                ml.get_key(LLM_KV_DECODER_BLOCK_COUNT, hparams.dec_n_layer, false);
1828
1829
0
                switch (hparams.n_layer) {
1830
0
                    case 6:  type = LLM_TYPE_60M;  break; // t5-small
1831
0
                    case 8:  type = LLM_TYPE_80M;  break; // flan-t5-small
1832
0
                    case 12:
1833
0
                        switch (hparams.n_ff()) {
1834
0
                            case 3072: type = LLM_TYPE_220M; break; // t5-base
1835
0
                            case 2048: type = LLM_TYPE_250M; break; // flan-t5-base
1836
0
                            default: type = LLM_TYPE_UNKNOWN;
1837
0
                        } break;
1838
0
                    case 24:
1839
0
                        switch (hparams.n_ff()) {
1840
0
                            case 4096:  type = LLM_TYPE_770M; break; // t5-large
1841
0
                            case 2816:  type = LLM_TYPE_780M; break; // flan-t5-large
1842
0
                            case 16384: type = LLM_TYPE_3B;   break; // t5-3b
1843
0
                            case 5120:  type = LLM_TYPE_3B;   break; // flan-t5-xl
1844
0
                            case 65536: type = LLM_TYPE_11B;  break; // t5-11b
1845
0
                            case 10240: type = LLM_TYPE_11B;  break; // flan-t5-xxl
1846
0
                            default: type = LLM_TYPE_UNKNOWN;
1847
0
                        } break;
1848
0
                    default: type = LLM_TYPE_UNKNOWN;
1849
0
                }
1850
0
            } break;
1851
0
        case LLM_ARCH_T5ENCODER:
1852
0
            {
1853
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1854
0
                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
1855
0
                type = LLM_TYPE_UNKNOWN;
1856
0
            } break;
1857
0
        case LLM_ARCH_JAIS:
1858
0
            {
1859
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1860
0
                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
1861
1862
0
                switch (hparams.n_layer) {
1863
0
                    case 24: type = LLM_TYPE_1_3B; break;
1864
0
                    case 40: type = LLM_TYPE_13B; break;
1865
                    /* TODO: add variants */
1866
0
                    default: type = LLM_TYPE_UNKNOWN;
1867
0
                }
1868
0
            } break;
1869
0
        case LLM_ARCH_NEMOTRON:
1870
0
            {
1871
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
1872
0
                switch (hparams.n_layer) {
1873
0
                    case 32: type = LLM_TYPE_4B; break;
1874
0
                    default: type = LLM_TYPE_UNKNOWN;
1875
0
                }
1876
0
            } break;
1877
0
        case LLM_ARCH_NEMOTRON_H:
1878
0
        case LLM_ARCH_NEMOTRON_H_MOE:
1879
0
            {
1880
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
1881
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
1882
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
1883
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
1884
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
1885
1886
                // A layer is recurrent IFF the n_head_kv value is set to 0 and
1887
                // the n_ff value is set to 0
1888
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
1889
0
                    hparams.recurrent_layer_arr[i] = (hparams.n_head_kv(i) == 0 && hparams.n_ff(i) == 0);
1890
0
                }
1891
1892
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1893
1894
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp,        false);
1895
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp,      false);
1896
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,               hparams.n_expert_shared, false);
1897
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,               hparams.expert_weights_norm, false);
1898
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,              hparams.expert_weights_scale, false);
1899
1900
0
                switch (hparams.n_layer) {
1901
0
                    case 52: type = LLM_TYPE_31B_A3_5B; break; // Nemotron-H_MOE 31B
1902
0
                    case 56: type = LLM_TYPE_9B; break;
1903
0
                    default: type = LLM_TYPE_UNKNOWN;
1904
0
                }
1905
0
            } break;
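The loop above marks a layer as recurrent exactly when both its KV-head count and its feed-forward size are zero, which is how this hybrid stack separates its SSM layers from attention and FFN layers. A minimal sketch of that classification over a hypothetical four-layer layout (the per-layer values are made up for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        // hypothetical per-layer metadata: SSM, FFN, attention, SSM
        const std::vector<uint32_t> n_head_kv = { 0, 0,     8, 0 };
        const std::vector<uint32_t> n_ff      = { 0, 14336, 0, 0 };

        std::vector<bool> recurrent(n_head_kv.size());
        for (size_t i = 0; i < recurrent.size(); ++i) {
            // same rule as above: recurrent iff both values are zero
            recurrent[i] = (n_head_kv[i] == 0 && n_ff[i] == 0);
        }

        for (size_t i = 0; i < recurrent.size(); ++i) {
            std::printf("layer %zu: %s\n", i, recurrent[i] ? "recurrent" : "not recurrent");
        }
        return 0;
    }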
1906
0
        case LLM_ARCH_EXAONE:
1907
0
            {
1908
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1909
1910
0
                switch (hparams.n_layer) {
1911
0
                    case 32: type = LLM_TYPE_8B; break;
1912
0
                    default: type = LLM_TYPE_UNKNOWN;
1913
0
                }
1914
0
            } break;
1915
0
        case LLM_ARCH_EXAONE4:
1916
0
            {
1917
0
                if (hparams.n_layer == 64) {    // 32B
1918
0
                    hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
1919
0
                    hparams.n_swa = 4096;
1920
0
                    hparams.set_swa_pattern(4);
1921
1922
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
1923
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
1924
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
1925
0
                }
1926
1927
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa, false);
1928
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
1929
1930
0
                switch (hparams.n_layer) {
1931
0
                    case 30: type = LLM_TYPE_1_2B; break;
1932
0
                    case 64: type = LLM_TYPE_32B; break;
1933
0
                    default: type = LLM_TYPE_UNKNOWN;
1934
0
                }
1935
0
            } break;
1936
0
        case LLM_ARCH_RWKV6:
1937
0
        case LLM_ARCH_RWKV6QWEN2:
1938
0
            {
1939
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,     hparams.f_norm_eps, false);
1940
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false);
1941
0
                ml.get_key(LLM_KV_WKV_HEAD_SIZE,               hparams.wkv_head_size);
1942
0
                ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM,          hparams.time_mix_extra_dim);
1943
0
                ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM,        hparams.time_decay_extra_dim);
1944
0
                ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS,      hparams.rescale_every_n_layers, false);
1945
0
                ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT,           hparams.token_shift_count, false);
1946
1947
0
                switch (hparams.n_layer) {
1948
0
                    case 24: type = LLM_TYPE_1_6B; break;
1949
0
                    case 32:
1950
0
                        switch (hparams.n_embd) {
1951
0
                            case 2560: type = LLM_TYPE_3B; break;
1952
0
                            case 4096: type = LLM_TYPE_7B; break;
1953
0
                            default: type = LLM_TYPE_UNKNOWN;
1954
0
                        } break;
1955
0
                    case 61: type = LLM_TYPE_14B; break;
1956
0
                    case 64: type = LLM_TYPE_32B; break;
1957
0
                    default: type = LLM_TYPE_UNKNOWN;
1958
0
                }
1959
0
            } break;
1960
0
        case LLM_ARCH_RWKV7:
1961
0
        case LLM_ARCH_ARWKV7:
1962
0
            {
1963
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,                hparams.f_norm_eps, false);
1964
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,            hparams.f_norm_rms_eps, false);
1965
0
                ml.get_key(LLM_KV_WKV_HEAD_SIZE,                          hparams.wkv_head_size);
1966
0
                ml.get_key(LLM_KV_ATTENTION_DECAY_LORA_RANK,              hparams.n_lora_decay);
1967
0
                ml.get_key(LLM_KV_ATTENTION_ICLR_LORA_RANK,               hparams.n_lora_iclr);
1968
0
                ml.get_key(LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, hparams.n_lora_value_res_mix);
1969
0
                ml.get_key(LLM_KV_ATTENTION_GATE_LORA_RANK,               hparams.n_lora_gate, false);
1970
0
                ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT,                      hparams.token_shift_count, false);
1971
1972
0
                switch (hparams.n_layer) {
1973
0
                    case 12:
1974
0
                        switch (hparams.n_embd) {
1975
0
                            case 768: type = LLM_TYPE_190M; break;
1976
0
                            default: type = LLM_TYPE_UNKNOWN;
1977
0
                        } break;
1978
0
                    case 24:
1979
0
                        switch (hparams.n_embd) {
1980
0
                            case 1024: type = LLM_TYPE_450M; break;
1981
0
                            case 2048: type = LLM_TYPE_1_5B; break;
1982
0
                            default: type = LLM_TYPE_UNKNOWN;
1983
0
                        } break;
1984
0
                    case 28:
1985
0
                        switch (hparams.n_embd) {
1986
0
                            case 1536: type = LLM_TYPE_1_5B; break;
1987
0
                            case 3584: type = LLM_TYPE_7B; break;
1988
0
                            default: type = LLM_TYPE_UNKNOWN;
1989
0
                        } break;
1990
0
                    case 32:
1991
0
                        switch (hparams.n_embd) {
1992
0
                            case 2560: type = LLM_TYPE_2_9B; break;
1993
0
                            case 4096: type = LLM_TYPE_7B; break;
1994
0
                            default: type = LLM_TYPE_UNKNOWN;
1995
0
                        } break;
1996
0
                    case 61:
1997
0
                        switch (hparams.n_embd) {
1998
0
                            case 4096: type = LLM_TYPE_14B; break;
1999
0
                            default: type = LLM_TYPE_UNKNOWN;
2000
0
                        } break;
2001
0
                    default: type = LLM_TYPE_UNKNOWN;
2002
0
                }
2003
0
            } break;
2004
0
        case LLM_ARCH_GRANITE:
2005
0
        case LLM_ARCH_GRANITE_MOE:
2006
0
            {
2007
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2008
0
                ml.get_key(LLM_KV_LOGIT_SCALE,                 hparams.f_logit_scale);
2009
0
                ml.get_key(LLM_KV_RESIDUAL_SCALE,              hparams.f_residual_scale);
2010
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE,             hparams.f_embedding_scale);
2011
0
                ml.get_key(LLM_KV_ATTENTION_SCALE,             hparams.f_attention_scale);
2012
2013
                // Granite uses rope_finetuned as a switch for rope, so default to true
2014
0
                bool rope_finetuned = true;
2015
0
                ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
2016
0
                hparams.rope_finetuned = rope_finetuned;
2017
2018
0
                switch (hparams.n_layer) {
2019
0
                    case 32: type = LLM_TYPE_3B; break;
2020
0
                    case 40: type = LLM_TYPE_3B; break;
2021
                    // Add additional layer/vocab/etc checks here for other model sizes
2022
0
                    default: type = LLM_TYPE_UNKNOWN;
2023
0
                }
2024
2025
                // For Granite MoE Shared
2026
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, /* required */ false);
2027
0
            } break;
2028
0
        case LLM_ARCH_GRANITE_HYBRID:
2029
0
            {
2030
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2031
0
                ml.get_key(LLM_KV_LOGIT_SCALE,                 hparams.f_logit_scale, /* required */ false);
2032
0
                ml.get_key(LLM_KV_RESIDUAL_SCALE,              hparams.f_residual_scale, /* required */ false);
2033
0
                ml.get_key(LLM_KV_EMBEDDING_SCALE,             hparams.f_embedding_scale, /* required */ false);
2034
0
                ml.get_key(LLM_KV_ATTENTION_SCALE,             hparams.f_attention_scale, /* required */ false);
2035
2036
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
2037
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
2038
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
2039
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
2040
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
2041
2042
                // Granite uses rope_finetuned as a switch for rope, so default to true
2043
0
                bool rope_finetuned = true;
2044
0
                ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
2045
0
                hparams.rope_finetuned = rope_finetuned;
2046
2047
                // A layer is recurrent IFF the n_head_kv value is set to 0
2048
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
2049
0
                    hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0;
2050
0
                }
2051
2052
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2053
2054
0
                switch (hparams.n_embd) {
2055
0
                    case 768: type = LLM_TYPE_350M; break;
2056
0
                    case 1536: type = (hparams.n_embd == 2048 ? LLM_TYPE_7B_A1B : LLM_TYPE_1B); break;
2057
0
                    case 2048: case 2560: type = LLM_TYPE_3B; break;
2058
0
                    case 4096: type = LLM_TYPE_32B; break;
2059
0
                    default: type = LLM_TYPE_UNKNOWN;
2060
0
                }
2061
2062
                // For Granite MoE Shared
2063
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, /* required */ false);
2064
0
            } break;
2065
0
        case LLM_ARCH_CHAMELEON:
2066
0
            {
2067
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2068
0
                hparams.f_norm_eps = 1e-5;  // eps for qk-norm, torch default
2069
0
                ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
2070
2071
0
                switch (hparams.n_layer) {
2072
0
                    case 32: type = LLM_TYPE_7B; break;
2073
0
                    case 48: type = LLM_TYPE_34B; break;
2074
0
                    default: type = LLM_TYPE_UNKNOWN;
2075
0
                }
2076
0
            } break;
2077
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
2078
0
            {
2079
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
2080
0
                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS,    hparams.f_norm_group_eps);
2081
0
                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups);
2082
0
                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
2083
0
            } break;
2084
0
        case LLM_ARCH_BAILINGMOE:
2085
0
            {
2086
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2087
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
2088
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2089
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
2090
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
2091
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
2092
2093
0
                switch (hparams.n_layer) {
2094
0
                    case 28: type = LLM_TYPE_16B; break;
2095
0
                    case 88: type = LLM_TYPE_290B; break;
2096
0
                    default: type = LLM_TYPE_UNKNOWN;
2097
0
                }
2098
0
            } break;
2099
0
        case LLM_ARCH_BAILINGMOE2:
2100
0
            {
2101
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2102
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,         hparams.n_layer_dense_lead);
2103
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2104
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp);
2105
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,               hparams.n_expert_shared);
2106
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,              hparams.expert_weights_scale);
2107
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,               hparams.expert_weights_norm, false);
2108
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,                hparams.expert_gating_func);
2109
0
                ml.get_key(LLM_KV_NEXTN_PREDICT_LAYERS,              hparams.nextn_predict_layers, false);
2110
2111
                // TODO: when MTP is implemented, this should probably be updated if needed
2112
0
                hparams.n_layer_kv_from_start = hparams.n_layer - hparams.nextn_predict_layers;
2113
2114
0
                switch (hparams.n_layer) {
2115
0
                    case 20: type = LLM_TYPE_16B_A1B; break;
2116
0
                    case 21: type = LLM_TYPE_16B_A1B; break;
2117
0
                    case 32: type = LLM_TYPE_100B_A6B; break;
2118
0
                    case 33: type = LLM_TYPE_100B_A6B; break;
2119
0
                    default: type = LLM_TYPE_UNKNOWN;
2120
0
                }
2121
0
            } break;
2122
0
        case LLM_ARCH_DOTS1:
2123
0
            {
2124
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2125
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
2126
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2127
0
                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT,         hparams.n_expert_shared);
2128
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE,        hparams.expert_weights_scale);
2129
0
                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM,         hparams.expert_weights_norm, false);
2130
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
2131
0
                switch (hparams.n_layer) {
2132
0
                    case 62: type = LLM_TYPE_142B; break;
2133
0
                    default: type = LLM_TYPE_UNKNOWN;
2134
0
                }
2135
0
            } break;
2136
0
        case LLM_ARCH_ERNIE4_5:
2137
0
        case LLM_ARCH_ERNIE4_5_MOE:
2138
0
            {
2139
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2140
0
                if (arch == LLM_ARCH_ERNIE4_5_MOE) {
2141
0
                    ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2142
0
                    ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
2143
0
                    ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP,         hparams.n_moe_layer_step);
2144
0
                    ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,         hparams.n_layer_dense_lead);
2145
0
                }
2146
2147
0
                switch (hparams.n_layer) {
2148
0
                    case 18: type = LLM_TYPE_0_3B; break;
2149
0
                    case 28: type = LLM_TYPE_21B_A3B; break;
2150
0
                    case 54: type = LLM_TYPE_300B_A47B; break;
2151
0
                    default: type = LLM_TYPE_UNKNOWN;
2152
0
                }
2153
0
            } break;
2154
0
        case LLM_ARCH_FALCON_H1:
2155
0
            {
2156
                // Common parameters
2157
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2158
2159
                // SSM parameters
2160
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
2161
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
2162
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
2163
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
2164
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
2165
2166
0
                std::fill(hparams.recurrent_layer_arr.begin(), hparams.recurrent_layer_arr.end(), true);
2167
2168
0
                switch (hparams.n_layer) {
2169
0
                    case 36:
2170
0
                        type = LLM_TYPE_0_5B; break;
2171
0
                    case 24:
2172
0
                        type = LLM_TYPE_1_5B; break;
2173
0
                    case 66:
2174
0
                        type = LLM_TYPE_1B; break;
2175
0
                    case 32:
2176
0
                        type = LLM_TYPE_3B; break;
2177
0
                    case 44:
2178
0
                        type = LLM_TYPE_7B; break;
2179
0
                    case 72:
2180
0
                        type = LLM_TYPE_34B; break;
2181
0
                    default:
2182
0
                        type = LLM_TYPE_UNKNOWN;
2183
0
                }
2184
0
            } break;
2185
0
        case LLM_ARCH_HUNYUAN_MOE:
2186
0
            {
2187
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2188
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2189
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp);
2190
2191
0
                switch (hparams.n_layer) {
2192
0
                    case 32: type = LLM_TYPE_A13B; break;
2193
0
                    default: type = LLM_TYPE_UNKNOWN;
2194
0
                }
2195
0
            } break;
2196
0
        case LLM_ARCH_HUNYUAN_DENSE:
2197
0
            {
2198
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2199
2200
0
                switch (hparams.n_embd) {
2201
0
                    case 1024: type = LLM_TYPE_0_5B; break;
2202
0
                    case 2048: type = LLM_TYPE_1_8B; break;
2203
0
                    case 3072: type = LLM_TYPE_4B; break;
2204
0
                    case 4096: type = LLM_TYPE_7B; break;
2205
0
                    default: type = LLM_TYPE_UNKNOWN;
2206
0
                }
2207
0
            } break;
2208
0
        case LLM_ARCH_SMOLLM3:
2209
0
            {
2210
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2211
0
                hparams.n_no_rope_layer_step = 4;
2212
2213
0
                switch (hparams.n_layer) {
2214
0
                    case 36: type = LLM_TYPE_3B; break;
2215
0
                    default: type = LLM_TYPE_UNKNOWN;
2216
0
                }
2217
0
            } break;
2218
0
        case LLM_ARCH_OPENAI_MOE:
2219
0
            {
2220
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2221
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2222
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,    hparams.n_swa);
2223
2224
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
2225
0
                hparams.set_swa_pattern(2);
2226
2227
0
                hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
2228
0
                hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
2229
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
2230
2231
0
                switch (hparams.n_layer) {
2232
0
                    case 24: type = LLM_TYPE_20B; break;
2233
0
                    case 36: type = LLM_TYPE_120B; break;
2234
0
                    default: type = LLM_TYPE_UNKNOWN;
2235
0
                }
2236
0
            } break;
2237
0
        case LLM_ARCH_LFM2:
2238
0
            {
2239
0
                ml.get_key(LLM_KV_SHORTCONV_L_CACHE,           hparams.n_shortconv_l_cache);
2240
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2241
0
                for (uint32_t il = 0; il < hparams.n_layer; ++il) {
2242
0
                    hparams.recurrent_layer_arr[il] = hparams.n_head_kv(il) == 0;
2243
0
                }
2244
0
                hparams.n_layer_dense_lead = hparams.n_layer;
2245
0
                switch (hparams.n_ff()) {
2246
0
                    case  4608: type = LLM_TYPE_350M; break;
2247
0
                    case  6912: type = LLM_TYPE_700M; break;
2248
0
                    case  8192: type = LLM_TYPE_1_2B; break;
2249
0
                    case 10752: type = LLM_TYPE_2_6B; break;
2250
0
                    default:    type = LLM_TYPE_UNKNOWN;
2251
0
                }
2252
0
            } break;
2253
0
        case LLM_ARCH_LFM2MOE:
2254
0
            {
2255
0
                ml.get_key(LLM_KV_SHORTCONV_L_CACHE,           hparams.n_shortconv_l_cache);
2256
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2257
0
                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT,   hparams.n_layer_dense_lead);
2258
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp);
2259
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func);
2260
2261
0
                for (uint32_t il = 0; il < hparams.n_layer; ++il) {
2262
0
                    hparams.recurrent_layer_arr[il] = hparams.n_head_kv(il) == 0;
2263
0
                }
2264
2265
0
                type = LLM_TYPE_8B_A1B;
2266
0
            } break;
2267
0
        case LLM_ARCH_SMALLTHINKER:
2268
0
            {
2269
0
                const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
2270
2271
0
                if (found_swa && hparams.n_swa > 0) {
2272
0
                    hparams.swa_type      = LLAMA_SWA_TYPE_STANDARD;
2273
0
                    hparams.n_swa         = 4096;
2274
0
                    hparams.set_swa_pattern(4, true);
2275
2276
0
                    hparams.rope_freq_base_train_swa  = hparams.rope_freq_base_train;
2277
0
                    hparams.rope_freq_scale_train_swa = hparams.rope_freq_scale_train;
2278
0
                    ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA, hparams.rope_freq_base_train_swa, false);
2279
0
                } else {
2280
0
                    hparams.swa_type             = LLAMA_SWA_TYPE_NONE;
2281
0
                    hparams.n_no_rope_layer_step = hparams.n_layer;
2282
0
                }
2283
2284
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,  hparams.n_ff_exp, false);
2285
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2286
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,          hparams.expert_gating_func, false);
2287
2288
0
                switch (hparams.n_layer) {
2289
0
                    case 32: type = LLM_TYPE_4B;  break;
2290
0
                    case 52: type = LLM_TYPE_20B; break;
2291
0
                    default: type = LLM_TYPE_UNKNOWN;
2292
0
                }
2293
0
            } break;
2294
0
        case LLM_ARCH_GROVEMOE:
2295
0
            {
2296
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp);
2297
0
                ml.get_key(LLM_KV_EXPERT_CHUNK_FEED_FORWARD_LENGTH,  hparams.n_ff_chexp);
2298
0
                ml.get_key(LLM_KV_EXPERT_GROUP_SCALE,                hparams.expert_group_scale);
2299
0
                ml.get_key(LLM_KV_EXPERTS_PER_GROUP,                 hparams.n_group_experts);
2300
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2301
2302
0
                switch (hparams.n_layer) {
2303
0
                    case 48: type = LLM_TYPE_30B_A3B; break;
2304
0
                    default: type = LLM_TYPE_UNKNOWN;
2305
0
                }
2306
0
            } break;
2307
0
        case LLM_ARCH_APERTUS:
2308
0
            {
2309
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2310
0
                ml.get_key_or_arr(LLM_KV_XIELU_ALPHA_N,        hparams.xielu_alpha_n, hparams.n_layer);
2311
0
                ml.get_key_or_arr(LLM_KV_XIELU_ALPHA_P,        hparams.xielu_alpha_p, hparams.n_layer);
2312
0
                ml.get_key_or_arr(LLM_KV_XIELU_BETA,           hparams.xielu_beta,    hparams.n_layer);
2313
0
                ml.get_key_or_arr(LLM_KV_XIELU_EPS,            hparams.xielu_eps,     hparams.n_layer);
2314
2315
0
                switch (hparams.n_layer) {
2316
0
                    case 32: type = LLM_TYPE_8B; break;
2317
0
                    default: type = LLM_TYPE_UNKNOWN;
2318
0
                }
2319
0
            } break;
2320
0
        case LLM_ARCH_MINIMAX_M2:
2321
0
            {
2322
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,  hparams.f_norm_rms_eps);
2323
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,   hparams.n_ff_exp);
2324
0
                ml.get_key(LLM_KV_EXPERT_GATING_FUNC,           hparams.expert_gating_func, false);
2325
2326
0
                switch (hparams.n_layer) {
2327
0
                    case 62: type = LLM_TYPE_230B_A10B; break;
2328
0
                    default: type = LLM_TYPE_UNKNOWN;
2329
0
                }
2330
0
            } break;
2331
0
        case LLM_ARCH_COGVLM:
2332
0
            {
2333
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2334
0
                switch (hparams.n_layer) {
2335
0
                    case 32: type = LLM_TYPE_13B; break;
2336
0
                    default: type = LLM_TYPE_UNKNOWN;
2337
0
                }
2338
0
            } break;
2339
0
        case LLM_ARCH_PANGU_EMBED:
2340
0
            {
2341
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2342
0
                switch (hparams.n_layer) {
2343
0
                    case 26: type = LLM_TYPE_1B; break; // openPangu-Embedded-1B-V1.1
2344
0
                    case 34: type = LLM_TYPE_7B; break; // openPangu-Embedded-7B-V1.1
2345
0
                    default: type = LLM_TYPE_UNKNOWN;
2346
0
                }
2347
0
            } break;
2348
0
        case LLM_ARCH_QWEN3NEXT:
2349
0
            {
2350
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        hparams.n_ff_exp, false);
2351
0
                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
2352
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,       hparams.f_norm_rms_eps);
2353
2354
                // Load linear attention (gated delta net) parameters
2355
0
                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
2356
0
                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
2357
0
                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
2358
0
                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
2359
0
                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
2360
2361
                // Mark recurrent layers (linear attention layers)
2362
0
                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
2363
0
                    hparams.recurrent_layer_arr[i] = ((i + 1) % 4 != 0); // TODO: extract the magic 4 from "full_attention_interval"
2364
0
                }
2365
2366
0
                switch (hparams.n_layer) {
2367
0
                    case 48: type = LLM_TYPE_80B_A3B; break;
2368
0
                    default: type = LLM_TYPE_UNKNOWN;
2369
0
                }
2370
0
            } break;
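
    A quick editorial sketch (not part of llama-model.cpp) of the schedule implied by the "(i + 1) % 4 != 0" rule above: every fourth layer (indices 3, 7, 11, ...) keeps full attention, the rest are marked as recurrent linear-attention layers.

    // editorial sketch: print the Qwen3-Next style layer schedule for a few layers
    #include <cstdio>

    int main() {
        for (int i = 0; i < 12; ++i) {
            const bool recurrent = (i + 1) % 4 != 0;   // same rule as recurrent_layer_arr above
            std::printf("layer %2d: %s\n", i, recurrent ? "recurrent (linear attention)" : "full attention");
        }
        return 0;
    }
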
2371
0
        case LLM_ARCH_MISTRAL3:
2372
0
            {
2373
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2374
0
                ml.get_key(LLM_KV_ATTENTION_TEMPERATURE_SCALE, hparams.f_attn_temp_scale, false);
2375
2376
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_FAST, hparams.yarn_beta_fast,    false);
2377
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_BETA_SLOW, hparams.yarn_beta_slow,    false);
2378
0
                ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL,   hparams.rope_yarn_log_mul, 0.0f);
2379
2380
0
                hparams.f_attn_temp_offset = 0.0f;
2381
2382
                // TODO: maybe add n_attn_temp_floor_scale as a separate KV?
2383
0
                if (hparams.f_attn_temp_scale != 0.0f) {
2384
0
                    hparams.n_attn_temp_floor_scale = hparams.n_ctx_orig_yarn;
2385
0
                    if (hparams.n_attn_temp_floor_scale == 0) {
2386
0
                        throw std::runtime_error("invalid n_ctx_orig_yarn for attention temperature scaling");
2387
0
                    }
2388
0
                }
2389
2390
0
                switch (hparams.n_layer) {
2391
0
                    case 26: type = LLM_TYPE_3B; break;
2392
0
                    case 34: type = LLM_TYPE_8B; break;
2393
0
                    case 40: type = LLM_TYPE_14B; break;
2394
0
                    default: type = LLM_TYPE_UNKNOWN;
2395
0
                }
2396
0
            } break;
2397
0
        case LLM_ARCH_MIMO2:
2398
0
            {
2399
0
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2400
2401
0
                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
2402
2403
0
                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
2404
0
                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW,   hparams.n_swa);
2405
0
                ml.get_key(LLM_KV_ROPE_FREQ_BASE_SWA,         hparams.rope_freq_base_train_swa);
2406
0
                ml.get_key_or_arr(LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN, hparams.swa_layers, hparams.n_layer);
2407
2408
0
                switch (hparams.n_layer) {
2409
0
                    case 48: type = LLM_TYPE_310B_A15B; break;
2410
0
                    default: type = LLM_TYPE_UNKNOWN;
2411
0
                }
2412
0
            } break;
2413
0
        default: throw std::runtime_error("unsupported model architecture");
2414
0
    }
2415
2416
0
    pimpl->n_bytes = ml.n_bytes;
2417
2418
0
    pimpl->desc_str = arch_name() + " " + type_name() + " " + ml.ftype_name();
2419
2420
0
    if (hparams.f_max_alibi_bias > 0.0f) {
2421
0
        hparams.use_alibi = true;
2422
0
    }
2423
2424
0
    hparams.rope_type = llama_model_rope_type(this);
2425
0
}
2426
2427
0
void llama_model::load_vocab(llama_model_loader & ml) {
2428
0
    const auto kv = LLM_KV(arch);
2429
2430
0
    vocab.load(ml, kv);
2431
0
}
2432
2433
0
bool llama_model::load_tensors(llama_model_loader & ml) {
2434
0
    const auto & split_mode   = params.split_mode;
2435
0
    const auto & use_mlock    = params.use_mlock;
2436
0
    const auto & tensor_split = params.tensor_split;
2437
2438
0
    const int n_layer      = hparams.n_layer;
2439
0
    const int n_gpu_layers = this->n_gpu_layers();
2440
2441
0
    const bool use_mmap_buffer = true;
2442
2443
0
    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s, direct_io = %s)\n",
2444
0
        __func__, ml.use_mmap ? "true" : "false", ml.use_direct_io ? "true" : "false");
2445
2446
    // build a list of buffer types for the CPU and GPU devices
2447
0
    pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts, params.no_host);
2448
0
    for (auto * dev : devices) {
2449
0
        buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
2450
        // add CPU buffer types as a fallback
2451
0
        buft_list.insert(buft_list.end(), pimpl->cpu_buft_list.begin(), pimpl->cpu_buft_list.end());
2452
0
        pimpl->gpu_buft_list.emplace(dev, std::move(buft_list));
2453
0
    }
2454
2455
0
    ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
2456
0
    if (cpu_dev == nullptr) {
2457
0
        throw std::runtime_error(format("%s: no CPU backend found", __func__));
2458
0
    }
2459
2460
    // calculate the split points
2461
0
    bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + n_devices(), [](float x) { return x == 0.0f; });
2462
0
    std::vector<float> splits(n_devices());
2463
0
    if (all_zero) {
2464
        // default split, by free memory
2465
0
        for (size_t i = 0; i < n_devices(); ++i) {
2466
0
            ggml_backend_dev_t dev = devices[i];
2467
0
            size_t total;
2468
0
            size_t free;
2469
0
            ggml_backend_dev_memory(dev, &free, &total);
2470
2471
            // devices can return 0 bytes for free and total memory if they do not
2472
            // have any to report. In this case, we will use the host memory as a fallback
2473
            // fixes: https://github.com/ggml-org/llama.cpp/issues/18577
2474
0
            if (free == 0 && total == 0) {
2475
0
                ggml_backend_dev_memory(cpu_dev, &free, &total);
2476
0
            }
2477
0
            splits[i] = free;
2478
0
        }
2479
0
    } else {
2480
0
        std::copy(tensor_split, tensor_split + n_devices(), splits.begin());
2481
0
    }
2482
2483
    // sum and normalize the splits to get the split points
2484
0
    float split_sum = 0.0f;
2485
0
    for (size_t i = 0; i < n_devices(); ++i) {
2486
0
        split_sum += splits[i];
2487
0
        splits[i] = split_sum;
2488
0
    }
2489
0
    for (size_t i = 0; i < n_devices(); ++i) {
2490
0
        splits[i] /= split_sum;
2491
0
    }
2492
2493
0
    const int i_gpu_start = std::max(int(hparams.n_layer) + 1 - n_gpu_layers, 0);
2494
0
    const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, int(n_layer) + 1);
2495
0
    auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev {
2496
0
        const bool is_swa = il < int(hparams.n_layer) && hparams.is_swa(il);
2497
0
        if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
2498
0
            LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(cpu_dev), is_swa);
2499
0
            return {cpu_dev, &pimpl->cpu_buft_list};
2500
0
        }
2501
0
        const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
2502
0
        auto * dev = devices.at(layer_gpu);
2503
0
        LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s, is_swa = %d\n", il, ggml_backend_dev_name(dev), is_swa);
2504
0
        return {dev, &pimpl->gpu_buft_list.at(dev)};
2505
0
    };
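
    To make the device-split arithmetic above concrete: the per-device free-memory values are prefix-summed, normalized into cumulative fractions, and each repeating layer is mapped to the first device whose cumulative fraction exceeds the layer's relative position. A minimal standalone sketch (editorial illustration; the memory figures are assumed, not taken from this report):

    // editorial sketch of the split-point logic: prefix-sum, normalize, then
    // std::upper_bound to pick a device per layer (as in get_layer_buft_list)
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<float> splits = {8e9f, 24e9f};        // assumed free bytes on two devices
        float sum = 0.0f;
        for (float & s : splits) { sum += s; s = sum; }   // prefix sums: {8e9, 32e9}
        for (float & s : splits) { s /= sum; }            // cumulative fractions: {0.25, 1.0}

        const int n_layers = 32;
        for (int il = 0; il < n_layers; ++il) {
            const float frac = float(il) / n_layers;
            const int dev = std::upper_bound(splits.begin(), splits.end(), frac) - splits.begin();
            std::printf("layer %2d -> device %d\n", il, dev);   // layers 0-7 -> dev 0, 8-31 -> dev 1
        }
        return 0;
    }
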
2506
2507
    // assign the input layer
2508
    // there is very little benefit to offloading the input layer, so always keep it on the CPU
2509
0
    pimpl->dev_input = { cpu_dev, &pimpl->cpu_buft_list };
2510
2511
    // assign the repeating layers to the devices according to the splits
2512
0
    pimpl->dev_layer.resize(n_layer);
2513
0
    for (int il = 0; il < n_layer; ++il) {
2514
0
        pimpl->dev_layer[il] = get_layer_buft_list(il);
2515
0
    }
2516
2517
    // assign the output layer
2518
0
    pimpl->dev_output = get_layer_buft_list(n_layer);
2519
2520
    // one ggml context per buffer type
2521
0
    int max_n_tensors = ml.n_tensors;
2522
0
    max_n_tensors += 1;         // duplicated output tensor
2523
0
    max_n_tensors += n_layer*2; // duplicated rope freq tensors
2524
0
    const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;
2525
2526
    // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
2527
0
    struct ggml_backend_buft_comparator {
2528
0
        bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
2529
0
            return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0;
2530
0
        }
2531
0
    };
2532
0
    std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;
2533
2534
0
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
2535
0
        auto it = ctx_map.find(buft);
2536
0
        if (it == ctx_map.end()) {
2537
0
            ggml_init_params params = {
2538
0
                /*.mem_size   =*/ ctx_size,
2539
0
                /*.mem_buffer =*/ NULL,
2540
0
                /*.no_alloc   =*/ true,
2541
0
            };
2542
2543
0
            ggml_context * ctx = ggml_init(params);
2544
0
            if (!ctx) {
2545
0
                throw std::runtime_error(format("failed to create ggml context"));
2546
0
            }
2547
2548
0
            ctx_map.emplace(buft, ctx);
2549
2550
0
            return ctx;
2551
0
        }
2552
0
        return it->second.get();
2553
0
    };
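
    The comparator/map/lambda trio above is a small pattern worth noting: one context per buffer type, created lazily on first lookup, with a strcmp-based comparator keying the map on the buffer type's name so iteration order is well-defined. A toy standalone sketch (editorial illustration with stand-in types, not the ggml API):

    // editorial sketch: lazy per-key resource creation with a name-ordered map
    #include <cstdio>
    #include <cstring>
    #include <map>
    #include <string>

    struct buft { const char * name; };   // stand-in for ggml_backend_buffer_type_t

    struct buft_name_less {
        bool operator()(const buft * a, const buft * b) const {
            return std::strcmp(a->name, b->name) < 0;
        }
    };

    int main() {
        buft cpu{"CPU"}, cuda{"CUDA0"};
        std::map<const buft *, std::string, buft_name_less> ctx_map;

        auto ctx_for = [&](const buft * b) -> std::string & {
            auto it = ctx_map.find(b);
            if (it == ctx_map.end()) {
                it = ctx_map.emplace(b, std::string("ctx:") + b->name).first;   // created on first use
            }
            return it->second;
        };

        std::printf("%s\n", ctx_for(&cuda).c_str());
        std::printf("%s\n", ctx_for(&cpu).c_str());
        std::printf("%s (reused)\n", ctx_for(&cuda).c_str());
        return 0;
    }
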
2554
2555
0
    const auto TENSOR_DUPLICATED   = llama_model_loader::TENSOR_DUPLICATED;
2556
0
    const auto TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED;
2557
0
    const auto TENSOR_SKIP         = llama_model_loader::TENSOR_SKIP;
2558
2559
    // create tensors for the weights
2560
0
    {
2561
        // note: cast to int64_t since we will use these for the tensor dimensions
2562
0
        const int64_t n_head        = hparams.n_head();
2563
0
        const int64_t n_head_kv     = hparams.n_head_kv();
2564
0
        const int64_t n_embd        = hparams.n_embd;
2565
0
        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa();
2566
0
        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa();
2567
0
        const int64_t n_embd_head_k = hparams.n_embd_head_k;
2568
0
        const int64_t n_embd_head_v = hparams.n_embd_head_v;
2569
0
        const int64_t n_ff          = hparams.n_ff();
2570
0
        const int64_t n_embd_gqa    = n_embd_v_gqa;
2571
0
        const int64_t n_vocab       = vocab.n_tokens();
2572
0
        const int64_t n_token_types = vocab.n_token_types();
2573
0
        const int64_t n_rot         = hparams.n_rot;
2574
0
        const int64_t n_expert      = hparams.n_expert;
2575
0
        const int64_t n_expert_used = hparams.n_expert_used;
2576
0
        const int64_t n_ctx_train   = hparams.n_ctx_train;
2577
2578
0
        if (n_expert > 0 && hparams.n_expert_used == 0) {
2579
0
            throw std::runtime_error("model has expert layers but no expert layers are used");
2580
0
        }
2581
2582
0
        int n_moved_tensors = 0;
2583
0
        ggml_tensor * first_moved_tensor = nullptr;
2584
0
        ggml_backend_buffer_type_t first_moved_from_buft = nullptr;
2585
0
        ggml_backend_buffer_type_t first_moved_to_buft = nullptr;
2586
2587
0
        auto create_tensor = [&](const LLM_TN_IMPL & tn, const std::initializer_list<int64_t> & ne, int flags) -> ggml_tensor * {
2588
0
            ggml_tensor * t_meta = ml.get_tensor_meta(tn.str().c_str());
2589
2590
0
            if (!t_meta) {
2591
0
                if (flags & TENSOR_NOT_REQUIRED) {
2592
0
                    return nullptr;
2593
0
                }
2594
0
                throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str()));
2595
0
            }
2596
2597
            // some models use the token embedding tensor as the output, but since these are used in different layers and with different ops
2598
            // the tensor is duplicated
2599
            // to handle this, we check if the tensor is duplicated, and if so, we assume that it is being loaded as the output tensor
2600
0
            llm_tensor tn_tensor = tn.tensor;
2601
0
            if (tn.tensor == LLM_TENSOR_TOKEN_EMBD && flags & TENSOR_DUPLICATED) {
2602
0
                tn_tensor = LLM_TENSOR_OUTPUT;
2603
0
            }
2604
2605
0
            llm_tensor_info info;
2606
0
            try {
2607
0
                info = llm_tensor_info_for(tn_tensor);
2608
0
            } catch (const std::out_of_range & e) {
2609
0
                throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str()));
2610
0
            }
2611
2612
            // skip unused tensors
2613
0
            if (info.op == GGML_OP_NONE || flags & TENSOR_SKIP) {
2614
0
                const size_t nbytes = ggml_nbytes(t_meta);
2615
0
                LLAMA_LOG_WARN("model has unused tensor %s (size = %zu bytes) -- ignoring\n", tn.str().c_str(), nbytes);
2616
2617
0
                ml.size_data -= nbytes;
2618
0
                ml.n_created++;
2619
2620
0
                return nullptr;
2621
0
            }
2622
2623
            // tensors with "bias" suffix are always used with GGML_OP_ADD or GGML_OP_ADD_ID
2624
0
            ggml_op op;
2625
0
            bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0;
2626
0
            if (bias) {
2627
0
                if (info.op == GGML_OP_MUL_MAT_ID) {
2628
0
                    op = GGML_OP_ADD_ID;
2629
0
                } else {
2630
0
                    op = GGML_OP_ADD;
2631
0
                }
2632
0
            } else {
2633
0
                op = info.op;
2634
0
            }
2635
2636
            // sanity checks
2637
0
            if (info.layer == LLM_TENSOR_LAYER_INPUT || info.layer == LLM_TENSOR_LAYER_OUTPUT) {
2638
0
                if (tn.bid != -1) {
2639
0
                    GGML_ABORT("input/output layer tensor %s used with a layer number", tn.str().c_str());
2640
0
                }
2641
0
            } else {
2642
0
                if (tn.bid == -1) {
2643
0
                    GGML_ABORT("repeating layer tensor %s used without a layer number", tn.str().c_str());
2644
0
                }
2645
0
            }
2646
2647
            // select the buffer type for this tensor
2648
0
            buft_list_t * buft_list;
2649
0
            switch (info.layer) {
2650
0
                case LLM_TENSOR_LAYER_INPUT:
2651
0
                    buft_list = pimpl->dev_input.buft_list;
2652
0
                    break;
2653
0
                case LLM_TENSOR_LAYER_OUTPUT:
2654
0
                    buft_list = pimpl->dev_output.buft_list;
2655
0
                    break;
2656
0
                case LLM_TENSOR_LAYER_REPEATING:
2657
0
                    buft_list = pimpl->dev_layer.at(tn.bid).buft_list;
2658
0
                    break;
2659
0
                default:
2660
0
                    GGML_ABORT("invalid layer %d for tensor %s", info.layer, tn.str().c_str());
2661
0
            }
2662
2663
0
            ggml_backend_buffer_type_t buft = nullptr;
2664
2665
            // check overrides
2666
0
            if (ml.tensor_buft_overrides) {
2667
0
                std::string tensor_name = tn.str();
2668
0
                for (const auto * overrides = ml.tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) {
2669
0
                    std::regex pattern(overrides->pattern);
2670
0
                    if (std::regex_search(tensor_name, pattern)) {
2671
0
                        if (overrides->buft == ggml_backend_cpu_buffer_type()) {
2672
                            // when overriding to a CPU buffer, consider the extra buffer types
2673
0
                            buft = select_weight_buft(hparams, t_meta, op, pimpl->cpu_buft_list);
2674
0
                        } else {
2675
0
                            buft = overrides->buft;
2676
0
                        }
2677
2678
0
                        LLAMA_LOG_DEBUG("tensor %s (%zu MiB %s) buffer type overridden to %s\n",
2679
0
                                tensor_name.c_str(),
2680
0
                                ggml_nbytes(t_meta) / 1024 / 1024, ggml_type_name(t_meta->type),
2681
0
                                ggml_backend_buft_name(buft));
2682
0
                        break;
2683
0
                    }
2684
0
                }
2685
0
            }
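
    The override loop above is a plain std::regex_search of each user-supplied pattern against the full tensor name, and the first matching pattern wins. A standalone sketch (editorial illustration; the tensor names and pattern below are assumed examples):

    // editorial sketch: regex-based matching of tensor names, as used for buffer overrides
    #include <cstdio>
    #include <regex>
    #include <string>

    int main() {
        const std::string names[] = {
            "blk.0.attn_q.weight",
            "blk.10.ffn_gate_exps.weight",
            "blk.10.ffn_down_exps.weight",
        };
        const std::regex pattern("ffn_.*_exps");   // assumed example pattern
        for (const auto & name : names) {
            std::printf("%-32s %s\n", name.c_str(),
                        std::regex_search(name, pattern) ? "overridden" : "default buft");
        }
        return 0;
    }
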
2686
2687
0
            if (!buft) {
2688
0
                buft = select_weight_buft(hparams, t_meta, op, *buft_list);
2689
0
                if (!buft) {
2690
0
                    throw std::runtime_error(format("failed to find a compatible buffer type for tensor %s", tn.str().c_str()));
2691
0
                }
2692
0
            }
2693
2694
            // avoid using a host buffer when using mmap
2695
0
            auto * buft_dev = ggml_backend_buft_get_device(buft);
2696
0
            if (ml.use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) {
2697
0
                auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
2698
0
                if (!cpu_dev) {
2699
0
                    throw std::runtime_error("no CPU backend found");
2700
0
                }
2701
0
                buft = ggml_backend_dev_buffer_type(cpu_dev);
2702
0
            }
2703
2704
0
            if (buft != buft_list->front().second) {
2705
0
                n_moved_tensors++;
2706
0
                if (!first_moved_tensor) {
2707
0
                    first_moved_tensor = t_meta;
2708
0
                    first_moved_from_buft = buft_list->front().second;
2709
0
                    first_moved_to_buft   = buft;
2710
0
                }
2711
0
            }
2712
2713
0
            ggml_context * ctx = ctx_for_buft(buft);
2714
2715
            // if duplicated, check if the original tensor was allocated in the same buffer type context and avoid creating a new one
2716
0
            if (flags & TENSOR_DUPLICATED) {
2717
0
                ggml_tensor * t = ggml_get_tensor(ctx, tn.str().c_str());
2718
0
                if (t) {
2719
0
                    return t;
2720
0
                }
2721
0
            }
2722
0
            return ml.create_tensor(ctx, tn, ne, flags);
2723
0
        };
2724
2725
0
        layers.resize(n_layer);
2726
2727
        // TODO: move to a separate function
2728
0
        const auto tn = LLM_TN(arch);
2729
0
        switch (arch) {
2730
0
            case LLM_ARCH_LLAMA:
2731
0
            case LLM_ARCH_REFACT:
2732
0
            case LLM_ARCH_MINICPM:
2733
0
            case LLM_ARCH_GRANITE:
2734
0
            case LLM_ARCH_GRANITE_MOE:
2735
0
            case LLM_ARCH_MISTRAL3:
2736
0
            case LLM_ARCH_LLAMA_EMBED:
2737
0
                {
2738
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2739
2740
                    // output
2741
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2742
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
2743
2744
                    // if output is NULL, init from the input tok embed
2745
0
                    if (output == NULL) {
2746
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
2747
0
                    }
2748
2749
0
                    for (int i = 0; i < n_layer; ++i) {
2750
0
                        auto & layer = layers[i];
2751
2752
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2753
2754
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
2755
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
2756
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
2757
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
2758
2759
                        // optional bias tensors
2760
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
2761
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
2762
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
2763
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
2764
2765
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
2766
2767
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
2768
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2769
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2770
0
                        }
2771
0
                        else {
2772
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2773
0
                        }
2774
2775
0
                        if (n_expert == 0) {
2776
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
2777
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
2778
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
2779
2780
                            // optional MLP bias
2781
0
                            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
2782
0
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
2783
0
                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
2784
0
                        } else {
2785
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
2786
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, TENSOR_NOT_REQUIRED);
2787
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
2788
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
2789
2790
                            // For Granite MoE Shared
2791
0
                            if (hparams.n_ff_shexp > 0) {
2792
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
2793
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
2794
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
2795
0
                            }
2796
0
                        }
2797
0
                    }
2798
0
                } break;
2799
0
            case LLM_ARCH_LLADA:
2800
0
                {
2801
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
2802
2803
                    // output
2804
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
2805
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED);
2806
2807
                    // if output is NULL, init from the input tok embed
2808
0
                    if (output == NULL) {
2809
0
                        output =
2810
0
                            create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
2811
0
                    }
2812
2813
0
                    for (int i = 0; i < n_layer; ++i) {
2814
0
                        auto & layer = layers[i];
2815
2816
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
2817
2818
                        // Use separate Q, K, V projections without bias, matching LLaDALlamaBlock
2819
0
                        layer.wq =
2820
0
                            create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
2821
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, 0);
2822
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, 0);
2823
                        // No bias for QKV projections as per config: include_bias=false, include_qkv_bias=false
2824
0
                        layer.wo =
2825
0
                            create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
2826
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED);
2827
2828
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
2829
2830
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), { n_rot / 2 },
2831
0
                                                         TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2832
2833
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
2834
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
2835
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
2836
2837
                        // optional MLP bias
2838
0
                        layer.ffn_gate_b =
2839
0
                            create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), { n_ff }, TENSOR_NOT_REQUIRED);
2840
0
                        layer.ffn_down_b =
2841
0
                            create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED);
2842
0
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), { n_ff }, TENSOR_NOT_REQUIRED);
2843
0
                    }
2844
0
                }
2845
0
                break;
2846
0
            case LLM_ARCH_LLADA_MOE:
2847
0
                {
2848
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2849
2850
                    // output
2851
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2852
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
2853
2854
0
                    GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for llada-moe");
2855
0
                    GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for llada-moe");
2856
2857
0
                    for (int i = 0; i < n_layer; ++i) {
2858
0
                        auto & layer = layers[i];
2859
2860
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2861
2862
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
2863
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
2864
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
2865
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
2866
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
2867
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
2868
2869
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
2870
2871
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
2872
2873
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
2874
2875
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
2876
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
2877
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
2878
0
                    }
2879
0
                } break;
2880
0
            case LLM_ARCH_LLAMA4:
2881
0
                {
2882
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2883
2884
                    // output
2885
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2886
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
2887
2888
                    // if output is NULL, init from the input tok embed
2889
0
                    if (output == NULL) {
2890
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
2891
0
                    }
2892
2893
0
                    for (int i = 0; i < n_layer; ++i) {
2894
0
                        bool is_moe_layer = hparams.n_moe_layer_step > 0 && (i + 1) % hparams.n_moe_layer_step == 0;
2895
2896
0
                        auto & layer = layers[i];
2897
2898
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2899
2900
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
2901
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
2902
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
2903
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
2904
2905
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
2906
2907
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2908
2909
0
                        if (is_moe_layer) {
2910
0
                            int n_ff_exp = hparams.n_ff_exp;
2911
2912
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
2913
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
2914
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff_exp, n_embd, n_expert}, 0);
2915
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
2916
2917
                            // Shared expert
2918
0
                            const int64_t n_ff_shexp = n_ff_exp;
2919
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {    n_embd, n_ff_shexp}, 0);
2920
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd    }, 0);
2921
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {    n_embd, n_ff_shexp}, 0);
2922
0
                        } else {
2923
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
2924
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
2925
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
2926
0
                        }
2927
0
                    }
2928
0
                } break;
2929
0
            case LLM_ARCH_DECI:
2930
0
                {
2931
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
2932
2933
                    // output
2934
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
2935
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
2936
2937
                    // if output is NULL, init from the input tok embed
2938
0
                    if (output == NULL) {
2939
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
2940
0
                    }
2941
2942
0
                    for (int i = 0; i < n_layer; ++i) {
2943
0
                        auto & layer = layers[i];
2944
0
                        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa(i);
2945
0
                        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa(i);
2946
0
                        const int64_t n_embd_gqa    = hparams.n_embd_v_gqa(i);
2947
0
                        const int64_t n_ff          = hparams.n_ff(i);
2948
0
                        const int64_t n_head        = hparams.n_head(i);
2949
0
                        const int64_t n_head_kv     = hparams.n_head_kv(i);
2950
2951
0
                        if (n_head_kv == 0 && n_head > 0) {
2952
                            // linear attention for DeciLMCausalModel
2953
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2954
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
2955
0
                        }
2956
0
                        else if (n_head_kv > 0) {
2957
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
2958
2959
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
2960
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
2961
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
2962
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
2963
0
                        }
2964
2965
                        // optional bias tensors
2966
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
2967
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
2968
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
2969
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
2970
2971
0
                        if (n_ff > 0) {
2972
0
                            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
2973
0
                        }
2974
2975
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
2976
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2977
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2978
0
                        }
2979
0
                        else {
2980
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
2981
0
                        }
2982
2983
0
                        if (n_ff > 0) {
2984
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
2985
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
2986
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
2987
0
                        }
2988
2989
                        // optional MLP bias
2990
0
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
2991
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
2992
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
2993
0
                    }
2994
0
                } break;
2995
0
            case LLM_ARCH_MINICPM3:
2996
0
                {
2997
0
                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
2998
0
                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
2999
3000
0
                    const int64_t q_lora_rank  = hparams.n_lora_q;
3001
0
                    const int64_t kv_lora_rank = hparams.n_lora_kv;
3002
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3003
3004
                    // output
3005
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3006
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3007
3008
                    // if output is NULL, init from the input tok embed
3009
0
                    if (output == NULL) {
3010
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3011
0
                    }
3012
3013
0
                    for (int i = 0; i < n_layer; ++i) {
3014
0
                        auto & layer = layers[i];
3015
3016
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3017
0
                        layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
3018
3019
0
                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
3020
3021
0
                        layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
3022
0
                        layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);
3023
3024
0
                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
3025
0
                        layer.wkv_b     = create_tensor(tn(LLM_TENSOR_ATTN_KV_B,     "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
3026
0
                        layer.wo        = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {              n_head * (                      n_embd_head_v), n_embd}, 0);
3027
3028
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3029
3030
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3031
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3032
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3033
3034
0
                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3035
0
                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3036
0
                    }
3037
0
                } break;
3038
0
            case LLM_ARCH_GROK:
3039
0
                {
3040
0
                    if (n_expert == 0) {
3041
0
                        throw std::runtime_error("Grok model cannot have zero experts");
3042
0
                    }
3043
3044
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3045
3046
                    // output
3047
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3048
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3049
3050
                    // if output is NULL, init from the input tok embed
3051
0
                    if (output == NULL) {
3052
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3053
0
                    }
3054
3055
0
                    const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff/* / n_expert_used*/; // grok-1 n_ff_exp == n_ff
3056
0
                    for (int i = 0; i < n_layer; ++i) {
3057
0
                        auto & layer = layers[i];
3058
3059
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3060
3061
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3062
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3063
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3064
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3065
3066
0
                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
3067
3068
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3069
3070
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
3071
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff,   n_embd}, TENSOR_NOT_REQUIRED);
3072
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
3073
3074
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
3075
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff_exp, n_expert}, TENSOR_NOT_REQUIRED);
3076
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd,   n_expert}, 0);
3077
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
3078
3079
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3080
0
                        if (!layer.ffn_post_norm) {
3081
0
                            layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
3082
0
                        }
3083
0
                    }
3084
0
                } break;
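            // Grok: the per-expert FFN width n_ff_exp falls back to n_ff when hparams.n_ff_exp is
            // unset (grok-1 ships with n_ff_exp == n_ff), and the post-FFN norm is accepted under
            // either name: LLM_TENSOR_LAYER_OUT_NORM when present, otherwise the required
            // LLM_TENSOR_FFN_POST_NORM.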
3085
0
            case LLM_ARCH_DBRX:
3086
0
                {
3087
0
                    if (n_expert == 0) {
3088
0
                        throw std::runtime_error("DBRX model cannot have zero experts");
3089
0
                    }
3090
3091
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3092
3093
                    // output
3094
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3095
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3096
3097
0
                    for (int i = 0; i < n_layer; ++i) {
3098
0
                        auto & layer = layers[i];
3099
3100
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3101
3102
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3103
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3104
3105
0
                        layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
3106
3107
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
3108
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
3109
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
3110
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
3111
0
                    }
3112
0
                } break;
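            // DBRX fuses the attention projections: ATTN_QKV stacks Q (n_embd columns) with K and
            // V (n_embd_gqa columns each, since K/V use the smaller grouped-query head count),
            // which is where the {n_embd, n_embd + 2*n_embd_gqa} shape above comes from.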
3113
0
            case LLM_ARCH_BAICHUAN:
3114
0
                {
3115
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3116
0
                    {
3117
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3118
0
                        output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3119
0
                    }
3120
3121
0
                    for (int i = 0; i < n_layer; ++i) {
3122
0
                        auto & layer = layers[i];
3123
3124
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3125
3126
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3127
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3128
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3129
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3130
3131
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3132
3133
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3134
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3135
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3136
0
                    }
3137
0
                } break;
3138
0
            case LLM_ARCH_FALCON:
3139
0
                {
3140
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3141
3142
                    // output
3143
0
                    {
3144
0
                        output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3145
0
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3146
3147
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3148
0
                        if (!output) {
3149
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
3150
0
                        }
3151
0
                    }
3152
3153
0
                    for (int i = 0; i < n_layer; ++i) {
3154
0
                        auto & layer = layers[i];
3155
3156
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3157
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
3158
3159
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3160
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3161
3162
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3163
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3164
3165
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3166
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3167
0
                    }
3168
0
                } break;
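            // Falcon, like several other cases in this switch, ties the LM head to the token
            // embedding: OUTPUT is loaded as optional, and when it is missing TOKEN_EMBD is bound
            // again with TENSOR_DUPLICATED (the "needs to be on GPU" comment above refers to that
            // extra binding being placed where the output is computed).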
3169
0
            case LLM_ARCH_STARCODER:
3170
0
                {
3171
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3172
0
                    pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, 0);
3173
3174
                    // output
3175
0
                    {
3176
0
                        output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3177
0
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3178
0
                        output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3179
0
                        if (!output) {
3180
                            // needs to be on GPU
3181
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3182
0
                        }
3183
3184
0
                    }
3185
3186
0
                    for (int i = 0; i < n_layer; ++i) {
3187
0
                        auto & layer = layers[i];
3188
3189
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3190
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
3191
3192
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3193
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
3194
3195
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3196
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
3197
3198
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3199
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
3200
3201
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3202
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
3203
3204
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i),   {n_embd, n_ff}, 0);
3205
0
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i),     {n_ff}, 0);
3206
0
                    }
3207
0
                } break;
3208
0
            case LLM_ARCH_BERT:
3209
0
            case LLM_ARCH_NOMIC_BERT:
3210
0
            case LLM_ARCH_NOMIC_BERT_MOE:
3211
0
            case LLM_ARCH_JINA_BERT_V3:
3212
0
                {
3213
0
                    tok_embd     = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0);
3214
0
                    type_embd    = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, TENSOR_NOT_REQUIRED);
3215
3216
0
                    if (arch == LLM_ARCH_BERT) {
3217
0
                        pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,    "weight"), {n_embd, n_ctx_train}, 0);
3218
3219
0
                        cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
3220
0
                        cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {n_embd},         TENSOR_NOT_REQUIRED);
3221
3222
0
                        cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3223
0
                        cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {hparams.n_cls_out},         TENSOR_NOT_REQUIRED);
3224
0
                    }
3225
3226
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
3227
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0);
3228
3229
0
                    for (int i = 0; i < n_layer; ++i) {
3230
0
                        auto & layer = layers[i];
3231
3232
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3233
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3234
3235
0
                        if (!layer.wqkv) {
3236
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3237
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i),   {n_embd}, 0);
3238
3239
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3240
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i),   {n_embd_gqa}, 0);
3241
3242
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3243
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i),   {n_embd_gqa}, 0);
3244
0
                        }
3245
3246
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {n_embd, n_embd}, 0);
3247
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3248
3249
0
                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
3250
0
                        layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i),   {n_embd}, 0);
3251
3252
0
                        if (hparams.moe_every_n_layers > 0 && i % hparams.moe_every_n_layers == 1) {
3253
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff,   n_expert}, 0);
3254
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff,   n_embd, n_expert}, 0);
3255
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,   "weight", i), {n_embd, n_expert}, 0);
3256
0
                        } else {
3257
0
                            layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
3258
0
                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, TENSOR_NOT_REQUIRED);
3259
0
                            layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3260
0
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3261
3262
0
                            if (arch == LLM_ARCH_NOMIC_BERT) {
3263
0
                                layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
3264
0
                            }
3265
0
                        }
3266
3267
0
                        layer.layer_out_norm   = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
3268
0
                        layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i),   {n_embd}, 0);
3269
0
                    }
3270
0
                } break;
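            // BERT family: if the fused ATTN_QKV tensor is absent the loader falls back to
            // separate Q/K/V weights and biases, and with hparams.moe_every_n_layers == N > 0 the
            // layers with i % N == 1 replace the dense FFN with expert tensors
            // (FFN_UP_EXPS / FFN_DOWN_EXPS) routed through FFN_GATE_INP.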
3271
0
            case LLM_ARCH_MODERN_BERT:
3272
0
                {
3273
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3274
0
                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
3275
3276
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3277
3278
0
                    for (int i = 0; i < n_layer; ++i) {
3279
0
                        auto & layer = layers[i];
3280
3281
0
                        if (i != 0) {
3282
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3283
0
                        } else {
3284
                            // layer 0 uses identity
3285
0
                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3286
0
                        }
3287
3288
3289
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, 3 * n_embd }, 0);
3290
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT,   "weight", i), {n_embd, n_embd}, 0);
3291
3292
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, 2 * n_ff}, 0);
3293
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3294
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3295
0
                    }
3296
3297
0
                    cls       = create_tensor(tn(LLM_TENSOR_CLS,     "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
3298
0
                    cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3299
0
                    cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {hparams.n_cls_out},         TENSOR_NOT_REQUIRED);
3300
3301
0
                } break;
3302
0
            case LLM_ARCH_NEO_BERT:
3303
0
                {
3304
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0);
3305
3306
0
                    cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED);
3307
0
                    cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {n_embd},         TENSOR_NOT_REQUIRED);
3308
3309
0
                    cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3310
0
                    cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {hparams.n_cls_out},         TENSOR_NOT_REQUIRED);
3311
3312
0
                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
3313
3314
0
                    for (int i = 0; i < n_layer; ++i) {
3315
0
                        auto & layer = layers[i];
3316
3317
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3318
3319
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3320
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3321
3322
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3323
3324
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff*2}, 0);
3325
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3326
0
                    }
3327
0
                } break;
3328
0
            case LLM_ARCH_JINA_BERT_V2:
3329
0
                {
3330
0
                    tok_embd  = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0); // word_embeddings
3331
0
                    type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); // token_type_embeddings
3332
3333
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); // LayerNorm
3334
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0); // LayerNorm bias
3335
3336
0
                    cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED);
3337
0
                    cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {1},         TENSOR_NOT_REQUIRED);
3338
0
                    for (int i = 0; i < n_layer; ++i) {
3339
0
                        auto & layer = layers[i]; // JinaBertLayer
3340
3341
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
3342
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd}, 0);
3343
3344
0
                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3345
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3346
3347
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
3348
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
3349
3350
0
                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3351
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3352
3353
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
3354
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
3355
3356
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); // output_dense
3357
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd}, 0); // output_dense
3358
3359
0
                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); // output_norm
3360
0
                        layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias",   i), {n_embd}, 0);
3361
3362
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3363
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3364
3365
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED);
3366
3367
0
                        const auto tn_ffn_up_weight = tn(LLM_TENSOR_FFN_UP, "weight", i);
3368
0
                        ggml_tensor * t_ffn_up = ml.get_tensor_meta(tn_ffn_up_weight.str().c_str());
3369
0
                        const int64_t n_ffn_up = t_ffn_up ? t_ffn_up->ne[1] : n_ff;
3370
3371
0
                        GGML_ASSERT(n_ffn_up == n_ff || n_ffn_up == n_ff * 2);
3372
0
                        layer.ffn_up   = create_tensor(tn_ffn_up_weight, {n_embd, n_ffn_up}, 0);
3373
0
                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ffn_up}, TENSOR_NOT_REQUIRED);
3374
3375
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3376
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, 0);
3377
3378
0
                        layer.layer_out_norm   = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
3379
0
                        layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias",   i), {n_embd}, 0);
3380
0
                    }
3381
0
                } break;
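            // Jina BERT v2 probes the FFN up width from the file itself: ml.get_tensor_meta() looks
            // the tensor up by name so its second dimension (ne[1]) can be inspected before
            // creation, allowing either n_ff (plain MLP) or 2*n_ff (gated MLP packed into a single
            // tensor), as the GGML_ASSERT above enforces.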
3382
0
            case LLM_ARCH_BLOOM:
3383
0
                {
3384
0
                    tok_embd   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
3385
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
3386
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0);
3387
3388
                    // output
3389
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3390
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3391
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3392
3393
                    // if output is NULL, init from the input tok embed
3394
0
                    if (output == NULL) {
3395
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3396
0
                    }
3397
3398
0
                    for (int i = 0; i < n_layer; ++i) {
3399
0
                        auto & layer = layers[i];
3400
3401
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3402
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), {n_embd}, 0);
3403
3404
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3405
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias",   i), {n_embd + 2*n_embd_gqa}, 0);
3406
3407
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3408
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd}, 0);
3409
3410
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3411
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), {n_embd}, 0);
3412
3413
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3414
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, 0);
3415
3416
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
3417
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias",   i), {n_ff}, 0);
3418
0
                    }
3419
0
                } break;
3420
0
            case LLM_ARCH_MPT:
3421
0
                {
3422
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3423
0
                    pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, TENSOR_NOT_REQUIRED);
3424
3425
                    // output
3426
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3427
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, TENSOR_NOT_REQUIRED);
3428
3429
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3430
0
                    if (!output) {
3431
0
                        output    = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU
3432
0
                    }
3433
3434
0
                    for (int i = 0; i < n_layer; ++i) {
3435
0
                        auto & layer = layers[i];
3436
3437
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3438
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3439
3440
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3441
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3442
3443
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3444
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3445
3446
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3447
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3448
3449
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3450
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3451
3452
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3453
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, TENSOR_NOT_REQUIRED);
3454
3455
0
                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3456
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3457
3458
0
                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3459
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
3460
3461
                        // AWQ ScaleActivation layer
3462
0
                        layer.ffn_act = create_tensor(tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, TENSOR_NOT_REQUIRED);
3463
0
                    }
3464
0
                } break;
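            // MPT: nearly every bias and the optional Q/K norms are loaded with TENSOR_NOT_REQUIRED,
            // covering checkpoints exported with or without them; an absent optional tensor comes
            // back null, the same convention the `if (!output)` fallbacks in this switch rely on.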
3465
0
            case LLM_ARCH_STABLELM:
3466
0
                {
3467
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3468
3469
                    // output
3470
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3471
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3472
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3473
3474
0
                    for (int i = 0; i < n_layer; ++i) {
3475
0
                        auto & layer = layers[i];
3476
3477
0
                        layer.attn_norm =   create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3478
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
3479
3480
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3481
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3482
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3483
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3484
3485
                        // optional bias tensors, present in Stable LM 2 1.6B
3486
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
3487
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3488
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3489
3490
                        // optional q and k layernorms, present in StableLM 2 12B
3491
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head},    TENSOR_NOT_REQUIRED);
3492
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
3493
3494
                        // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
3495
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
3496
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, TENSOR_NOT_REQUIRED);
3497
3498
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3499
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3500
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3501
0
                    }
3502
0
                } break;
3503
0
            case LLM_ARCH_QWEN:
3504
0
                {
3505
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3506
3507
                    // output
3508
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3509
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3510
3511
0
                    for (int i = 0; i < n_layer; ++i) {
3512
0
                        auto & layer = layers[i];
3513
3514
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3515
3516
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3}, 0);
3517
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd*3}, 0);
3518
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3519
3520
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3521
3522
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2}, 0);
3523
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd}, 0);
3524
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff/2}, 0);
3525
0
                    }
3526
0
                } break;
3527
0
            case LLM_ARCH_QWEN2:
3528
0
            case LLM_ARCH_QWEN2VL:
3529
0
            case LLM_ARCH_DREAM:
3530
0
                {
3531
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3532
3533
                    // output
3534
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3535
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3536
0
                    output_b    = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   {n_vocab}, TENSOR_NOT_REQUIRED);
3537
                    // if output is NULL, init from the input tok embed
3538
0
                    if (output == NULL) {
3539
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3540
0
                    }
3541
3542
0
                    for (int i = 0; i < n_layer; ++i) {
3543
0
                        auto & layer = layers[i];
3544
3545
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3546
3547
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3548
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3549
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3550
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3551
3552
                        // optional bias tensors
3553
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
3554
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3555
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3556
3557
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3558
3559
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3560
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3561
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3562
0
                    }
3563
0
                } break;
3564
0
            case LLM_ARCH_QWEN2MOE:
3565
0
                {
3566
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3567
3568
                    // output
3569
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3570
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3571
3572
0
                    for (int i = 0; i < n_layer; ++i) {
3573
0
                        auto & layer = layers[i];
3574
3575
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3576
3577
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3578
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3579
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3580
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3581
3582
                        // optional bias tensors
3583
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
3584
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3585
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
3586
3587
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3588
3589
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
3590
3591
0
                        if (n_expert == 0) {
3592
0
                            throw std::runtime_error("n_expert must be > 0 for QWEN2MOE");
3593
0
                        }
3594
0
                        if (n_expert_used == 0) {
3595
0
                            throw std::runtime_error("n_expert_used must be > 0 for QWEN2MOE");
3596
0
                        }
3597
3598
                        // MoE branch
3599
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
3600
3601
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3602
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
3603
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3604
3605
                        // Shared expert branch
3606
0
                        const int64_t n_ff_shexp = hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff;
3607
3608
0
                        layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd}, 0);
3609
0
                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {    n_embd, n_ff_shexp}, 0);
3610
0
                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp,     n_embd}, 0);
3611
0
                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {    n_embd, n_ff_shexp}, 0);
3612
0
                    }
3613
0
                } break;
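            // Qwen2-MoE: when the GGUF omits explicit widths, each routed expert defaults to
            // n_ff / n_expert_used and the shared expert to n_ff, e.g. a hypothetical n_ff = 11008
            // with n_expert_used = 4 gives n_ff_exp = 2752 per routed expert.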
3614
0
            case LLM_ARCH_QWEN3:
3615
0
            case LLM_ARCH_QWEN3VL:
3616
0
                {
3617
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3618
3619
                    // output
3620
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3621
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3622
                    // if output is NULL, init from the input tok embed
3623
0
                    if (output == NULL) {
3624
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3625
0
                    }
3626
3627
                    // output rerank head
3628
0
                    cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED);
3629
3630
0
                    for (int i = 0; i < n_layer; ++i) {
3631
0
                        auto & layer = layers[i];
3632
3633
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3634
3635
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
3636
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3637
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3638
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
3639
3640
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
3641
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
3642
3643
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3644
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3645
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3646
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3647
0
                    }
3648
0
                } break;
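            // Qwen3: the Q projection is sized n_embd_head_k * n_head (which need not equal
            // n_embd), and both Q and K get a per-head norm of width n_embd_head_k, so ATTN_OUT
            // maps n_embd_head_k * n_head back down to n_embd.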
3649
0
            case LLM_ARCH_QWEN3MOE:
3650
0
            case LLM_ARCH_QWEN3VLMOE:
3651
0
            case LLM_ARCH_RND1:
3652
0
                {
3653
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3654
3655
                    // output
3656
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3657
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3658
                    // if output is NULL, init from the input tok embed
3659
0
                    if (output == NULL) {
3660
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3661
0
                    }
3662
3663
0
                    for (int i = 0; i < n_layer; ++i) {
3664
0
                        auto & layer = layers[i];
3665
3666
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3667
3668
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
3669
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3670
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3671
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
3672
3673
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
3674
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
3675
3676
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3677
3678
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
3679
3680
0
                        if (n_expert == 0) {
3681
0
                            throw std::runtime_error("n_expert must be > 0 for QWEN3MOE");
3682
0
                        }
3683
0
                        if (n_expert_used == 0) {
3684
0
                            throw std::runtime_error("n_expert_used must be > 0 for QWEN3MOE");
3685
0
                        }
3686
3687
                        // MoE branch
3688
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
3689
3690
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3691
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
3692
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
3693
0
                    }
3694
0
                } break;
3695
0
            case LLM_ARCH_PHI2:
3696
0
                {
3697
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3698
3699
                    // output
3700
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3701
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3702
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3703
0
                    output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   {n_vocab}, 0);
3704
3705
0
                    for (int i = 0; i < n_layer; ++i) {
3706
0
                        auto & layer = layers[i];
3707
3708
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3709
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
3710
3711
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3712
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
3713
3714
0
                        if (layer.wqkv == nullptr) {
3715
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
3716
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd}, 0);
3717
3718
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
3719
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i),   {n_embd_gqa}, 0);
3720
3721
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
3722
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa}, 0);
3723
0
                        }
3724
3725
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3726
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
3727
3728
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3729
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
3730
3731
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
3732
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
3733
0
                    }
3734
0
                } break;
3735
0
            case LLM_ARCH_PHI3:
3736
0
                {
3737
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
3738
3739
                    // output
3740
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
3741
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3742
3743
                    // if output is NULL, init from the input tok embed
3744
0
                    if (output == NULL) {
3745
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3746
0
                    }
3747
3748
0
                    for (int i = 0; i < n_layer; ++i) {
3749
0
                        auto & layer = layers[i];
3750
3751
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
3752
3753
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
3754
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
3755
3756
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
3757
3758
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
3759
0
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0);
3760
3761
0
                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3762
0
                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3763
0
                    }
3764
0
                } break;
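            // Phi-3: the optional long/short RoPE factor tensors hold n_rot/2 entries (one factor
            // per rotated dimension pair); for every layer after the first they also carry
            // TENSOR_DUPLICATED, which reads as the same factors being shared across layers.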
3765
0
            case LLM_ARCH_PHIMOE:
3766
0
                {
3767
0
                    const int64_t n_embd_head = n_embd / n_head;
3768
3769
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
3770
3771
                    // output
3772
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
3773
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3774
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), { n_embd, n_vocab }, 0);
3775
0
                    output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   { n_vocab }, 0);
3776
3777
0
                    for (int i = 0; i < n_layer; ++i) {
3778
0
                        auto & layer = layers[i];
3779
3780
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
3781
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), { n_embd }, 0);
3782
3783
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED);
3784
0
                        if (layer.wqkv == nullptr) {
3785
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
3786
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias",   i), {n_embd}, 0);
3787
3788
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
3789
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
3790
3791
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
3792
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
3793
0
                        }
3794
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
3795
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), { n_embd }, 0);
3796
3797
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
3798
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), { n_embd }, 0);
3799
3800
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert},         0);
3801
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
3802
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
3803
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
3804
3805
0
                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3806
0
                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
3807
0
                    }
3808
0
                } break;
3809
0
            case LLM_ARCH_PLAMO:
3810
0
                {
3811
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3812
3813
                    // output
3814
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3815
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3816
3817
0
                    for (int i = 0; i < n_layer; ++i) {
3818
0
                        auto & layer = layers[i];
3819
3820
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3821
3822
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
3823
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
3824
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
3825
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3826
3827
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
3828
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
3829
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
3830
0
                    }
3831
0
                } break;
3832
0
            case LLM_ARCH_PLAMO2:
3833
0
                {
3834
                    // mamba parameters
3835
0
                    const uint32_t d_conv             = hparams.ssm_d_conv;
3836
0
                    const uint32_t d_state            = hparams.ssm_d_state;
3837
0
                    const uint32_t num_heads          = hparams.ssm_dt_rank;
3838
0
                    const uint32_t intermediate_size  = hparams.ssm_d_inner;
3839
0
                    const int64_t dt_dim              = std::max(64, int(hparams.n_embd / 16));
3840
3841
                    // attention parameters
3842
0
                    const uint32_t qk_dim = hparams.n_embd_head_k;
3843
0
                    const uint32_t v_dim  = hparams.n_embd_head_v;
3844
3845
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3846
3847
                    // output
3848
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3849
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3850
                    // if output is NULL, init from the input tok embed
3851
0
                    if (output == NULL) {
3852
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3853
0
                    }
3854
3855
0
                    for (int i = 0; i < n_layer; ++i) {
3856
0
                        auto & layer = layers[i];
3857
0
                        bool is_mamba_layer = hparams.is_recurrent(i);
3858
3859
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3860
3861
0
                        if (is_mamba_layer) {
3862
0
                            layer.ssm_in       = create_tensor(tn(LLM_TENSOR_SSM_IN,     "weight", i), {n_embd, 2 * intermediate_size}, 0);
3863
0
                            layer.ssm_conv1d   = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, intermediate_size}, 0);
3864
3865
0
                            layer.ssm_x    = create_tensor(tn(LLM_TENSOR_SSM_X,  "weight", i), {intermediate_size, dt_dim + 2*d_state}, 0);
3866
0
                            layer.ssm_dt   = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_dim, num_heads}, 0);
3867
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {num_heads}, 0);
3868
3869
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {num_heads}, 0);
3870
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {num_heads}, 0);
3871
3872
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {intermediate_size, n_embd}, 0);
3873
3874
0
                            layer.ssm_dt_norm = create_tensor(tn(LLM_TENSOR_SSM_DT_NORM, i), {dt_dim}, 0);
3875
0
                            layer.ssm_b_norm = create_tensor(tn(LLM_TENSOR_SSM_B_NORM, i), {d_state}, 0);
3876
0
                            layer.ssm_c_norm = create_tensor(tn(LLM_TENSOR_SSM_C_NORM, i), {d_state}, 0);
3877
0
                        } else {
3878
0
                            const int64_t num_attention_heads = hparams.n_head(i);
3879
0
                            const int64_t q_num_heads         = num_attention_heads;
3880
0
                            const int64_t num_key_value_heads = hparams.n_head_kv(i);
3881
0
                            const int64_t k_num_heads         = num_key_value_heads;
3882
0
                            const int64_t v_num_heads         = num_key_value_heads;
3883
0
                            const int64_t q_proj_dim          = q_num_heads * qk_dim;
3884
0
                            const int64_t k_proj_dim          = k_num_heads * qk_dim;
3885
0
                            const int64_t v_proj_dim          = v_num_heads * v_dim;
3886
3887
0
                            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, q_proj_dim + k_proj_dim + v_proj_dim}, 0);
3888
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {qk_dim, num_attention_heads}, 0);
3889
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {qk_dim, k_num_heads}, 0);
3890
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {q_num_heads * v_dim, n_embd}, 0);
3891
0
                        }
3892
3893
                        // All layers have post-attention norm, FFN norm, and FFN tensors
3894
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, i), {n_embd}, 0);
3895
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3896
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3897
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
3898
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, i), {n_embd}, 0);
3899
0
                    }
3900
0
                } break;
3901
0
            case LLM_ARCH_PLAMO3:
3902
0
                {
3903
0
                    const int64_t head_dim_q = hparams.n_embd_head_k;
3904
0
                    const int64_t head_dim_v = hparams.n_embd_head_v;
3905
3906
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3907
3908
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3909
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3910
0
                    if (output == NULL) {
3911
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3912
0
                    }
3913
3914
0
                    for (int i = 0; i < n_layer; ++i) {
3915
0
                        auto & layer = layers[i];
3916
3917
0
                        const int64_t num_attention_heads = hparams.n_head(i);
3918
0
                        const int64_t num_key_value_heads = hparams.n_head_kv(i);
3919
0
                        const int64_t q_proj_dim = num_attention_heads * head_dim_q;
3920
0
                        const int64_t k_proj_dim = num_key_value_heads * head_dim_q;
3921
0
                        const int64_t v_proj_dim = num_key_value_heads * head_dim_v;
3922
0
                        const int64_t n_ff_cur   = hparams.n_ff(i);
3923
3924
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3925
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i),
3926
0
                                {n_embd, q_proj_dim + k_proj_dim + v_proj_dim}, 0);
3927
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {head_dim_q}, 0);
3928
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {head_dim_q}, 0);
3929
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {num_attention_heads * head_dim_v, n_embd}, 0);
3930
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, i), {n_embd}, 0);
3931
3932
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3933
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, i), {n_embd}, 0);
3934
3935
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff_cur * 2}, 0);
3936
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff_cur, n_embd}, 0);
3937
0
                    }
3938
0
                } break;
3939
0
            case LLM_ARCH_GPT2:
3940
0
                {
3941
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
3942
0
                    pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, 0);
3943
3944
                    // output
3945
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3946
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3947
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3948
3949
                    // if output is NULL, init from the input tok embed
3950
0
                    if (output == NULL) {
3951
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3952
0
                    }
3953
3954
0
                    for (int i = 0; i < n_layer; ++i) {
3955
0
                        auto & layer = layers[i];
3956
3957
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, 0);
3958
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "bias", i),   {n_embd}, 0);
3959
3960
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3961
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
3962
3963
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
3964
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
3965
3966
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
3967
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
3968
3969
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
3970
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
3971
3972
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
3973
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
3974
0
                    }
3975
0
                } break;
3976
0
            case LLM_ARCH_CODESHELL:
3977
0
                {
3978
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
3979
3980
                    // if tok embd is NULL, init from output
3981
0
                    if (tok_embd == NULL) {
3982
0
                        tok_embd = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
3983
0
                    }
3984
3985
                    // output
3986
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
3987
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
3988
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
3989
3990
0
                    for (int i = 0; i < n_layer; ++i) {
3991
0
                        auto & layer = layers[i];
3992
3993
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
3994
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
3995
3996
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
3997
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
3998
3999
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4000
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
4001
4002
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4003
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4004
4005
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4006
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
4007
4008
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i),   {n_embd, n_ff}, 0);
4009
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i),     {n_ff}, 0);
4010
0
                    }
4011
0
                } break;
4012
0
            case LLM_ARCH_ORION:
4013
0
                {
4014
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4015
4016
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4017
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
4018
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4019
4020
0
                    for (int i = 0; i < n_layer; ++i) {
4021
0
                        auto & layer = layers[i];
4022
4023
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4024
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
4025
4026
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4027
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4028
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4029
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4030
4031
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4032
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4033
4034
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4035
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4036
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4037
0
                    }
4038
0
                } break;
4039
0
            case LLM_ARCH_INTERNLM2:
4040
0
                {
4041
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4042
4043
                    // output
4044
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4045
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4046
4047
0
                    for (int i = 0; i < n_layer; ++i) {
4048
0
                        auto & layer = layers[i];
4049
4050
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4051
                        // layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
4052
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4053
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4054
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4055
4056
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4057
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4058
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4059
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4060
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4061
0
                    }
4062
0
                } break;
4063
0
            case LLM_ARCH_GEMMA:
4064
0
                {
4065
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4066
4067
                    // output
4068
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4069
0
                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
4070
4071
0
                    for (int i = 0; i < n_layer; ++i) {
4072
0
                        auto & layer = layers[i];
4073
4074
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4075
4076
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4077
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4078
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4079
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4080
4081
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4082
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4083
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4084
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4085
0
                    }
4086
0
                } break;
4087
0
            case LLM_ARCH_GEMMA2:
4088
0
                {
4089
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4090
4091
                    // output
4092
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4093
0
                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
4094
4095
0
                    for (int i = 0; i < n_layer; ++i) {
4096
0
                        auto & layer = layers[i];
4097
4098
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4099
4100
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4101
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4102
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4103
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4104
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4105
4106
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4107
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4108
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4109
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4110
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4111
0
                    }
4112
0
                } break;
4113
0
            case LLM_ARCH_GEMMA3:
4114
0
            case LLM_ARCH_GEMMA_EMBEDDING:
4115
0
                {
4116
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4117
4118
                    // output
4119
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4120
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4121
4122
                    // if output is NULL, init from the input tok embed
4123
0
                    if (output == NULL) {
4124
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,   "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4125
0
                    }
4126
4127
                    // Dense linear weights
4128
0
                    dense_2_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_2_OUT, "weight"), {n_embd, hparams.dense_2_feat_out}, TENSOR_NOT_REQUIRED);
4129
0
                    dense_3_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_3_OUT, "weight"), {hparams.dense_3_feat_in, n_embd}, TENSOR_NOT_REQUIRED);
4130
4131
4132
0
                    for (int i = 0; i < n_layer; ++i) {
4133
0
                        auto & layer = layers[i];
4134
4135
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4136
4137
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4138
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4139
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4140
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4141
4142
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4143
0
                        layer.attn_k_norm    = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM,    "weight", i), {n_embd_head_k}, 0);
4144
0
                        layer.attn_q_norm    = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM,    "weight", i), {n_embd_head_k}, 0);
4145
4146
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4147
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4148
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4149
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4150
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4151
0
                    }
4152
0
                } break;
4153
0
            case LLM_ARCH_GEMMA3N:
4154
0
                {
4155
0
                    const int64_t n_altup      = hparams.n_altup;
4156
0
                    const int64_t laurel_rank  = hparams.laurel_rank;
4157
0
                    const int64_t n_embd_altup = hparams.n_embd_altup;
4158
4159
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4160
                    // if output is NULL, init from the input tok embed
4161
0
                    if (output == NULL) {
4162
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4163
0
                    }
4164
4165
0
                    tok_embd           = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,           "weight"), {n_embd, n_vocab}, 0);
4166
0
                    tok_embd_per_layer = create_tensor(tn(LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "weight"), {n_embd_altup * n_layer, n_vocab}, 0);
4167
4168
0
                    altup_proj           = create_tensor(tn(LLM_TENSOR_ALTUP_PROJ,           "weight"), {n_embd, n_embd, n_altup - 1}, 0);
4169
0
                    altup_unembd_proj    = create_tensor(tn(LLM_TENSOR_ALTUP_UNEMBD_PROJ,    "weight"), {n_embd, n_embd, n_altup - 1}, 0);
4170
0
                    per_layer_model_proj = create_tensor(tn(LLM_TENSOR_PER_LAYER_MODEL_PROJ, "weight"), {n_embd, n_embd_altup * n_layer}, 0);
4171
0
                    per_layer_proj_norm  = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ_NORM,  "weight"), {n_embd_altup}, 0);
4172
4173
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4174
4175
0
                    for (int i = 0; i < n_layer; ++i) {
4176
0
                        auto & layer = layers[i];
4177
4178
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4179
4180
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4181
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
4182
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
4183
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
4184
4185
0
                        layer.attn_q_norm    = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM,    "weight", i), {n_embd_head_k}, 0);
4186
0
                        layer.attn_k_norm    = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM,    "weight", i), {n_embd_head_k}, 0);
4187
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4188
4189
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4190
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4191
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4192
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4193
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4194
4195
                        // altup & laurel
4196
0
                        layer.per_layer_inp_gate   = create_tensor(tn(LLM_TENSOR_PER_LAYER_INP_GATE,  "weight", i), {n_embd, n_embd_altup}, 0);
4197
0
                        layer.per_layer_proj       = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ,      "weight", i), {n_embd_altup, n_embd}, 0);
4198
0
                        layer.per_layer_post_norm  = create_tensor(tn(LLM_TENSOR_PER_LAYER_POST_NORM, "weight", i), {n_embd}, 0);
4199
0
                        layer.altup_correct_coef   = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_COEF,  "weight", i), {n_altup, n_altup}, 0);
4200
0
                        layer.altup_correct_scale  = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_SCALE, "weight", i), {n_embd}, 0);
4201
0
                        layer.altup_predict_coef   = create_tensor(tn(LLM_TENSOR_ALTUP_PREDICT_COEF,  "weight", i), {n_altup, n_altup * n_altup}, 0);
4202
0
                        layer.altup_router         = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER,        "weight", i), {n_embd, n_altup}, 0);
4203
0
                        layer.altup_router_norm    = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER_NORM,   "weight", i), {n_embd}, 0);
4204
0
                        layer.laurel_l             = create_tensor(tn(LLM_TENSOR_LAUREL_L,            "weight", i), {n_embd, laurel_rank}, 0);
4205
0
                        layer.laurel_r             = create_tensor(tn(LLM_TENSOR_LAUREL_R,            "weight", i), {laurel_rank, n_embd}, 0);
4206
0
                        layer.laurel_post_norm     = create_tensor(tn(LLM_TENSOR_LAUREL_POST_NORM,    "weight", i), {n_embd}, 0);
4207
0
                    }
4208
0
                } break;
4209
0
            case LLM_ARCH_STARCODER2:
4210
0
                {
4211
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4212
4213
                    // output
4214
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4215
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
4216
4217
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4218
                    // if output is NULL, init from the input tok embed
4219
0
                    if (output == NULL) {
4220
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4221
0
                    }
4222
4223
0
                    for (int i = 0; i < n_layer; ++i) {
4224
0
                        auto & layer = layers[i];
4225
4226
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4227
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
4228
4229
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4230
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4231
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4232
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4233
4234
                        // bias tensors (always present for this arch, hence flag 0)
4235
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, 0);
4236
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, 0);
4237
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, 0);
4238
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
4239
4240
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4241
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4242
4243
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4244
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4245
4246
                        // bias tensors (always present for this arch, hence flag 0)
4247
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
4248
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {  n_ff}, 0);
4249
0
                    }
4250
0
                } break;
4251
0
            case LLM_ARCH_MAMBA:
4252
0
                {
4253
0
                    const int64_t d_conv  = hparams.ssm_d_conv;
4254
0
                    const int64_t d_inner = hparams.ssm_d_inner;
4255
0
                    const int64_t d_state = hparams.ssm_d_state;
4256
0
                    const int64_t dt_rank = hparams.ssm_dt_rank;
4257
4258
                    // only an expansion factor of 2 is supported for now
4259
0
                    if (2 * n_embd != d_inner) {
4260
0
                        throw std::runtime_error("only an expansion factor of 2 is supported for now");
4261
0
                    }
4262
4263
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4264
4265
                    // output
4266
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4267
4268
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4269
                    // if output is NULL, init from the input tok embed, duplicated to allow offloading
4270
0
                    if (output == NULL) {
4271
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4272
0
                    }
4273
4274
0
                    for (int i = 0; i < n_layer; ++i) {
4275
0
                        auto & layer = layers[i];
4276
4277
                        // norm
4278
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4279
4280
0
                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
4281
4282
0
                        layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
4283
0
                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
4284
4285
0
                        layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
4286
4287
0
                        layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
4288
0
                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
4289
4290
                        // no "weight" suffix for these
4291
0
                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
4292
0
                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
4293
4294
                        // out_proj
4295
0
                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4296
0
                    }
4297
0
                } break;
4298
0
            case LLM_ARCH_MAMBA2:
4299
0
                {
4300
0
                    const int64_t d_conv  = hparams.ssm_d_conv;
4301
0
                    const int64_t d_inner = hparams.ssm_d_inner;
4302
0
                    const int64_t d_state = hparams.ssm_d_state;
4303
0
                    const int64_t n_head  = hparams.ssm_dt_rank;
4304
0
                    const int64_t n_group = hparams.ssm_n_group;
4305
0
                    const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;
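                    // Worked example of the in-projection width above, with hypothetical
                    // values (not taken from any specific checkpoint): d_inner = 8192,
                    // n_group = 8, d_state = 128, n_head = 128 gives
                    //
                    //     d_in_proj = 2*8192 + 2*8*128 + 128 = 16384 + 2048 + 128 = 18560
                    //
                    // which matches the z, x, B, C and dt blocks that the Mamba-2 mixer
                    // splits out of ssm_in.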
4306
4307
                    // only an expansion factor of 2 is supported for now
4308
0
                    GGML_ASSERT(2 * n_embd == d_inner);
4309
4310
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4311
4312
                    // output
4313
0
                    {
4314
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4315
4316
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4317
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
4318
0
                        if (output == NULL) {
4319
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4320
0
                        }
4321
0
                    }
4322
4323
0
                    for (int i = 0; i < n_layer; ++i) {
4324
0
                        auto & layer = layers[i];
4325
4326
                        // norm
4327
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4328
4329
0
                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
4330
4331
0
                        layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
4332
0
                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, 0);
4333
4334
0
                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_head}, 0);
4335
4336
                        // no "weight" suffix for these
4337
0
                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_head}, 0);
4338
0
                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_head}, 0);
4339
4340
0
                        layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
4341
4342
                        // out_proj
4343
0
                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4344
0
                    }
4345
0
                } break;
4346
0
            case LLM_ARCH_JAMBA:
4347
0
                {
4348
0
                    const int64_t d_conv  = hparams.ssm_d_conv;
4349
0
                    const int64_t d_inner = hparams.ssm_d_inner;
4350
0
                    const int64_t d_state = hparams.ssm_d_state;
4351
0
                    const int64_t dt_rank = hparams.ssm_dt_rank;
4352
4353
                    // only an expansion factor of 2 is supported for now
4354
0
                    GGML_ASSERT(2 * n_embd == d_inner);
4355
4356
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4357
4358
                    // output
4359
0
                    {
4360
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4361
4362
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4363
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
4364
0
                        if (output == NULL) {
4365
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4366
0
                        }
4367
0
                    }
4368
4369
0
                    for (int i = 0; i < n_layer; ++i) {
4370
0
                        const int64_t n_head_kv = hparams.n_head_kv(i);
4371
0
                        const int64_t n_embd_gqa = hparams.n_embd_v_gqa(i);
4372
4373
0
                        auto & layer = layers[i];
4374
4375
                        // norm
4376
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4377
4378
0
                        if (n_head_kv == 0) {
4379
                            // Mamba layer
4380
0
                            layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
4381
4382
0
                            layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
4383
0
                            layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
4384
4385
0
                            layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
4386
4387
0
                            layer.ssm_dt_norm = create_tensor(tn(LLM_TENSOR_SSM_DT_NORM, "weight", i), {dt_rank}, 0);
4388
4389
0
                            layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
4390
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
4391
4392
0
                            layer.ssm_b_norm = create_tensor(tn(LLM_TENSOR_SSM_B_NORM, "weight", i), {d_state}, 0);
4393
0
                            layer.ssm_c_norm = create_tensor(tn(LLM_TENSOR_SSM_C_NORM, "weight", i), {d_state}, 0);
4394
4395
                            // no "weight" suffix for these
4396
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
4397
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
4398
4399
                            // out_proj
4400
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4401
0
                        } else {
4402
                            // Attention layers
4403
4404
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4405
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4406
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4407
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4408
0
                        }
4409
4410
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4411
4412
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, TENSOR_NOT_REQUIRED);
4413
4414
0
                        if (layer.ffn_gate_inp) {
4415
                            // MoE
4416
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
4417
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
4418
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff, n_expert}, 0);
4419
0
                        } else {
4420
                            // FFN (no MoE)
4421
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
4422
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4423
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
4424
0
                        }
4425
0
                    }
4426
0
                } break;
4427
0
            case LLM_ARCH_GRANITE_HYBRID:
4428
0
                {
4429
                    // mamba2 Mixer SSM params
4430
                    // NOTE: int64_t for tensor dimensions
4431
0
                    const int64_t d_conv     = hparams.ssm_d_conv;
4432
0
                    const int64_t d_inner    = hparams.ssm_d_inner;
4433
0
                    const int64_t d_state    = hparams.ssm_d_state;
4434
0
                    const int64_t n_ssm_head = hparams.ssm_dt_rank;
4435
0
                    const int64_t n_group    = hparams.ssm_n_group;
4436
0
                    const int64_t d_in_proj  = 2*d_inner + 2*n_group*d_state + n_ssm_head;
4437
4438
                    // only an expansion factor of 2 is supported for now
4439
0
                    GGML_ASSERT(2 * n_embd == d_inner);
4440
4441
                    // embeddings
4442
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4443
4444
                    // output
4445
0
                    {
4446
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4447
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4448
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
4449
0
                        if (output == NULL) {
4450
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4451
0
                        }
4452
0
                    }
4453
4454
0
                    for (int i = 0; i < n_layer; ++i) {
4455
0
                        auto & layer = layers[i];
4456
4457
                        // norm
4458
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4459
4460
0
                        if (hparams.is_recurrent(i)) {
4461
                            // ssm layers
4462
0
                            layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
4463
4464
0
                            layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
4465
0
                            layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, TENSOR_NOT_REQUIRED);
4466
4467
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_ssm_head}, 0);
4468
4469
                            // no "weight" suffix for these
4470
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_ssm_head}, 0);
4471
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_ssm_head}, 0);
4472
4473
0
                            layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
4474
4475
                            // out_proj
4476
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
4477
0
                        } else {
4478
                            // attention layers (with optional bias)
4479
0
                            const int64_t n_head_i = hparams.n_head(i);
4480
0
                            const int64_t n_embd_k_gqa_i = hparams.n_embd_k_gqa(i);
4481
0
                            const int64_t n_embd_v_gqa_i = hparams.n_embd_v_gqa(i);
4482
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head_i}, 0);
4483
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa_i}, 0);
4484
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa_i}, 0);
4485
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head_i, n_embd}, 0);
4486
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},         TENSOR_NOT_REQUIRED);
4487
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_k_gqa_i}, TENSOR_NOT_REQUIRED);
4488
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_v_gqa_i}, TENSOR_NOT_REQUIRED);
4489
0
                            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},         TENSOR_NOT_REQUIRED);
4490
0
                        }
4491
4492
                        // feed forward (w/ optional biases)
4493
0
                        if (n_expert > 0) {
4494
                            // MoE FFN
4495
0
                            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4496
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
4497
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
4498
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, TENSOR_NOT_REQUIRED);
4499
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
4500
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
4501
4502
                            // For Granite MoE Shared
4503
0
                            if (hparams.n_ff_shexp > 0) {
4504
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
4505
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
4506
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
4507
0
                            }
4508
0
                        } else {
4509
0
                            layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4510
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
4511
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4512
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4513
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4514
0
                            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
4515
0
                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
4516
0
                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
4517
0
                        }
4518
0
                    }
4519
0
                } break;
4520
0
            case LLM_ARCH_XVERSE:
4521
0
                {
4522
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4523
4524
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4525
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4526
4527
0
                    for (int i = 0; i < n_layer; ++i) {
4528
0
                        auto & layer = layers[i];
4529
4530
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4531
4532
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4533
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4534
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4535
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4536
4537
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4538
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4539
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4540
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4541
0
                    }
4542
0
                } break;
4543
0
            case LLM_ARCH_COMMAND_R:
4544
0
                {
4545
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4546
4547
                    // output
4548
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4549
                    // init output from the input tok embed
4550
0
                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4551
4552
0
                    for (int i = 0; i < n_layer; ++i) {
4553
0
                        auto & layer = layers[i];
4554
4555
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4556
4557
0
                        if (n_layer >= 64) {
4558
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
4559
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
4560
0
                        }
4561
4562
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4563
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4564
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4565
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4566
4567
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4568
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4569
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4570
0
                    }
4571
0
                } break;
4572
0
            case LLM_ARCH_COHERE2:
4573
0
                {
4574
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
4575
4576
                    // output
4577
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
4578
                    // init output from the input tok embed
4579
0
                    output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab },
4580
0
                                                      TENSOR_DUPLICATED);
4581
4582
0
                    for (int i = 0; i < n_layer; ++i) {
4583
0
                        auto & layer = layers[i];
4584
4585
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
4586
4587
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0);
4588
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
4589
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
4590
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
4591
4592
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
4593
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
4594
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
4595
0
                    }
4596
0
                }
4597
0
                break;
4598
0
            case LLM_ARCH_OLMO:  // adapted from LLM_ARCH_LLAMA with norm params removed
4599
0
                {
4600
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4601
4602
                    // output
4603
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4604
                    // if output is NULL, init from the input tok embed
4605
0
                    if (output == NULL) {
4606
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4607
0
                    }
4608
4609
0
                    for (int i = 0; i < n_layer; ++i) {
4610
0
                        auto & layer = layers[i];
4611
4612
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4613
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4614
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4615
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4616
4617
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4618
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4619
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4620
0
                    }
4621
0
                } break;
4622
0
            case LLM_ARCH_OLMO2:
4623
0
                {
4624
0
                    const int64_t n_embd_head = n_embd / n_head;
4625
4626
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4627
4628
                    // output
4629
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4630
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4631
4632
0
                    for (int i = 0; i < n_layer; ++i) {
4633
0
                        auto & layer = layers[i];
4634
4635
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4636
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4637
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4638
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4639
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
4640
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_head_kv * n_embd_head}, 0);
4641
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4642
4643
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4644
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4645
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4646
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
4647
0
                    }
4648
0
                } break;
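For the OLMO2 block above (and most GQA models in this file), the {n_embd, n_embd_gqa} projection shapes and the {n_head_kv * n_embd_head} K-norm width all come from the same head arithmetic. A minimal standalone sketch of that relationship, using hypothetical hparams values that are not taken from any real model:

// Illustrative only: how n_embd_gqa relates to head counts. All values are assumptions.
#include <cstdint>

constexpr int64_t n_embd      = 4096;                      // model width (assumed)
constexpr int64_t n_head      = 32;                        // query heads (assumed)
constexpr int64_t n_head_kv   = 8;                         // K/V heads under GQA (assumed)
constexpr int64_t n_embd_head = n_embd / n_head;           // 128 per head
constexpr int64_t n_embd_gqa  = n_head_kv * n_embd_head;   // 1024 = width of wk/wv and attn_k_norm

static_assert(n_embd_gqa == 1024, "K/V projection width");
static_assert(n_embd_head * n_head == n_embd, "Q projection stays at full model width");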
4649
0
            case LLM_ARCH_SEED_OSS:
4650
0
                {
4651
0
                    const uint32_t head_dim             = hparams.n_embd_head_k;
4652
0
                    const int64_t n_qo_dim              = n_head * head_dim;
4653
0
                    const int64_t n_kv_dim              = n_head_kv * head_dim;
4654
4655
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4656
4657
                    // output
4658
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4659
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4660
                    // if output is NULL, init from the input tok embed
4661
0
                    if (output == NULL) {
4662
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4663
0
                    }
4664
4665
0
                    for (int i = 0; i < n_layer; ++i) {
4666
0
                        auto & layer = layers[i];
4667
4668
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_qo_dim}, 0);
4669
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_kv_dim}, 0);
4670
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_kv_dim}, 0);
4671
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_qo_dim, n_embd}, 0);
4672
4673
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_qo_dim},   TENSOR_NOT_REQUIRED);
4674
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_kv_dim},   TENSOR_NOT_REQUIRED);
4675
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_kv_dim},   TENSOR_NOT_REQUIRED);
4676
4677
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4678
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
4679
4680
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4681
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4682
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4683
0
                    }
4684
0
                } break;
4685
4686
0
            case LLM_ARCH_OLMOE:
4687
0
                {
4688
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4689
4690
                    // output
4691
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4692
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4693
4694
0
                    for (int i = 0; i < n_layer; ++i) {
4695
0
                        auto & layer = layers[i];
4696
4697
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4698
4699
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4700
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4701
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4702
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4703
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
4704
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
4705
4706
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4707
4708
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4709
4710
0
                        if (n_expert == 0) {
4711
0
                            throw std::runtime_error("n_expert must be > 0");
4712
0
                        }
4713
0
                        if (n_expert_used == 0) {
4714
0
                            throw std::runtime_error("n_expert_used must be > 0");
4715
0
                        }
4716
4717
                        // MoE branch
4718
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
4719
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
4720
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
4721
0
                    }
4722
0
                } break;
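The *_exps tensors created above stack all experts along a third dimension, so one tensor holds n_expert weight matrices while the router (ffn_gate_inp) remains a plain {n_embd, n_expert} projection. A rough sketch of the resulting element counts, with made-up sizes:

// Illustrative only: element counts for stacked-expert tensors. Sizes are hypothetical.
#include <cstdint>

constexpr int64_t n_embd   = 2048;
constexpr int64_t n_ff     = 1024;   // per-expert FFN width (assumed)
constexpr int64_t n_expert = 64;

// one stacked tensor, e.g. ffn_up_exps with shape {n_embd, n_ff, n_expert}
constexpr int64_t stacked_elems = n_embd * n_ff * n_expert;
// same total as 64 separate {n_embd, n_ff} matrices, but a single allocation
static_assert(stacked_elems == 64LL * 2048 * 1024, "all experts share one tensor");

// the router is dense and tiny by comparison: {n_embd, n_expert}
constexpr int64_t router_elems = n_embd * n_expert;
static_assert(router_elems == 2048 * 64, "ffn_gate_inp");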
4723
0
            case LLM_ARCH_OPENELM:
4724
0
                {
4725
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4726
4727
                    // output
4728
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4729
                    // init output from the input tok embed
4730
0
                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4731
4732
0
                    for (int i = 0; i < n_layer; ++i) {
4733
0
                        const int64_t n_head      =   hparams.n_head(i);
4734
0
                        const int64_t n_head_qkv  = 2*hparams.n_head_kv(i) + n_head;
4735
0
                        const int64_t n_ff        =   hparams.n_ff(i);
4736
4737
0
                        auto & layer = layers[i];
4738
4739
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4740
4741
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_head_qkv*n_embd_head_k}, 0);
4742
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
4743
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
4744
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head*n_embd_head_k, n_embd}, 0);
4745
4746
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4747
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
4748
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4749
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
4750
0
                    }
4751
0
                } break;
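OpenELM varies n_head, n_head_kv and n_ff per layer, which is why the fused wqkv width is recomputed inside the loop as (n_head + 2*n_head_kv) * n_embd_head_k. A hedged sketch of that arithmetic for one hypothetical layer:

// Illustrative only: fused QKV width for a single, invented OpenELM-style layer.
#include <cstdint>

constexpr int64_t n_embd_head_k = 64;                    // fixed per-head size (assumed)
constexpr int64_t n_head        = 12;                    // this layer's query heads (assumed)
constexpr int64_t n_head_kv     = 3;                     // this layer's K/V heads (assumed)
constexpr int64_t n_head_qkv    = 2*n_head_kv + n_head;  // Q heads + K heads + V heads = 18

// wqkv is created as {n_embd, n_head_qkv * n_embd_head_k}
static_assert(n_head_qkv * n_embd_head_k == 1152, "fused Q+K+V output width");
// and wo maps the concatenated query heads back: {n_head * n_embd_head_k, n_embd}
static_assert(n_head * n_embd_head_k == 768, "attention output input width");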
4752
0
            case LLM_ARCH_GPTNEOX:
4753
0
                {
4754
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4755
4756
                    // output
4757
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4758
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
4759
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4760
4761
0
                    for (int i = 0; i < n_layer; ++i) {
4762
0
                        auto & layer = layers[i];
4763
4764
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4765
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
4766
4767
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
4768
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
4769
4770
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4771
0
                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
4772
4773
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4774
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
4775
4776
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
4777
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
4778
4779
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
4780
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
4781
0
                    }
4782
0
                } break;
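For GPT-NeoX the fused wqkv/bqkv tensors pack Q, K and V back to back, which is why their last dimension is n_embd + 2*n_embd_gqa. A small sketch of the offsets one would use to slice the fused bias, under hypothetical sizes:

// Illustrative only: slice offsets inside a fused QKV tensor. Sizes are hypothetical.
#include <cstdint>

constexpr int64_t n_embd     = 2560;
constexpr int64_t n_embd_gqa = 2560;      // GPT-NeoX is plain MHA, so K/V width equals n_embd

constexpr int64_t off_q = 0;                      // Q occupies [0, n_embd)
constexpr int64_t off_k = n_embd;                 // K occupies [n_embd, n_embd + n_embd_gqa)
constexpr int64_t off_v = n_embd + n_embd_gqa;    // V occupies the final n_embd_gqa slots

static_assert(off_v + n_embd_gqa == n_embd + 2*n_embd_gqa, "total fused width matches bqkv");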
4783
0
            case LLM_ARCH_ARCTIC:
4784
0
                {
4785
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4786
4787
                    // output
4788
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4789
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4790
4791
                    // if output is NULL, init from the input tok embed
4792
0
                    if (output == NULL) {
4793
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4794
0
                    }
4795
4796
0
                    for (int i = 0; i < n_layer; ++i) {
4797
0
                        auto & layer = layers[i];
4798
4799
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4800
4801
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4802
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4803
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4804
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4805
4806
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4807
4808
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd}, 0);
4809
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd}, 0);
4810
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_embd}, 0);
4811
4812
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4813
0
                        layer.ffn_norm_exps = create_tensor(tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd}, 0);
4814
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, 0);
4815
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
4816
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
4817
0
                    }
4818
0
                } break;
4819
0
            case LLM_ARCH_DEEPSEEK:
4820
0
                {
4821
4822
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
4823
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
4824
4825
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4826
4827
                    // output
4828
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4829
                    // try to load output.weight, if not found, use token_embd (tied embeddings)
4830
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4831
0
                    if (!output) {
4832
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4833
0
                    }
4834
4835
0
                    for (int i = 0; i < n_layer; ++i) {
4836
0
                        auto & layer = layers[i];
4837
4838
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4839
4840
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
4841
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
4842
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
4843
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
4844
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4845
4846
0
                        if (i < (int) hparams.n_layer_dense_lead) {
4847
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4848
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4849
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4850
0
                        } else {
4851
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4852
4853
0
                            if (n_expert == 0) {
4854
0
                                throw std::runtime_error("n_expert must be > 0");
4855
0
                            }
4856
0
                            if (n_expert_used == 0) {
4857
0
                                throw std::runtime_error("n_expert_used must be > 0");
4858
0
                            }
4859
4860
                            // MoE branch
4861
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4862
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
4863
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4864
4865
                            // Shared expert branch
4866
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4867
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
4868
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4869
0
                        }
4870
0
                    }
4871
0
                } break;
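In the DeepSeek loader above, the first n_layer_dense_lead layers get ordinary FFN tensors while the remaining layers get routed experts plus a shared-expert branch whose width is n_ff_exp * n_expert_shared. A hedged sketch of that split, with invented hparams rather than values from a specific checkpoint:

// Illustrative only: which layers are dense vs MoE, and the shared-expert branch width.
#include <cstdint>

constexpr int64_t n_layer            = 28;
constexpr int64_t n_layer_dense_lead = 1;    // layer 0 dense, layers 1..27 MoE (assumed)
constexpr int64_t n_ff_exp           = 1408; // per-expert FFN width (assumed)
constexpr int64_t n_expert_shared    = 2;    // shared experts fused into one branch (assumed)

constexpr bool layer0_is_moe = (0 >= n_layer_dense_lead);
constexpr bool layer1_is_moe = (1 >= n_layer_dense_lead);
static_assert(!layer0_is_moe && layer1_is_moe, "only the lead layers are dense");

// ffn_up_shexp / ffn_gate_shexp are created as {n_embd, n_ff_exp * n_expert_shared}
static_assert(n_ff_exp * n_expert_shared == 2816, "shared-expert branch width");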
4872
0
            case LLM_ARCH_DEEPSEEK2:
4873
0
                {
4874
                    // lite variants include DeepSeek-V2-Lite, GigaChat3-10B-A1.8B
4875
0
                    const bool is_lite = (hparams.n_layer == 27 || hparams.n_layer == 26);
4876
4877
0
                    const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0);
4878
4879
                    // note: these are the actual head sizes you get when treating as MHA or after "decompression" using wv_b for MLA
4880
0
                    const int64_t n_embd_head_k_mla = is_mla ? hparams.n_embd_head_k_mla : hparams.n_embd_head_k;
4881
0
                    const int64_t n_embd_head_v_mla = is_mla ? hparams.n_embd_head_v_mla : hparams.n_embd_head_v;
4882
4883
0
                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
4884
0
                    const int64_t n_embd_head_qk_nope = n_embd_head_k_mla - n_embd_head_qk_rope;
4885
4886
0
                    const int64_t q_lora_rank  = hparams.n_lora_q;
4887
0
                    const int64_t kv_lora_rank = hparams.n_lora_kv;
4888
4889
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
4890
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
4891
4892
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4893
4894
                    // output
4895
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4896
                    // try to load output.weight, if not found, use token_embd (tied embeddings)
4897
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
4898
0
                    if (!output) {
4899
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4900
0
                    }
4901
4902
0
                    for (int i = 0; i < n_layer; ++i) {
4903
0
                        auto & layer = layers[i];
4904
4905
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4906
0
                        if (!is_lite) {
4907
0
                            layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
4908
0
                        }
4909
4910
0
                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
4911
4912
0
                        if (!is_lite) {
4913
0
                            layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
4914
0
                            layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k_mla}, 0);
4915
0
                        } else {
4916
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_head * n_embd_head_k_mla}, 0);
4917
0
                        }
4918
4919
0
                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + n_embd_head_qk_rope}, 0);
4920
4921
                        // note: only old legacy GGUF files will still have the unsplit wkv_b tensor
4922
0
                        if (is_mla) {
4923
0
                            layer.wk_b = create_tensor(tn(LLM_TENSOR_ATTN_K_B, "weight", i), {n_embd_head_qk_nope, kv_lora_rank, n_head}, 0);
4924
0
                            layer.wv_b = create_tensor(tn(LLM_TENSOR_ATTN_V_B, "weight", i), {kv_lora_rank, n_embd_head_v_mla, n_head}, 0);
4925
0
                        } else {
4926
0
                            layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v_mla)}, 0);
4927
0
                        }
4928
4929
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_embd_head_v_mla, n_embd}, 0);
4930
4931
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4932
4933
0
                        if (i < (int) hparams.n_layer_dense_lead) {
4934
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
4935
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4936
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4937
0
                        } else {
4938
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
4939
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
4940
4941
0
                            if (n_expert == 0) {
4942
0
                                throw std::runtime_error("n_expert must be > 0");
4943
0
                            }
4944
0
                            if (n_expert_used == 0) {
4945
0
                                throw std::runtime_error("n_expert_used must be > 0");
4946
0
                            }
4947
4948
                            // MoE branch
4949
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4950
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
4951
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
4952
4953
                            // Shared expert branch
4954
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4955
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
4956
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
4957
0
                        }
4958
0
                    }
4959
0
                } break;
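The DEEPSEEK2 branch distinguishes newer MLA GGUFs, where wkv_b is already split into wk_b/wv_b, from legacy files that still carry the fused tensor. The parameter counts have to agree either way; a sketch of that bookkeeping with hypothetical MLA head sizes:

// Illustrative only: fused vs split KV projection sizes under MLA. Values are hypothetical.
#include <cstdint>

constexpr int64_t n_head              = 16;
constexpr int64_t kv_lora_rank        = 512;
constexpr int64_t n_embd_head_qk_nope = 128;  // non-RoPE part of the key head (assumed)
constexpr int64_t n_embd_head_v_mla   = 128;  // value head after decompression (assumed)

// legacy path: one fused tensor {kv_lora_rank, n_head * (nope + v)}
constexpr int64_t fused_cols  = n_head * (n_embd_head_qk_nope + n_embd_head_v_mla);

// MLA path: wk_b {nope, kv_lora_rank, n_head} plus wv_b {kv_lora_rank, v, n_head}
constexpr int64_t split_elems = n_embd_head_qk_nope * kv_lora_rank * n_head
                              + kv_lora_rank * n_embd_head_v_mla * n_head;

static_assert(kv_lora_rank * fused_cols == split_elems, "same parameters, different layout");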
4960
0
            case LLM_ARCH_PLM:
4961
0
                {
4962
0
                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
4963
0
                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
4964
0
                    const int64_t kv_lora_rank = hparams.n_lora_kv;
4965
4966
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4967
4968
                    // output
4969
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4970
                    // output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
4971
0
                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
4972
4973
0
                    for (int i = 0; i < n_layer; ++i) {
4974
0
                        auto & layer = layers[i];
4975
4976
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
4977
4978
0
                        layer.wq        = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
4979
0
                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
4980
0
                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
4981
0
                        layer.wkv_b     = create_tensor(tn(LLM_TENSOR_ATTN_KV_B,     "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
4982
0
                        layer.wo        = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {              n_head * (                      n_embd_head_v), n_embd}, 0);
4983
4984
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
4985
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
4986
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
4987
0
                    }
4988
0
                } break;
4989
0
            case LLM_ARCH_BITNET:
4990
0
                {
4991
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
4992
4993
                    // output
4994
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
4995
4996
0
                    for (int i = 0; i < n_layer; ++i) {
4997
0
                        auto & layer = layers[i];
4998
4999
0
                        layer.attn_norm     = create_tensor(tn(LLM_TENSOR_ATTN_NORM,     "weight", i), {n_embd}, 0);
5000
0
                        layer.attn_sub_norm = create_tensor(tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd}, 0);
5001
5002
0
                        layer.wq       = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
5003
0
                        layer.wq_scale = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5004
0
                        layer.wk       = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
5005
0
                        layer.wk_scale = create_tensor(tn(LLM_TENSOR_ATTN_K,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5006
0
                        layer.wv       = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
5007
0
                        layer.wv_scale = create_tensor(tn(LLM_TENSOR_ATTN_V,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5008
0
                        layer.wo       = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5009
0
                        layer.wo_scale = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5010
5011
0
                        layer.ffn_norm     = create_tensor(tn(LLM_TENSOR_FFN_NORM,     "weight", i), {n_embd}, 0);
5012
0
                        layer.ffn_sub_norm = create_tensor(tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff}, 0);
5013
5014
0
                        layer.ffn_gate       = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
5015
0
                        layer.ffn_gate_scale = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5016
0
                        layer.ffn_down       = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
5017
0
                        layer.ffn_down_scale = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5018
0
                        layer.ffn_up         = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
5019
0
                        layer.ffn_up_scale   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
5020
0
                    }
5021
0
                } break;
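BitNet stores an optional single-element "scale" tensor alongside each quantized projection; when present it acts as a per-tensor multiplier on the projection output. A minimal plain-C++ analogue of that idea (not the actual ggml kernel):

// Illustrative only: applying an optional per-tensor scale.
#include <cstddef>

static void apply_optional_scale(float * y, size_t n, const float * scale /* may be null */) {
    if (scale == nullptr) {
        return;                 // tensor was absent (TENSOR_NOT_REQUIRED): behave as scale = 1.0
    }
    for (size_t i = 0; i < n; ++i) {
        y[i] *= scale[0];       // the "scale" tensor has shape {1}
    }
}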
5022
0
            case LLM_ARCH_T5:
5023
0
                {
5024
0
                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
5025
5026
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5027
5028
                    // output
5029
0
                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
5030
0
                    output_norm     = create_tensor(tn(LLM_TENSOR_DEC_OUTPUT_NORM, "weight"), {n_embd}, 0);
5031
5032
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5033
                    // if output is NULL, init from the input tok embed
5034
0
                    if (output == NULL) {
5035
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5036
0
                    }
5037
5038
                    // n_layer:     number of encoder_layers
5039
                    // dec_n_layer: number of decoder_layers
5040
0
                    const int dec_n_layer = hparams.dec_n_layer;
5041
0
                    if (dec_n_layer > n_layer) {
5042
0
                        layers.resize(dec_n_layer);
5043
0
                    }
5044
5045
                    // load encoder layers
5046
0
                    for (int i = 0; i < n_layer; ++i) {
5047
0
                        auto & layer = layers[i];
5048
5049
0
                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
5050
0
                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5051
5052
0
                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5053
0
                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5054
0
                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5055
0
                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5056
5057
0
                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
5058
0
                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
5059
0
                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5060
0
                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5061
0
                    }
5062
5063
                    // load decoder layers
5064
0
                    for (int i = 0; i < dec_n_layer; ++i) {
5065
0
                        auto & layer = layers[i];
5066
5067
0
                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM,  "weight", i), {n_embd}, 0);
5068
0
                        layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5069
5070
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_DEC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5071
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_DEC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5072
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_DEC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5073
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_DEC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5074
5075
0
                        layer.attn_norm_cross  = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "weight", i), {n_embd}, 0);
5076
                        // this tensor seems to be unused in the HF transformers implementation
5077
0
                        layer.attn_rel_b_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5078
5079
0
                        layer.wq_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5080
0
                        layer.wk_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5081
0
                        layer.wv_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5082
0
                        layer.wo_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5083
5084
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_DEC_FFN_NORM, "weight", i), {n_embd}, 0);
5085
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_DEC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
5086
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_DEC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5087
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_DEC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5088
0
                    }
5089
0
                } break;
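One subtlety in the T5 loader above: encoder and decoder weights share the same layers vector, indexed by layer number, so when the decoder has more layers than the encoder the vector is grown before the decoder pass. A hedged sketch of that indexing with a toy layer struct:

// Illustrative only: why layers.resize(dec_n_layer) is needed when dec_n_layer > n_layer.
#include <vector>
#include <cstddef>

struct toy_layer {
    void * attn_norm_enc = nullptr; // filled for i < n_layer      (encoder loop)
    void * attn_norm     = nullptr; // filled for i < dec_n_layer  (decoder loop)
};

int main() {
    const int n_layer     = 12;              // encoder layers (assumed)
    const int dec_n_layer = 24;              // decoder layers (assumed)

    std::vector<toy_layer> layers(n_layer);
    if (dec_n_layer > n_layer) {
        layers.resize(dec_n_layer);          // slots 12..23 then hold decoder-only tensors
    }
    return layers.size() == (size_t) dec_n_layer ? 0 : 1;
}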
5090
0
            case LLM_ARCH_T5ENCODER:
5091
0
                {
5092
0
                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
5093
5094
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5095
5096
                    // output
5097
0
                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
5098
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5099
                    // if output is NULL, init from the input tok embed
5100
0
                    if (output == NULL) {
5101
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5102
0
                    }
5103
5104
0
                    for (int i = 0; i < n_layer; ++i) {
5105
0
                        auto & layer = layers[i];
5106
5107
0
                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
5108
0
                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
5109
5110
0
                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5111
0
                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5112
0
                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5113
0
                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
5114
5115
0
                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
5116
0
                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
5117
0
                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5118
0
                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5119
0
                    }
5120
0
                } break;
5121
0
            case LLM_ARCH_JAIS:
5122
0
                {
5123
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5124
5125
                    // output
5126
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5127
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
5128
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
5129
5130
0
                    for (int i = 0; i < n_layer; ++i) {
5131
0
                        auto & layer = layers[i];
5132
5133
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, 0);
5134
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "bias", i),   {n_embd}, 0);
5135
5136
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
5137
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
5138
5139
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5140
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
5141
5142
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5143
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
5144
5145
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
5146
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
5147
5148
0
                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd, n_ff}, 0);
5149
0
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "bias", i),   {n_ff}, 0);
5150
5151
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
5152
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
5153
0
                    }
5154
0
                } break;
5155
0
            case LLM_ARCH_CHATGLM:
5156
0
                {
5157
0
                    tok_embd   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
5158
5159
                    // output
5160
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5161
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5162
                    // if output is NULL, init from the input tok embed
5163
0
                    if (output == NULL) {
5164
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5165
0
                    }
5166
5167
0
                    for (int i = 0; i < n_layer; ++i) {
5168
0
                        auto & layer = layers[i];
5169
5170
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5171
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5172
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5173
5174
0
                        if (layer.wqkv == nullptr) {
5175
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5176
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5177
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5178
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5179
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5180
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5181
0
                        }
5182
5183
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5184
5185
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5186
5187
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
5188
5189
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
5190
0
                    }
5191
0
                } break;
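In the ChatGLM block the single ffn_up tensor is created with width n_ff * 2 because the gate and up projections are fused into one matrix and split at runtime. A sketch of the split with invented sizes; which half plays the gate role is an assumption here, purely for illustration:

// Illustrative only: splitting a fused gate+up projection of width 2*n_ff.
#include <cstdint>

constexpr int64_t n_embd = 4096;
constexpr int64_t n_ff   = 13696;   // hypothetical FFN width

constexpr int64_t fused_cols = 2 * n_ff;   // shape of ffn_up: {n_embd, n_ff * 2}
constexpr int64_t gate_off   = 0;          // first n_ff columns: gate half (assumed ordering)
constexpr int64_t up_off     = n_ff;       // last n_ff columns: up half (assumed ordering)

static_assert(up_off + n_ff == fused_cols, "gate and up halves tile the fused tensor");
static_assert(n_embd * fused_cols == 2 * n_embd * n_ff, "same parameter count as two separate tensors");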
5192
0
            case LLM_ARCH_GLM4:
5193
0
                {
5194
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5195
5196
                    // output
5197
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5198
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5199
                    // if output is NULL, init from the input tok embed
5200
0
                    if (output == NULL) {
5201
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5202
0
                    }
5203
5204
0
                    for (int i = 0; i < n_layer; ++i) {
5205
0
                        auto & layer = layers[i];
5206
5207
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5208
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5209
0
                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED);
5210
5211
0
                        if (layer.wqkv == nullptr) {
5212
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5213
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5214
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5215
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5216
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5217
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5218
0
                        }
5219
5220
0
                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5221
5222
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
5223
5224
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5225
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5226
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
5227
5228
0
                        layer.ffn_post_norm  = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
5229
0
                    }
5230
0
                } break;
5231
0
            case LLM_ARCH_GLM4_MOE:
5232
0
                {
5233
0
                    const int64_t n_expert        = hparams.n_expert;
5234
0
                    const int64_t n_expert_used   = hparams.n_expert_used;
5235
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
5236
5237
0
                    GGML_ASSERT(hparams.n_expert > 0 && "n_expert must be > 0 for GLM4_MOE MoE layers");
5238
0
                    GGML_ASSERT(hparams.n_expert_used > 0 && "n_expert_used must be > 0 for GLM4_MOE MoE layers");
5239
5240
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
5241
5242
                    // output
5243
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
5244
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED);
5245
                    // if output is NULL, init from the input tok embed
5246
0
                    if (output == NULL) {
5247
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
5248
0
                    }
5249
5250
                    // Load ALL tensors, including the NextN layer, to satisfy the total tensor count,
5251
                    // but only PROCESS layers up to the last non-NextN layer in the forward pass
5252
0
                    for (int i = 0; i < n_layer; ++i) {
5253
0
                        int flags = 0;
5254
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
5255
                            // skip all tensors in the NextN layers
5256
0
                            flags |= TENSOR_SKIP;
5257
0
                        }
5258
5259
0
                        auto & layer = layers[i];
5260
5261
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, flags);
5262
5263
                        // GLM-style attention with bias terms
5264
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, flags);
5265
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, flags);
5266
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, flags);
5267
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), { n_embd_head_k * n_head }, TENSOR_NOT_REQUIRED | flags);
5268
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), { n_embd_k_gqa }, TENSOR_NOT_REQUIRED | flags);
5269
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), { n_embd_v_gqa }, TENSOR_NOT_REQUIRED | flags);
5270
5271
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, flags);
5272
5273
                        // K/Q norm tensors (optional for GLM-4.5 355B variant)
5274
0
                        layer.attn_q_norm = create_tensor(
5275
0
                            tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED | flags);
5276
0
                        layer.attn_k_norm = create_tensor(
5277
0
                            tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED | flags);
5278
5279
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), { n_embd }, flags);
5280
5281
                        // Check if this layer uses MoE or dense FFN based on n_layer_dense_lead
5282
                        // GLM 4.5 uses hybrid architecture: layer 0 is dense, layers 1+ are MoE
5283
0
                        const bool use_moe = (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead);
5284
5285
0
                        if (use_moe) {
5286
                            // MoE layers
5287
0
                            layer.ffn_gate_inp =
5288
0
                                create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, flags);
5289
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), { n_expert }, flags);
5290
5291
                            // MoE branch
5292
0
                            const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
5293
5294
0
                            layer.ffn_gate_exps = create_tensor(
5295
0
                                tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, flags);
5296
0
                            layer.ffn_down_exps = create_tensor(
5297
0
                                tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, flags);
5298
0
                            layer.ffn_up_exps = create_tensor(
5299
0
                                tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, flags);
5300
5301
                            // Shared expert
5302
0
                            if (n_expert_shared > 0) {
5303
0
                                const int64_t n_ff_shexp = n_ff_exp * n_expert_shared;
5304
0
                                layer.ffn_gate_shexp = create_tensor(
5305
0
                                    tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp }, flags);
5306
0
                                layer.ffn_down_shexp = create_tensor(
5307
0
                                    tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_shexp, n_embd }, flags);
5308
0
                                layer.ffn_up_shexp = create_tensor(
5309
0
                                    tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp }, flags);
5310
0
                            }
5311
0
                        } else {
5312
                            // Dense layers (first k layers) - GLM uses separate gate/up projections
5313
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, flags);
5314
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, flags);
5315
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), { n_embd, n_ff }, flags);
5316
0
                        }
5317
5318
                        // NextN/MTP tensors (preserved but unused) - loaded only for the last nextn_predict_layers layers
5319
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
5320
0
                            layer.nextn.eh_proj          = create_tensor(tn(LLM_TENSOR_NEXTN_EH_PROJ, "weight", i), { 2 * n_embd, n_embd }, flags);
5321
0
                            layer.nextn.enorm            = create_tensor(tn(LLM_TENSOR_NEXTN_ENORM, "weight", i), { n_embd }, flags);
5322
0
                            layer.nextn.hnorm            = create_tensor(tn(LLM_TENSOR_NEXTN_HNORM, "weight", i), { n_embd }, flags);
5323
5324
                            // Optional tensors
5325
0
                            layer.nextn.embed_tokens     = create_tensor(tn(LLM_TENSOR_NEXTN_EMBED_TOKENS, "weight", i), { n_embd, n_vocab }, flags | TENSOR_NOT_REQUIRED);
5326
0
                            layer.nextn.shared_head_head = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", i), { n_embd, n_vocab }, flags | TENSOR_NOT_REQUIRED);
5327
0
                            layer.nextn.shared_head_norm = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", i), { n_embd }, flags | TENSOR_NOT_REQUIRED);
5328
0
                        }
5329
0
                    }
5330
0
                }
5331
0
                break;
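The GLM4_MOE loop above folds layer-level decisions into a flags bitmask: NextN layers get TENSOR_SKIP, and optional tensors OR in TENSOR_NOT_REQUIRED on top of whatever the layer already carries. A hedged sketch of that pattern with stand-in flag values (the real constants live in the model loader):

// Illustrative only: combining per-layer and per-tensor flags with a bitmask.
// The flag values below are stand-ins, not the real llama.cpp constants.
#include <cstdint>

enum : int { TOY_TENSOR_NOT_REQUIRED = 1 << 0, TOY_TENSOR_SKIP = 1 << 1 };

static int layer_flags(uint32_t i, uint32_t n_layer, uint32_t nextn_predict_layers) {
    int flags = 0;
    if (nextn_predict_layers > 0 && i >= n_layer - nextn_predict_layers) {
        flags |= TOY_TENSOR_SKIP;                  // whole NextN layer is loaded but skipped
    }
    return flags;
}

int main() {
    const int f = layer_flags(/*i=*/46, /*n_layer=*/47, /*nextn_predict_layers=*/1);
    // an optional bias in that layer would then be created with:
    const int bias_flags = f | TOY_TENSOR_NOT_REQUIRED;
    return (f == TOY_TENSOR_SKIP && (bias_flags & TOY_TENSOR_SKIP)) ? 0 : 1;
}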
5332
0
            case LLM_ARCH_NEMOTRON:
5333
0
                {
5334
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5335
5336
                    // output
5337
0
                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5338
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
5339
0
                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5340
5341
0
                    for (int i = 0; i < n_layer; ++i) {
5342
0
                        auto & layer = layers[i];
5343
5344
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5345
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
5346
5347
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
5348
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
5349
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
5350
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5351
5352
                        // optional bias tensors
5353
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
5354
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5355
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
5356
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
5357
5358
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5359
0
                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
5360
5361
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5362
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5363
5364
                        // optional MLP bias
5365
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5366
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
5367
0
                    }
5368
0
                } break;
5369
0
            case LLM_ARCH_NEMOTRON_H:
5370
0
            case LLM_ARCH_NEMOTRON_H_MOE:
5371
0
                {
5372
                    // mamba2 Mixer SSM params
5373
                    // NOTE: int64_t for tensor dimensions
5374
0
                    const int64_t d_conv     = hparams.ssm_d_conv;
5375
0
                    const int64_t d_inner    = hparams.ssm_d_inner;
5376
0
                    const int64_t d_state    = hparams.ssm_d_state;
5377
0
                    const int64_t n_ssm_head = hparams.ssm_dt_rank;
5378
0
                    const int64_t n_group    = hparams.ssm_n_group;
5379
0
                    const int64_t d_in_proj  = 2*d_inner + 2*n_group*d_state + n_ssm_head;
5380
5381
                    // embeddings
5382
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5383
5384
                    // output
5385
0
                    {
5386
0
                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5387
0
                        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5388
                        // if output is NULL, init from the input tok embed, duplicated to allow offloading
5389
0
                        if (output == NULL) {
5390
0
                            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5391
0
                        }
5392
0
                    }
5393
5394
0
                    for (int i = 0; i < n_layer; ++i) {
5395
0
                        auto & layer = layers[i];
5396
5397
                        // all blocks use the attn norm
5398
0
                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5399
5400
0
                        if (hparams.is_recurrent(i)) {
5401
                            // ssm layers
5402
0
                            layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
5403
5404
0
                            layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
5405
0
                            layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, TENSOR_NOT_REQUIRED);
5406
5407
0
                            layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_ssm_head}, 0);
5408
5409
                            // no "weight" suffix for these
5410
0
                            layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_ssm_head}, 0);
5411
0
                            layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_ssm_head}, 0);
5412
5413
0
                            layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
5414
5415
                            // out_proj
5416
0
                            layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
5417
0
                        } else if (hparams.n_ff(i) == 0) {
5418
                            // attention layers (with optional bias)
5419
0
                            const int64_t n_head_i = hparams.n_head(i);
5420
0
                            const int64_t n_embd_k_gqa_i = hparams.n_embd_k_gqa(i);
5421
0
                            const int64_t n_embd_v_gqa_i = hparams.n_embd_v_gqa(i);
5422
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head_i}, 0);
5423
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa_i}, 0);
5424
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa_i}, 0);
5425
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head_i, n_embd}, 0);
5426
0
                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias",   i), {n_embd},         TENSOR_NOT_REQUIRED);
5427
0
                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias",   i), {n_embd_k_gqa_i}, TENSOR_NOT_REQUIRED);
5428
0
                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias",   i), {n_embd_v_gqa_i}, TENSOR_NOT_REQUIRED);
5429
0
                            layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd},         TENSOR_NOT_REQUIRED);
5430
0
                        } else {
5431
0
                            if (n_expert != 0) {
5432
0
                                const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
5433
0
                                const int64_t n_ff_shexp = hparams.n_ff_shexp;
5434
5435
0
                                layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), { n_embd, n_expert}, 0);
5436
0
                                layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert         }, 0);
5437
5438
                                // MoE branch
5439
0
                                layer.ffn_down_exps   = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
5440
0
                                layer.ffn_up_exps     = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
5441
5442
                                // Shared expert branch
5443
0
                                layer.ffn_down_shexp  = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
5444
0
                                layer.ffn_up_shexp    = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, 0);
5445
5446
0
                            } else {
5447
                                // mlp layers
5448
0
                                layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  hparams.n_ff(i), n_embd}, 0);
5449
0
                                layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   hparams.n_ff(i)}, 0);
5450
0
                                layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, TENSOR_NOT_REQUIRED);
5451
0
                                layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias",   i), {hparams.n_ff(i)}, TENSOR_NOT_REQUIRED);
5452
0
                            }
5453
0
                        }
5454
0
                    }
5455
0
                } break;
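The Nemotron-H / Nemotron-H-MoE block above picks a different tensor set per layer: recurrent layers get the Mamba-2 SSM tensors, layers whose FFN width is zero get attention tensors, and the remaining layers get dense or MoE FFN tensors. A minimal standalone sketch of that three-way dispatch follows; the is_recurrent and n_ff tables are invented for illustration and are not read from real hyperparameters.

// Schematic of the hybrid per-layer dispatch above. The tables are made up;
// in the listing the decision comes from hparams.is_recurrent(i) and hparams.n_ff(i).
#include <cstdio>
#include <vector>

int main() {
    const std::vector<bool> is_recurrent = { true, false, true, false };
    const std::vector<int>  n_ff         = { 0,    0,     0,    8192 };  // 0 => attention layer

    for (size_t i = 0; i < is_recurrent.size(); ++i) {
        if (is_recurrent[i]) {
            std::printf("layer %zu: SSM tensors (ssm_in, conv1d, dt, A, D, norm, out)\n", i);
        } else if (n_ff[i] == 0) {
            std::printf("layer %zu: attention tensors (wq, wk, wv, wo + optional biases)\n", i);
        } else {
            std::printf("layer %zu: FFN or MoE tensors (gate/down/up or expert variants)\n", i);
        }
    }
    return 0;
}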
5456
0
            case LLM_ARCH_EXAONE:
5457
0
                {
5458
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5459
5460
                    // output
5461
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5462
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5463
5464
                    // if output is NULL, init from the input tok embed
5465
0
                    if (output == NULL) {
5466
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5467
0
                    }
5468
5469
0
                    for (int i = 0; i < n_layer; ++i) {
5470
0
                        auto & layer = layers[i];
5471
5472
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5473
5474
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5475
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5476
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5477
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
5478
5479
0
                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM,   "weight", i), {n_embd}, 0);
5480
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
5481
0
                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd,   n_ff}, 0);
5482
0
                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN,   "weight", i), {  n_ff, n_embd}, 0);
5483
0
                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,     "weight", i), {n_embd,   n_ff}, 0);
5484
0
                    }
5485
0
                } break;
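Several architectures in this listing (EXAONE above, and CHAMELEON and ARCEE further down) load output.weight as not-required and fall back to a duplicated token-embedding tensor when the checkpoint ties its output head to the input embeddings. A standalone sketch of that lookup-with-fallback idea, using a hypothetical TensorMap/get_tensor pair rather than the real model loader:

// Standalone illustration of the "optional tensor with tied-embedding fallback"
// pattern. TensorMap, Tensor and get_tensor are hypothetical stand-ins.
#include <cstdio>
#include <map>
#include <optional>
#include <string>
#include <vector>

struct Tensor {
    std::string       name;
    std::vector<long> shape;
};

using TensorMap = std::map<std::string, Tensor>;

// Return the named tensor if present, std::nullopt otherwise
// (the analogue of loading with a not-required flag instead of throwing).
static std::optional<Tensor> get_tensor(const TensorMap & tensors, const std::string & name) {
    auto it = tensors.find(name);
    if (it == tensors.end()) {
        return std::nullopt;
    }
    return it->second;
}

int main() {
    TensorMap tensors = {
        { "token_embd.weight", { "token_embd.weight", { 4096, 32000 } } },
        // no separate "output.weight": this checkpoint ties its output head
    };

    // try the dedicated output head first
    std::optional<Tensor> output = get_tensor(tensors, "output.weight");

    // if it is absent, reuse the input embedding (weight tying)
    if (!output) {
        output = get_tensor(tensors, "token_embd.weight");
        std::printf("output head tied to token embeddings\n");
    }

    if (output) {
        std::printf("output tensor: %s [%ld, %ld]\n",
                    output->name.c_str(), output->shape[0], output->shape[1]);
    }
    return 0;
}

In the listing itself the same effect is obtained by passing TENSOR_NOT_REQUIRED to create_tensor and then checking the returned pointer for NULL before duplicating the token embedding.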
5486
0
            case LLM_ARCH_EXAONE4:
5487
0
                {
5488
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5489
5490
                    // output
5491
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5492
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5493
5494
                    // if output is NULL, init from the input tok embed
5495
0
                    if (output == NULL) {
5496
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5497
0
                    }
5498
5499
0
                    for (int i = 0; i < n_layer; ++i) {
5500
0
                        auto & layer = layers[i];
5501
5502
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
5503
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
5504
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
5505
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5506
5507
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
5508
5509
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
5510
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
5511
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
5512
5513
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
5514
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5515
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5516
0
                        layer.ffn_post_norm  = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
5517
0
                    }
5518
0
                } break;
5519
0
            case LLM_ARCH_RWKV6:
5520
0
                {
5521
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5522
5523
                    // Block 0, LN0
5524
0
                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
5525
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
5526
5527
                    // output
5528
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5529
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
5530
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5531
5532
0
                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
5533
0
                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
5534
0
                    const int head_size = hparams.wkv_head_size;
5535
0
                    const int attn_hidden_size = n_embd;
5536
0
                    const int ffn_size = hparams.n_ff_arr[0];
5537
5538
0
                    for (int i = 0; i < n_layer; ++i) {
5539
0
                        auto & layer = layers[i];
5540
5541
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5542
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
5543
5544
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
5545
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, 0);
5546
5547
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
5548
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
5549
5550
0
                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
5551
0
                        layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5552
0
                        layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5553
0
                        layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5554
0
                        layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5555
0
                        layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, TENSOR_NOT_REQUIRED);
5556
0
                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, TENSOR_NOT_REQUIRED);
5557
0
                        GGML_ASSERT(!(layer.time_mix_lerp_fused == NULL && layer.time_mix_lerp_w == NULL));
5558
5559
0
                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, 0);
5560
0
                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
5561
0
                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
5562
0
                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
5563
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
5564
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
5565
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5566
0
                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
5567
5568
0
                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
5569
0
                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
5570
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5571
5572
0
                        layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
5573
0
                        layer.channel_mix_lerp_r = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, 0);
5574
5575
0
                        layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
5576
0
                        layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
5577
0
                        layer.channel_mix_receptance = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "weight", i), {n_embd, n_embd}, 0);
5578
0
                    }
5579
5580
0
                } break;
5581
0
            case LLM_ARCH_RWKV6QWEN2:
5582
0
                {
5583
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5584
5585
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5586
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, TENSOR_NOT_REQUIRED);
5587
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5588
5589
0
                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
5590
0
                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
5591
0
                    const int head_size = hparams.wkv_head_size;
5592
0
                    const int attn_hidden_size = n_embd;
5593
0
                    const int n_head_kv = hparams.n_head_kv();
5594
0
                    int attn_key_value_size;
5595
0
                    if (n_head_kv == 0 || attn_hidden_size / head_size == n_head_kv) {
5596
0
                        attn_key_value_size = attn_hidden_size;
5597
0
                    } else {
5598
0
                        attn_key_value_size = n_head_kv * head_size;
5599
0
                    }
5600
5601
0
                    for (int i = 0; i < n_layer; ++i) {
5602
0
                        auto & layer = layers[i];
5603
5604
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5605
5606
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
5607
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
5608
5609
0
                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
5610
0
                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
5611
5612
0
                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, TENSOR_NOT_REQUIRED);
5613
0
                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
5614
0
                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
5615
0
                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
5616
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {n_embd, attn_key_value_size}, 0);
5617
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {n_embd, attn_key_value_size}, 0);
5618
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5619
0
                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
5620
                        // optional bias tensors
5621
0
                        layer.time_mix_key_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
5622
0
                        layer.time_mix_value_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "bias", i), {attn_key_value_size}, TENSOR_NOT_REQUIRED);
5623
0
                        layer.time_mix_receptance_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "bias", i), {attn_hidden_size}, TENSOR_NOT_REQUIRED);
5624
5625
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5626
5627
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5628
5629
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
5630
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5631
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5632
0
                    }
5633
0
                } break;
5634
0
            case LLM_ARCH_RWKV7:
5635
0
                {
5636
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5637
5638
                    // Block 0, LN0
5639
0
                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
5640
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
5641
5642
                    // output
5643
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5644
0
                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
5645
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5646
5647
0
                    const int n_lora_decay = hparams.n_lora_decay;
5648
0
                    const int n_lora_iclr = hparams.n_lora_iclr;
5649
0
                    const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
5650
0
                    const int n_lora_gate = hparams.n_lora_gate;
5651
0
                    const int attn_hidden_size = n_embd;
5652
0
                    const int ffn_size = hparams.n_ff_arr[0];
5653
5654
0
                    for (int i = 0; i < n_layer; ++i) {
5655
0
                        auto & layer = layers[i];
5656
5657
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5658
0
                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
5659
5660
0
                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
5661
0
                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, 0);
5662
5663
0
                        layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
5664
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
5665
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
5666
5667
0
                        layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
5668
0
                        layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
5669
0
                        layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
5670
5671
0
                        if (i == 0) {
5672
                            // actually not used
5673
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5674
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
5675
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
5676
0
                        } else {
5677
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5678
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
5679
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
5680
0
                        }
5681
5682
0
                        layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, 0);
5683
0
                        layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, 0);
5684
5685
0
                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
5686
5687
0
                        layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
5688
0
                        layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
5689
0
                        layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
5690
5691
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
5692
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
5693
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5694
5695
0
                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
5696
0
                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
5697
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5698
5699
0
                        layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
5700
5701
0
                        layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
5702
0
                        layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
5703
0
                    }
5704
5705
0
                } break;
5706
0
            case LLM_ARCH_ARWKV7:
5707
0
                {
5708
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5709
5710
                    // output
5711
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5712
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
5713
5714
0
                    const int n_lora_decay = hparams.n_lora_decay;
5715
0
                    const int n_lora_iclr = hparams.n_lora_iclr;
5716
0
                    const int n_lora_value_res_mix = hparams.n_lora_value_res_mix;
5717
0
                    const int n_lora_gate = hparams.n_lora_gate;
5718
0
                    const int attn_hidden_size = n_embd;
5719
5720
0
                    for (int i = 0; i < n_layer; ++i) {
5721
0
                        auto & layer = layers[i];
5722
5723
0
                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5724
5725
0
                        layer.time_mix_w0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W0, "weight", i), {n_embd}, 0);
5726
0
                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, n_lora_decay}, 0);
5727
0
                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {n_lora_decay, n_embd}, 0);
5728
5729
0
                        layer.time_mix_a0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A0, "weight", i), {n_embd}, 0);
5730
0
                        layer.time_mix_a1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A1, "weight", i), {n_embd, n_lora_iclr}, 0);
5731
0
                        layer.time_mix_a2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_A2, "weight", i), {n_lora_iclr, n_embd}, 0);
5732
5733
0
                        if (i == 0) {
5734
                            // actually not used
5735
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5736
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_iclr}, 0);
5737
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_iclr, n_embd}, 0);
5738
0
                        } else {
5739
0
                            layer.time_mix_v0 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V0, "weight", i), {n_embd}, 0);
5740
0
                            layer.time_mix_v1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V1, "weight", i), {n_embd, n_lora_value_res_mix}, 0);
5741
0
                            layer.time_mix_v2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_V2, "weight", i), {n_lora_value_res_mix, n_embd}, 0);
5742
0
                        }
5743
5744
0
                        layer.time_mix_g1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G1, "weight", i), {n_embd, n_lora_gate}, TENSOR_NOT_REQUIRED);
5745
0
                        layer.time_mix_g2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_G2, "weight", i), {n_lora_gate, n_embd}, TENSOR_NOT_REQUIRED);
5746
5747
0
                        try {
5748
0
                            layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 6}, 0);
5749
0
                        } catch(std::runtime_error & e) {
5750
                            // ARWKV models may not have gate tensors
5751
0
                            layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
5752
0
                        }
5753
5754
0
                        layer.time_mix_k_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_K, "weight", i), {attn_hidden_size}, 0);
5755
0
                        layer.time_mix_k_a = create_tensor(tn(LLM_TENSOR_TIME_MIX_K_A, "weight", i), {attn_hidden_size}, 0);
5756
0
                        layer.time_mix_r_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_R_K, "weight", i), {attn_hidden_size}, 0);
5757
5758
0
                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
5759
0
                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
5760
0
                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
5761
5762
0
                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED);
5763
0
                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
5764
0
                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
5765
5766
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5767
5768
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
5769
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5770
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5771
0
                    }
5772
5773
0
                } break;
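The ARWKV7 block above first tries to load the fused time-mix lerp tensor with six slots and, if that fails, retries with five (gate-less ARWKV exports). A schematic sketch of that try-one-shape-then-the-other pattern, with an invented load_with_shape helper standing in for the real loader:

// Schematic illustration of the "try one tensor shape, fall back to another"
// pattern used for the fused time-mix lerp tensor. The helper and shape values
// are illustrative, not llama.cpp's actual loader.
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

using Shape = std::vector<long>;

static Shape load_with_shape(const std::map<std::string, Shape> & file,
                             const std::string & name, const Shape & expected) {
    auto it = file.find(name);
    if (it == file.end() || it->second != expected) {
        throw std::runtime_error("tensor " + name + " missing or shape mismatch");
    }
    return it->second;
}

int main() {
    // pretend this checkpoint stores a 5-slot fused lerp (no gate component)
    std::map<std::string, Shape> file = {
        { "blk.0.time_mix_lerp_fused.weight", { 2048, 1, 1, 5 } },
    };

    Shape lerp;
    try {
        lerp = load_with_shape(file, "blk.0.time_mix_lerp_fused.weight", { 2048, 1, 1, 6 });
    } catch (const std::runtime_error &) {
        // gate-less export: retry with the smaller fused shape
        lerp = load_with_shape(file, "blk.0.time_mix_lerp_fused.weight", { 2048, 1, 1, 5 });
    }

    std::printf("fused lerp loaded with %ld slots\n", lerp.back());
    return 0;
}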
5774
0
            case LLM_ARCH_CHAMELEON:
5775
0
                {
5776
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5777
5778
                    // output
5779
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5780
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
5781
                    // if output is NULL, init from the input tok embed
5782
0
                    if (output == NULL) {
5783
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
5784
0
                    }
5785
5786
0
                    for (int i = 0; i < n_layer; ++i) {
5787
0
                        auto & layer = layers[i];
5788
5789
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5790
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
5791
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
5792
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i),  {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED);
5793
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i),  {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
5794
5795
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
5796
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
5797
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
5798
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
5799
5800
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5801
5802
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
5803
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
5804
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
5805
0
                    }
5806
0
                } break;
5807
0
            case LLM_ARCH_WAVTOKENIZER_DEC:
5808
0
                {
5809
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hparams.n_embd_features, n_vocab}, 0);
5810
5811
0
                    conv1d   = create_tensor(tn(LLM_TENSOR_CONV1D, "weight"), {7, hparams.n_embd_features, hparams.posnet.n_embd}, 0);
5812
0
                    conv1d_b = create_tensor(tn(LLM_TENSOR_CONV1D, "bias"),   {1, hparams.posnet.n_embd}, 0);
5813
5814
                    // posnet
5815
0
                    {
5816
0
                        const int64_t n_embd = hparams.posnet.n_embd;
5817
5818
0
                        for (uint32_t i = 0; i < hparams.posnet.n_layer; ++i) {
5819
0
                            auto & layer = layers[i].posnet;
5820
5821
                            // posnet:
5822
                            //
5823
                            //  - resnet
5824
                            //  - resnet
5825
                            //  - attn
5826
                            //  - resnet
5827
                            //  - resnet
5828
                            //  - norm
5829
                            //
5830
0
                            switch (i) {
5831
0
                                case 0:
5832
0
                                case 1:
5833
0
                                case 3:
5834
0
                                case 4:
5835
0
                                    {
5836
0
                                        layer.norm1   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "weight", i), {1, n_embd}, 0);
5837
0
                                        layer.norm1_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "bias",   i), {1, n_embd}, 0);
5838
5839
0
                                        layer.conv1   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "weight", i), {3, n_embd, n_embd}, 0);
5840
0
                                        layer.conv1_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "bias",   i), {1, n_embd}, 0);
5841
5842
0
                                        layer.norm2   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "weight", i), {1, n_embd}, 0);
5843
0
                                        layer.norm2_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "bias",   i), {1, n_embd}, 0);
5844
5845
0
                                        layer.conv2   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "weight", i), {3, n_embd, n_embd}, 0);
5846
0
                                        layer.conv2_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "bias",   i), {1, n_embd}, 0);
5847
0
                                    } break;
5848
0
                                case 2:
5849
0
                                    {
5850
0
                                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
5851
0
                                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
5852
5853
0
                                        layer.attn_q      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "weight", i), {1, n_embd, n_embd}, 0);
5854
0
                                        layer.attn_q_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "bias",   i), {1, n_embd}, 0);
5855
5856
0
                                        layer.attn_k      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "weight", i), {1, n_embd, n_embd}, 0);
5857
0
                                        layer.attn_k_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "bias",   i), {1, n_embd}, 0);
5858
5859
0
                                        layer.attn_v      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "weight", i), {1, n_embd, n_embd}, 0);
5860
0
                                        layer.attn_v_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "bias",   i), {1, n_embd}, 0);
5861
5862
0
                                        layer.attn_o      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "weight", i), {1, n_embd, n_embd}, 0);
5863
0
                                        layer.attn_o_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "bias",   i), {1, n_embd}, 0);
5864
0
                                    } break;
5865
0
                                case 5:
5866
0
                                    {
5867
0
                                        layer.norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
5868
0
                                        layer.norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
5869
0
                                    } break;
5870
0
                                default: GGML_ABORT("unknown posnet layer");
5871
0
                            };
5872
0
                        }
5873
0
                    }
5874
5875
0
                    GGML_ASSERT(hparams.posnet.n_embd == hparams.convnext.n_embd);
5876
5877
0
                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {hparams.posnet.n_embd}, 0);
5878
0
                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {hparams.posnet.n_embd}, 0);
5879
5880
                    // convnext
5881
0
                    {
5882
0
                        const int64_t n_embd = hparams.convnext.n_embd;
5883
5884
0
                        for (uint32_t i = 0; i < hparams.convnext.n_layer; ++i) {
5885
0
                            auto & layer = layers[i].convnext;
5886
5887
0
                            layer.dw     = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "weight", i), {7, 1, n_embd}, 0);
5888
0
                            layer.dw_b   = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "bias",   i), {1, n_embd}, 0);
5889
5890
0
                            layer.norm   = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "weight", i), {n_embd}, 0);
5891
0
                            layer.norm_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "bias",   i), {n_embd}, 0);
5892
5893
0
                            layer.pw1    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "weight", i), {n_embd, n_ff}, 0);
5894
0
                            layer.pw1_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "bias",   i), {n_ff}, 0);
5895
5896
0
                            layer.pw2    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "weight", i), {n_ff, n_embd}, 0);
5897
0
                            layer.pw2_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "bias",   i), {n_embd}, 0);
5898
5899
0
                            layer.gamma  = create_tensor(tn(LLM_TENSOR_CONVNEXT_GAMMA, "weight", i), {n_embd}, 0);
5900
0
                        }
5901
5902
                        // output
5903
0
                        output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5904
0
                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
5905
0
                    }
5906
5907
0
                    output   = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hparams.convnext.n_embd, n_embd}, 0);
5908
0
                    output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"),   {n_embd}, 0);
5909
0
                } break;
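The WavTokenizer decoder above selects a tensor set per posnet layer index: indices 0, 1, 3 and 4 are resnet blocks, index 2 is an attention block, and index 5 is the closing norm. A small sketch of that index-to-block mapping; the enum and helper names are invented for illustration:

// Map a posnet layer index to the kind of tensor set it needs,
// mirroring the switch in the listing above.
#include <cstdio>
#include <stdexcept>

enum class PosnetBlock { Resnet, Attention, Norm };

static PosnetBlock classify(unsigned i) {
    switch (i) {
        case 0: case 1: case 3: case 4: return PosnetBlock::Resnet;     // norm1/conv1/norm2/conv2
        case 2:                         return PosnetBlock::Attention;  // attn_norm + q/k/v/out
        case 5:                         return PosnetBlock::Norm;       // final norm
        default: throw std::runtime_error("unknown posnet layer");
    }
}

int main() {
    for (unsigned i = 0; i < 6; ++i) {
        const char * kind = "";
        switch (classify(i)) {
            case PosnetBlock::Resnet:    kind = "resnet";    break;
            case PosnetBlock::Attention: kind = "attention"; break;
            case PosnetBlock::Norm:      kind = "norm";      break;
        }
        std::printf("posnet layer %u: %s\n", i, kind);
    }
    return 0;
}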
5910
0
            case LLM_ARCH_BAILINGMOE:
5911
0
                {
5912
0
                    const int64_t n_ff_exp            = hparams.n_ff_exp;
5913
0
                    const int64_t n_expert_shared     = hparams.n_expert_shared;
5914
5915
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5916
5917
                    // output
5918
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5919
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
5920
5921
0
                    for (int i = 0; i < n_layer; ++i) {
5922
0
                        auto & layer = layers[i];
5923
5924
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
5925
5926
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_head * n_rot}, 0);
5927
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
5928
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
5929
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_rot, n_embd}, 0);
5930
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
5931
5932
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
5933
5934
0
                        if (n_expert == 0) {
5935
0
                            throw std::runtime_error("n_expert must be > 0");
5936
0
                        }
5937
0
                        if (n_expert_used == 0) {
5938
0
                            throw std::runtime_error("n_expert_used must be > 0");
5939
0
                        }
5940
5941
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
5942
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
5943
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
5944
5945
0
                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
5946
0
                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
5947
0
                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
5948
0
                    }
5949
0
                } break;
5950
0
            case LLM_ARCH_BAILINGMOE2:
5951
0
                {
5952
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
5953
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
5954
5955
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
5956
5957
                    // output
5958
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
5959
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
5960
5961
0
                    GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for bailingmoe2");
5962
0
                    GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for bailingmoe2");
5963
5964
0
                    for (int i = 0; i < n_layer; ++i) {
5965
0
                        int flags = 0;
5966
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
5967
                            // skip all tensors in the NextN layers
5968
0
                            flags |= TENSOR_SKIP;
5969
0
                        }
5970
5971
0
                        auto & layer = layers[i];
5972
5973
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, flags);
5974
5975
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, flags);
5976
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, flags);
5977
5978
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, flags);
5979
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, flags);
5980
5981
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, flags);
5982
5983
0
                        if (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
5984
0
                            const int64_t n_ff_shexp = (hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff_exp) * n_expert_shared;
5985
5986
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, flags);
5987
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED | flags);
5988
5989
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, flags);
5990
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, flags);
5991
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, flags);
5992
5993
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_shexp}, flags);
5994
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, flags);
5995
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, flags);
5996
0
                        } else { // Dense layers
5997
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, flags);
5998
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, flags);
5999
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, flags);
6000
0
                        }
6001
6002
                        // NextN/MTP tensors (preserved but unused) - conditionally load for last nextn_predict_layers
6003
0
                        if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
6004
0
                            layer.nextn.eh_proj          = create_tensor(tn(LLM_TENSOR_NEXTN_EH_PROJ, "weight", i), { 2 * n_embd, n_embd }, flags);
6005
0
                            layer.nextn.embed_tokens     = create_tensor(tn(LLM_TENSOR_NEXTN_EMBED_TOKENS, "weight", i), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED | flags);
6006
0
                            layer.nextn.enorm            = create_tensor(tn(LLM_TENSOR_NEXTN_ENORM, "weight", i), { n_embd }, flags);
6007
0
                            layer.nextn.hnorm            = create_tensor(tn(LLM_TENSOR_NEXTN_HNORM, "weight", i), { n_embd }, flags);
6008
0
                            layer.nextn.shared_head_head = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", i), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED | flags);
6009
0
                            layer.nextn.shared_head_norm = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", i), { n_embd }, TENSOR_NOT_REQUIRED | flags);
6010
0
                            layer.layer_out_norm         = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, flags);
6011
0
                        }
6012
0
                    }
6013
0
                } break;
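In the BailingMoE2 block above, layers that fall inside the trailing nextn_predict_layers window have a skip flag OR-ed into every tensor they register, so the NextN/MTP weights stay addressable without being used for inference. A minimal sketch of that per-layer flag pattern; the flag values and register_tensor helper are illustrative, not the real interface:

// Minimal sketch of the per-layer flag pattern: trailing NextN/MTP layers get a
// "skip" bit OR-ed into every tensor they register.
#include <cstdint>
#include <cstdio>

enum TensorFlags : uint32_t {
    TENSOR_REQUIRED_ONLY = 0,
    TENSOR_NOT_REQUIRED  = 1u << 0,  // absence is not an error
    TENSOR_SKIP          = 1u << 1,  // register the name, but do not load data
};

static void register_tensor(const char * name, int layer, uint32_t flags) {
    std::printf("layer %d: %-24s%s%s\n", layer, name,
                (flags & TENSOR_SKIP)         ? " [skip]"         : "",
                (flags & TENSOR_NOT_REQUIRED) ? " [not required]" : "");
}

int main() {
    const int n_layer              = 4;
    const int nextn_predict_layers = 1;  // last layer is a NextN/MTP layer

    for (int i = 0; i < n_layer; ++i) {
        uint32_t flags = TENSOR_REQUIRED_ONLY;
        if (nextn_predict_layers > 0 && i >= n_layer - nextn_predict_layers) {
            flags |= TENSOR_SKIP;  // preserved in the file, unused at inference
        }

        register_tensor("attn_norm.weight", i, flags);
        register_tensor("embed_tokens.weight", i, flags | TENSOR_NOT_REQUIRED);
    }
    return 0;
}

The listing applies the same OR-ed flag to every tensor in such a layer, including the optional NextN embedding and shared-head tensors.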
6014
0
            case LLM_ARCH_DOTS1:
6015
0
                {
6016
0
                    const int64_t n_ff_exp        = hparams.n_ff_exp;
6017
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
6018
6019
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6020
6021
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6022
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6023
6024
0
                    for (int i = 0; i < n_layer; ++i) {
6025
0
                        auto & layer = layers[i];
6026
6027
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6028
6029
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6030
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6031
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6032
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6033
6034
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6035
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6036
6037
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6038
6039
0
                        if (i < (int) hparams.n_layer_dense_lead) {
6040
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6041
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6042
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6043
0
                        } else {
6044
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6045
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
6046
6047
0
                            if (n_expert == 0) {
6048
0
                                throw std::runtime_error("n_expert must be > 0");
6049
0
                            }
6050
0
                            if (n_expert_used == 0) {
6051
0
                                throw std::runtime_error("n_expert_used must be > 0");
6052
0
                            }
6053
6054
                            // MoE branch
6055
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6056
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
6057
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6058
6059
                            // Shared expert branch
6060
0
                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
6061
0
                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
6062
0
                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
6063
0
                        }
6064
0
                    }
6065
0
                } break;
6066
0
            case LLM_ARCH_ARCEE:
6067
0
                {
6068
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6069
6070
                    // output
6071
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6072
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6073
6074
                    // if output is NULL, init from the input tok embed
6075
0
                    if (output == NULL) {
6076
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6077
0
                    }
6078
6079
0
                    for (int i = 0; i < n_layer; ++i) {
6080
0
                        auto & layer = layers[i];
6081
6082
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6083
6084
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6085
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6086
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6087
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6088
6089
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6090
6091
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6092
6093
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6094
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6095
0
                    }
6096
0
                } break;
6097
0
            case LLM_ARCH_AFMOE:
6098
0
                {
6099
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6100
6101
                    // output
6102
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6103
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6104
6105
                    // if output is NULL, init from the input tok embed
6106
0
                    if (output == NULL) {
6107
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6108
0
                    }
6109
6110
0
                    const int64_t n_ff_exp = hparams.n_ff_exp;
6111
0
                    const int64_t n_expert_shared = hparams.n_expert_shared;
6112
6113
0
                    for (int i = 0; i < n_layer; ++i) {
6114
0
                        auto & layer = layers[i];
6115
6116
                        // dual attention normalization
6117
0
                        layer.attn_norm      = create_tensor(tn(LLM_TENSOR_ATTN_NORM,      "weight", i), {n_embd}, 0);
6118
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
6119
6120
                        // attention projections
6121
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6122
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6123
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6124
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6125
6126
                        // Q/K normalization
6127
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6128
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6129
6130
                        // attention gating
6131
0
                        layer.wqkv_gate = create_tensor(tn(LLM_TENSOR_ATTN_GATE, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6132
6133
                        // dual ffn normalization
6134
0
                        layer.ffn_norm      = create_tensor(tn(LLM_TENSOR_FFN_NORM,      "weight", i), {n_embd}, 0);
6135
0
                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
6136
6137
0
                        if (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) {
6138
                            // MoE layers
6139
0
                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6140
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
6141
6142
                            // grouped expert weights
6143
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
6144
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
6145
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
6146
6147
                            // shared expert
6148
0
                            if (n_expert_shared > 0) {
6149
0
                                const int64_t n_ff_shexp = n_ff_exp * n_expert_shared;
6150
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_shexp}, 0);
6151
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0);
6152
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_shexp}, 0);
6153
0
                            }
6154
0
                        } else {
6155
                            // Dense layers
6156
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
6157
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
6158
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
6159
0
                        }
6160
0
                    }
6161
0
                } break;
6162
0
            case LLM_ARCH_ERNIE4_5:
6163
0
            case LLM_ARCH_ERNIE4_5_MOE:
6164
0
                {
6165
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6166
6167
                    // output
6168
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6169
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6170
                    // if output is NULL, init from the input tok embed
6171
0
                    if (output == NULL) {
6172
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6173
0
                    }
6174
6175
0
                    for (int i = 0; i < n_layer; ++i) {
6176
0
                        auto & layer = layers[i];
6177
6178
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6179
6180
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6181
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
6182
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
6183
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6184
6185
                        // optional bias tensors
6186
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
6187
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
6188
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
6189
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
6190
6191
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6192
6193
0
                        if (arch == LLM_ARCH_ERNIE4_5_MOE && static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
6194
0
                            int n_ff_exp = hparams.n_ff_exp;
6195
6196
0
                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
6197
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
6198
0
                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff_exp, n_expert}, TENSOR_NOT_REQUIRED);
6199
0
                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff_exp, n_embd, n_expert}, 0);
6200
0
                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff_exp, n_expert}, 0);
6201
6202
                            // Shared expert (if present)
6203
0
                            if (hparams.n_ff_shexp > 0) {
6204
0
                                layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {    n_embd, hparams.n_ff_shexp}, 0);
6205
0
                                layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd    }, 0);
6206
0
                                layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {    n_embd, hparams.n_ff_shexp}, 0);
6207
0
                            }
6208
0
                        } else { // Dense layers
6209
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6210
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6211
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6212
0
                        }
6213
0
                    }
6214
0
                } break;
6215
0
            case LLM_ARCH_FALCON_H1:
6216
0
                {
6217
                    // Common
6218
0
                    const int64_t hidden_size = hparams.n_embd; // hidden_size
6219
6220
                    // mamba2 Mixer SSM params
6221
0
                    const int64_t ssm_conv_kernel_size  = hparams.ssm_d_conv; // ssm_conv_kernel_size
6222
0
                    const int64_t ssm_n_groups          = hparams.ssm_n_group; // ssm_n_groups
6223
0
                    const int64_t ssm_state_size        = hparams.ssm_d_state; // ssm_state_size
6224
0
                    const int64_t ssm_intermediate_size = hparams.ssm_d_inner; // TODO expand
6225
0
                    const int64_t ssm_num_heads         = hparams.ssm_dt_rank; // ssm_num_heads
6226
0
                    const int64_t ssm_conv_dim          = ssm_intermediate_size + 2 * ssm_n_groups * ssm_state_size;
6227
0
                    const int64_t ssm_projection_size   = ssm_intermediate_size + ssm_conv_dim + ssm_num_heads;
6228
6229
                    // attn params
6230
0
                    const int64_t attn_num_attention_head = hparams.n_head(0); // rename to: attn_num_attention_head
6231
0
                    const int64_t attn_num_key_value_head = hparams.n_head_kv(0);
6232
6233
                    // ffn params
6234
0
                    const int64_t ffn_intermediate_size = hparams.n_ff(0);
6235
6236
                    // embeddings
6237
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hidden_size, n_vocab}, 0);
6238
6239
                    // output
6240
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hidden_size, n_vocab}, TENSOR_NOT_REQUIRED);
6241
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {hidden_size}, 0);
6242
6243
                    // if output is NULL, init from the input tok embed
6244
0
                    if (output == NULL) {
6245
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hidden_size, n_vocab}, TENSOR_DUPLICATED);
6246
0
                    }
6247
6248
0
                    for (int i = 0; i < n_layer; ++i) {
6249
0
                        auto & layer = layers[i];
6250
6251
                        /*SSM LAYERS*/
6252
                        // ssm in
6253
0
                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {hidden_size, ssm_projection_size}, 0);
6254
                        // ssm 1d conv
6255
0
                        layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {ssm_conv_kernel_size, ssm_conv_dim}, 0);
6256
0
                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {ssm_conv_dim}, TENSOR_NOT_REQUIRED);
6257
                        // ssm_dt
6258
0
                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {ssm_num_heads}, 0);
6259
                        // no "weight" suffix for these
6260
0
                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, ssm_num_heads}, 0);
6261
0
                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, ssm_num_heads}, 0);
6262
                        // ssm_norm
6263
0
                        layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {ssm_intermediate_size / ssm_n_groups, ssm_n_groups}, TENSOR_NOT_REQUIRED);
6264
                        // out_proj
6265
0
                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {ssm_intermediate_size, hidden_size}, 0);
6266
6267
                        /*ATTENTION LAYERS*/
6268
                        // attention layers (with optional bias)
6269
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {hidden_size, n_embd_head_k * attn_num_attention_head}, 0);
6270
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {hidden_size, attn_num_key_value_head * n_embd_head_k}, 0);
6271
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {hidden_size, attn_num_key_value_head * n_embd_head_v}, 0);
6272
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * attn_num_attention_head, hidden_size}, 0);
6273
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
6274
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {attn_num_key_value_head * n_embd_head_k}, TENSOR_NOT_REQUIRED);
6275
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {attn_num_key_value_head * n_embd_head_v}, TENSOR_NOT_REQUIRED);
6276
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
6277
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {hidden_size}, 0);
6278
6279
6280
                        // feed forward (w/ optional biases)
6281
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, i), {hidden_size}, 0);
6282
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6283
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {hidden_size,   ffn_intermediate_size}, 0);
6284
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  ffn_intermediate_size, hidden_size}, 0);
6285
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {hidden_size,   ffn_intermediate_size}, 0);
6286
6287
0
                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {ffn_intermediate_size}, TENSOR_NOT_REQUIRED);
6288
0
                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
6289
0
                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {ffn_intermediate_size}, TENSOR_NOT_REQUIRED);
6290
0
                    }
6291
0
                } break;
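Note: the FALCON_H1 case above derives the mamba2 widths from the SSM hyperparameters: ssm_conv_dim = d_inner + 2 * n_groups * d_state (the x stream plus the B and C streams share the 1D convolution), and ssm_projection_size = d_inner + ssm_conv_dim + n_heads (which matches the usual mamba2 split of gate, conv input, and one dt value per head). Below is a standalone sketch of that arithmetic; the input values are hypothetical placeholders.

#include <cstdint>
#include <cstdio>

struct ssm_dims {
    int64_t conv_dim;
    int64_t proj_size;
};

// mirrors the dimension arithmetic in the source above; inputs are placeholders
static ssm_dims compute_ssm_dims(int64_t d_inner, int64_t n_groups, int64_t d_state, int64_t n_heads) {
    ssm_dims d;
    d.conv_dim  = d_inner + 2 * n_groups * d_state;
    d.proj_size = d_inner + d.conv_dim + n_heads;
    return d;
}

int main() {
    const ssm_dims d = compute_ssm_dims(/*d_inner=*/4096, /*n_groups=*/1, /*d_state=*/128, /*n_heads=*/128);
    printf("ssm_conv_dim        = %lld\n", (long long) d.conv_dim);   // 4096 + 2*1*128 = 4352
    printf("ssm_projection_size = %lld\n", (long long) d.proj_size);  // 4096 + 4352 + 128 = 8576
    return 0;
}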
6292
0
            case LLM_ARCH_HUNYUAN_MOE:
6293
0
                {
6294
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6295
6296
                    // output
6297
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6298
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6299
                    // if output is NULL, init from the input tok embed
6300
0
                    if (output == NULL) {
6301
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6302
0
                    }
6303
6304
0
                    for (int i = 0; i < n_layer; ++i) {
6305
0
                        auto & layer = layers[i];
6306
6307
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6308
6309
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6310
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6311
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6312
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6313
6314
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6315
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6316
6317
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6318
6319
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
6320
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, 0);
6321
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
6322
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
6323
6324
0
                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
6325
0
                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
6326
0
                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
6327
0
                    }
6328
0
                } break;
6329
0
            case LLM_ARCH_HUNYUAN_DENSE:
6330
0
                {
6331
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6332
6333
                    // output
6334
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6335
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6336
                    // if output is NULL, init from the input tok embed
6337
0
                    if (output == NULL) {
6338
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6339
0
                    }
6340
6341
0
                    for (int i = 0; i < n_layer; ++i) {
6342
0
                        auto & layer = layers[i];
6343
6344
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6345
6346
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6347
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6348
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6349
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6350
6351
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6352
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6353
6354
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6355
6356
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6357
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6358
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6359
6360
0
                    }
6361
0
                } break;
6362
0
            case LLM_ARCH_SMOLLM3:
6363
0
                {
6364
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6365
6366
                    // output
6367
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6368
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6369
6370
                    // if output is NULL, init from the input tok embed
6371
0
                    if (output == NULL) {
6372
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6373
0
                    }
6374
6375
0
                    for (int i = 0; i < n_layer; ++i) {
6376
0
                        auto & layer = layers[i];
6377
6378
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6379
6380
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6381
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6382
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6383
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6384
6385
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6386
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6387
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6388
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6389
0
                    }
6390
0
                } break;
6391
0
            case LLM_ARCH_OPENAI_MOE:
6392
0
                {
6393
0
                    const int64_t n_ff_exp = hparams.n_ff_exp;
6394
6395
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6396
6397
                    // output
6398
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6399
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6400
6401
0
                    for (int i = 0; i < n_layer; ++i) {
6402
0
                        auto & layer = layers[i];
6403
6404
0
                        layer.attn_norm      = create_tensor(tn(LLM_TENSOR_ATTN_NORM,      "weight", i), {n_embd}, 0);
6405
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
6406
6407
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_head * n_rot}, 0);
6408
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
6409
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_head_kv * n_rot}, 0);
6410
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_rot, n_embd}, 0);
6411
6412
0
                        layer.attn_sinks = create_tensor(tn(LLM_TENSOR_ATTN_SINKS, "weight", i), {n_head}, 0);
6413
6414
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {  n_embd, n_expert}, 0);
6415
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6416
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
6417
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6418
6419
                        // bias
6420
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_head * n_rot}, 0);
6421
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_head_kv * n_rot}, 0);
6422
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_head_kv * n_rot}, 0);
6423
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
6424
6425
0
                        layer.ffn_gate_inp_b  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "bias", i), {n_expert}, 0);
6426
0
                        layer.ffn_gate_exps_b = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
6427
0
                        layer.ffn_down_exps_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), {  n_embd, n_expert}, 0);
6428
0
                        layer.ffn_up_exps_b   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "bias", i), {n_ff_exp, n_expert}, 0);
6429
0
                    }
6430
0
                } break;
6431
0
            case LLM_ARCH_LFM2:
6432
0
            case LLM_ARCH_LFM2MOE:
6433
0
                {
6434
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6435
6436
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM_LFM2, "weight"), {n_embd}, 0);
6437
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,           "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6438
6439
0
                    if (output == NULL) {
6440
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6441
0
                    }
6442
6443
0
                    for (int i = 0; i < n_layer; ++i) {
6444
0
                        auto & layer = layers[i];
6445
6446
0
                        const bool is_moe_layer = i >= static_cast<int>(hparams.n_layer_dense_lead);
6447
6448
                        // ffn/moe is same for transformer and conv layers
6449
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6450
0
                        if (is_moe_layer) {
6451
0
                            GGML_ASSERT(n_expert && n_expert_used);
6452
0
                            layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i),  {n_embd, n_expert}, 0);
6453
0
                            layer.ffn_gate_exps   = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, hparams.n_ff_exp, n_expert}, 0);
6454
0
                            layer.ffn_down_exps   = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {hparams.n_ff_exp,   n_embd, n_expert}, 0);
6455
0
                            layer.ffn_up_exps     = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i),   {n_embd, hparams.n_ff_exp, n_expert}, 0);
6456
0
                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
6457
0
                        } else {  // dense
6458
0
                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6459
0
                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6460
0
                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6461
0
                        }
6462
6463
                        // for operator_norm
6464
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6465
6466
0
                        if (!hparams.is_recurrent(i)) {
6467
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6468
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6469
0
                            GGML_ASSERT(n_embd_v_gqa == n_embd_k_gqa);
6470
6471
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
6472
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, hparams.n_embd_k_gqa(i)}, 0);
6473
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, hparams.n_embd_v_gqa(i)}, 0);
6474
6475
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
6476
0
                        } else {
6477
0
                            layer.shortconv.conv     = create_tensor(tn(LLM_TENSOR_SHORTCONV_CONV,    "weight", i), {hparams.n_shortconv_l_cache, n_embd}, 0);
6478
0
                            layer.shortconv.in_proj  = create_tensor(tn(LLM_TENSOR_SHORTCONV_INPROJ,  "weight", i), {n_embd, 3 * n_embd}, 0);
6479
0
                            layer.shortconv.out_proj = create_tensor(tn(LLM_TENSOR_SHORTCONV_OUTPROJ, "weight", i), {n_embd, n_embd}, 0);
6480
0
                        }
6481
0
                    }
6482
6483
                    // for LFM2-ColBert-350M
6484
0
                    dense_2_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_2_OUT, "weight"), {n_embd, hparams.get_n_embd_out()}, TENSOR_NOT_REQUIRED);
6485
0
                } break;
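Note: in the LFM2/LFM2MOE case above each layer is classified along two independent axes: the FFN is dense for the first n_layer_dense_lead layers and MoE afterwards, and the token mixer is either regular attention or a short convolution depending on hparams.is_recurrent(i). A small standalone sketch of that classification logic follows; the predicate and the layer counts used here are hypothetical placeholders, not the real hparams.

#include <cstdint>
#include <cstdio>

enum class ffn_kind   { dense, moe };
enum class mixer_kind { attention, shortconv };

// hypothetical stand-in for hparams.is_recurrent(i): most layers are recurrent
static bool is_recurrent(int i) { return i % 4 != 3; }

int main() {
    const int      n_layer            = 8; // placeholder
    const uint32_t n_layer_dense_lead = 2; // placeholder

    for (int i = 0; i < n_layer; ++i) {
        const ffn_kind   f = (uint32_t) i >= n_layer_dense_lead ? ffn_kind::moe : ffn_kind::dense;
        const mixer_kind m = is_recurrent(i) ? mixer_kind::shortconv : mixer_kind::attention;
        printf("layer %d: ffn=%s mixer=%s\n", i,
               f == ffn_kind::moe ? "moe" : "dense",
               m == mixer_kind::shortconv ? "shortconv" : "attention");
    }
    return 0;
}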
6486
0
            case LLM_ARCH_SMALLTHINKER:
6487
0
                {
6488
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
6489
6490
                    // output
6491
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
6492
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6493
6494
                    // if output is NULL, init from the input tok embed
6495
0
                    if (output == NULL) {
6496
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6497
0
                    }
6498
6499
0
                    for (int i = 0; i < n_layer; ++i) {
6500
0
                        auto & layer = layers[i];
6501
6502
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
6503
6504
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6505
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
6506
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
6507
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6508
6509
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
6510
6511
0
                        GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for SMALLTHINKER");
6512
0
                        GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for SMALLTHINKER");
6513
6514
                        // MoE branch
6515
0
                        const int64_t n_ff_exp = hparams.n_ff_exp;
6516
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, 0);
6517
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6518
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, 0);
6519
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6520
0
                    }
6521
0
                } break;
6522
0
            case LLM_ARCH_GROVEMOE:
6523
0
                {
6524
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6525
6526
                    // output
6527
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6528
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6529
                    // if output is NULL, init from the input tok embed
6530
0
                    if (output == NULL) {
6531
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6532
0
                    }
6533
6534
0
                    GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for GROVEMOE");
6535
0
                    GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for GROVEMOE");
6536
0
                    GGML_ASSERT(hparams.n_group_experts > 0 && "n_group_experts must be > 0 for GROVEMOE");
6537
6538
0
                    for (int i = 0; i < n_layer; ++i) {
6539
0
                        auto & layer = layers[i];
6540
6541
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6542
6543
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6544
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
6545
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
6546
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6547
6548
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6549
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6550
6551
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6552
6553
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6554
6555
                        // MoE branch
6556
0
                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
6557
0
                        const int64_t n_ff_chexp = hparams.n_ff_chexp ? hparams.n_ff_chexp : n_embd_head_k;
6558
0
                        const int64_t n_chunk_expert = n_expert / hparams.n_group_experts;
6559
6560
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6561
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
6562
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
6563
6564
0
                        layer.ffn_gate_chexps = create_tensor(tn(LLM_TENSOR_FFN_GATE_CHEXPS, "weight", i), {  n_embd, n_ff_chexp, n_chunk_expert}, 0);
6565
0
                        layer.ffn_down_chexps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_CHEXPS, "weight", i), {n_ff_chexp,   n_embd, n_chunk_expert}, 0);
6566
0
                        layer.ffn_up_chexps   = create_tensor(tn(LLM_TENSOR_FFN_UP_CHEXPS,   "weight", i), {  n_embd, n_ff_chexp, n_chunk_expert}, 0);
6567
0
                    }
6568
0
                } break;
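Note: the GROVEMOE case above falls back to derived defaults when the optional hparams are zero (n_ff_exp defaults to n_ff / n_expert_used, n_ff_chexp to n_embd_head_k) and sizes the chunk-expert group as n_expert / hparams.n_group_experts. The sketch below mirrors those ternary fallbacks with hypothetical values; even divisibility of n_expert by n_group_experts is assumed here, as the integer division in the source implies.

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
    // hypothetical placeholder hyperparameters
    const int64_t n_ff            = 6144;
    const int64_t n_expert        = 128;
    const int64_t n_expert_used   = 8;
    const int64_t n_embd_head_k   = 128;
    const int64_t n_group_experts = 4;
    const int64_t n_ff_exp_hp     = 0; // 0 -> use the fallback
    const int64_t n_ff_chexp_hp   = 0; // 0 -> use the fallback

    const int64_t n_ff_exp   = n_ff_exp_hp   ? n_ff_exp_hp   : n_ff / n_expert_used;
    const int64_t n_ff_chexp = n_ff_chexp_hp ? n_ff_chexp_hp : n_embd_head_k;

    assert(n_expert % n_group_experts == 0); // assumed; not checked in the source above
    const int64_t n_chunk_expert = n_expert / n_group_experts;

    printf("n_ff_exp=%lld n_ff_chexp=%lld n_chunk_expert=%lld\n",
           (long long) n_ff_exp, (long long) n_ff_chexp, (long long) n_chunk_expert);
    return 0;
}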
6569
0
            case LLM_ARCH_APERTUS:
6570
0
                {
6571
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
6572
6573
                    // output
6574
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
6575
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), { n_embd, n_vocab }, 0);
6576
6577
0
                    for (int i = 0; i < n_layer; ++i) {
6578
0
                        auto & layer = layers[i];
6579
6580
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
6581
6582
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
6583
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6584
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6585
0
                        } else {
6586
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6587
0
                        }
6588
6589
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6590
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), { n_embd, n_embd_gqa }, 0);
6591
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), { n_embd, n_embd_gqa }, 0);
6592
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6593
6594
                        // optional bias tensors
6595
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), { n_embd },     TENSOR_NOT_REQUIRED);
6596
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), { n_embd_gqa }, TENSOR_NOT_REQUIRED);
6597
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), { n_embd_gqa }, TENSOR_NOT_REQUIRED);
6598
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd },     TENSOR_NOT_REQUIRED);
6599
6600
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
6601
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
6602
0
                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
6603
6604
                        // Q and K layernorms for Apertus
6605
0
                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, 0);
6606
0
                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), { n_embd_head_k }, TENSOR_NOT_REQUIRED);
6607
0
                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, 0);
6608
0
                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), { n_embd_head_k }, TENSOR_NOT_REQUIRED);
6609
0
                    }
6610
0
                } break;
6611
0
            case LLM_ARCH_MINIMAX_M2:
6612
0
                {
6613
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6614
6615
                    // output
6616
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6617
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6618
6619
0
                    for (int i = 0; i < n_layer; ++i) {
6620
0
                        auto & layer = layers[i];
6621
6622
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6623
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
6624
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
6625
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6626
6627
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6628
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k * n_head}, 0);
6629
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_k_gqa}, 0);
6630
6631
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6632
6633
0
                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6634
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
6635
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
6636
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
6637
0
                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
6638
0
                    }
6639
0
                } break;
6640
0
            case LLM_ARCH_COGVLM:
6641
0
                {
6642
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6643
6644
                    // output
6645
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6646
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6647
6648
                    // if output is NULL, init from the input tok embed
6649
0
                    if (output == NULL) {
6650
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6651
0
                    }
6652
6653
0
                    for (int i = 0; i < n_layer; ++i) {
6654
0
                        auto & layer = layers[i];
6655
6656
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6657
0
                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd_head_k * n_head * 3}, 0);
6658
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6659
6660
0
                        layer.visexp_attn_wqkv = create_tensor(tn(LLM_TENSOR_VISEXP_ATTN_QKV, "weight", i), {n_embd, n_embd_head_k * n_head * 3}, 0);
6661
0
                        layer.visexp_attn_wo = create_tensor(tn(LLM_TENSOR_VISEXP_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6662
6663
0
                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6664
6665
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6666
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6667
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6668
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6669
6670
0
                        layer.visexp_ffn_gate = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6671
0
                        layer.visexp_ffn_down = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6672
0
                        layer.visexp_ffn_up   = create_tensor(tn(LLM_TENSOR_VISEXP_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6673
0
                    }
6674
0
                } break;
6675
0
            case LLM_ARCH_PANGU_EMBED:
6676
0
                {
6677
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6678
6679
                    // output
6680
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6681
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6682
6683
                    // if output is NULL, init from the input tok embed
6684
0
                    if (output == NULL) {
6685
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6686
0
                    }
6687
6688
0
                    for (int i = 0; i < n_layer; ++i) {
6689
0
                        auto & layer = layers[i];
6690
6691
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6692
6693
                        // weight tensors
6694
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6695
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
6696
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
6697
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6698
6699
                        // bias tensors
6700
0
                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd_head_k * n_head}, 0);
6701
0
                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, 0);
6702
0
                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, 0);
6703
0
                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
6704
6705
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6706
6707
0
                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
6708
0
                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6709
0
                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6710
0
                        } else {
6711
0
                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
6712
0
                        }
6713
6714
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6715
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6716
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6717
0
                    }
6718
0
                } break;
6719
0
            case LLM_ARCH_QWEN3NEXT:
6720
0
                {
6721
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
6722
6723
                    // output
6724
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
6725
0
                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED);
6726
6727
                    // if output is NULL, init from the input tok embed
6728
0
                    if (output == NULL) {
6729
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
6730
0
                    }
6731
6732
0
                    const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
6733
6734
                    // Calculate dimensions from hyperparameters
6735
0
                    const int64_t head_k_dim = hparams.ssm_d_state;
6736
0
                    const int64_t head_v_dim = hparams.ssm_d_state;
6737
0
                    const int64_t n_k_heads  = hparams.ssm_n_group;
6738
0
                    const int64_t n_v_heads  = hparams.ssm_dt_rank;
6739
0
                    const int64_t key_dim    = head_k_dim * n_k_heads;
6740
0
                    const int64_t value_dim  = head_v_dim * n_v_heads;
6741
0
                    const int64_t conv_dim   = key_dim * 2 + value_dim;
6742
6743
                    // Calculate projection sizes
6744
0
                    const int64_t qkvz_dim = key_dim * 2 + value_dim * 2;
6745
0
                    const int64_t ba_dim   = n_v_heads * 2;
6746
6747
0
                    for (int i = 0; i < n_layer; ++i) {
6748
0
                        auto & layer = layers[i];
6749
6750
0
                        layer.attn_norm      = create_tensor(tn(LLM_TENSOR_ATTN_NORM,      "weight", i), { n_embd }, 0);
6751
0
                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), { n_embd }, 0);
6752
6753
0
                        if (!hparams.is_recurrent(i)) {
6754
                            // Attention layers
6755
0
                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), { n_embd, n_embd_head_k * n_head * 2 }, 0);
6756
0
                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), { n_embd, n_embd_k_gqa }, 0);
6757
0
                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), { n_embd, n_embd_v_gqa }, 0);
6758
0
                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
6759
6760
                            // Q/K normalization for attention layers
6761
0
                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, 0);
6762
0
                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, 0);
6763
0
                        } else {
6764
                            // Linear attention (gated delta net) specific tensors
6765
                            // Create tensors with calculated dimensions
6766
0
                            layer.ssm_in         = create_tensor(tn(LLM_TENSOR_SSM_IN,         "weight", i), { n_embd, qkvz_dim }, 0);
6767
0
                            layer.ssm_conv1d     = create_tensor(tn(LLM_TENSOR_SSM_CONV1D,     "weight", i), { hparams.ssm_d_conv, conv_dim }, 0);
6768
0
                            layer.ssm_dt         = create_tensor(tn(LLM_TENSOR_SSM_DT,         "bias",   i), { hparams.ssm_dt_rank }, 0);
6769
0
                            layer.ssm_a          = create_tensor(tn(LLM_TENSOR_SSM_A_NOSCAN,             i), { hparams.ssm_dt_rank }, 0);
6770
0
                            layer.ssm_beta_alpha = create_tensor(tn(LLM_TENSOR_SSM_BETA_ALPHA, "weight", i), { n_embd, ba_dim }, 0);
6771
0
                            layer.ssm_norm       = create_tensor(tn(LLM_TENSOR_SSM_NORM,       "weight", i), { head_v_dim }, 0);
6772
0
                            layer.ssm_out        = create_tensor(tn(LLM_TENSOR_SSM_OUT,        "weight", i), { value_dim, n_embd }, 0);
6773
0
                        }
6774
6775
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), { n_embd, n_expert }, 0);
6776
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6777
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, 0);
6778
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
6779
6780
                        // Shared experts
6781
0
                        layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), { n_embd }, 0);
6782
0
                        layer.ffn_gate_shexp     = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP,     "weight", i), { n_embd, hparams.n_ff_shexp }, 0);
6783
0
                        layer.ffn_up_shexp       = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,       "weight", i), { n_embd, hparams.n_ff_shexp }, 0);
6784
0
                        layer.ffn_down_shexp     = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP,     "weight", i), { hparams.n_ff_shexp, n_embd }, 0);
6785
0
                    }
6786
0
                } break;
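Note: the QWEN3NEXT case above derives the gated delta-net projection widths from the SSM hyperparameters: key_dim = head_k_dim * n_k_heads, value_dim = head_v_dim * n_v_heads, conv_dim = 2 * key_dim + value_dim, qkvz_dim = 2 * key_dim + 2 * value_dim, and ba_dim = 2 * n_v_heads. The standalone sketch below mirrors those formulas with hypothetical placeholder values.

#include <cstdint>
#include <cstdio>

int main() {
    // hypothetical placeholders for the hparams used above
    const int64_t head_k_dim = 128; // ssm_d_state
    const int64_t head_v_dim = 128; // ssm_d_state
    const int64_t n_k_heads  = 16;  // ssm_n_group
    const int64_t n_v_heads  = 32;  // ssm_dt_rank

    const int64_t key_dim   = head_k_dim * n_k_heads;
    const int64_t value_dim = head_v_dim * n_v_heads;
    const int64_t conv_dim  = key_dim * 2 + value_dim;     // width of the shared 1D conv input
    const int64_t qkvz_dim  = key_dim * 2 + value_dim * 2; // fused in-projection width
    const int64_t ba_dim    = n_v_heads * 2;               // one beta/alpha pair per v head

    printf("key_dim=%lld value_dim=%lld conv_dim=%lld qkvz_dim=%lld ba_dim=%lld\n",
           (long long) key_dim, (long long) value_dim, (long long) conv_dim,
           (long long) qkvz_dim, (long long) ba_dim);
    return 0;
}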
6787
0
            case LLM_ARCH_MIMO2:
6788
0
                {
6789
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6790
6791
                    // output
6792
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6793
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
6794
6795
0
                    for (int i = 0; i < n_layer; ++i) {
6796
0
                        auto & layer = layers[i];
6797
0
                        uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i);
6798
0
                        uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i);
6799
0
                        uint32_t n_head = hparams.n_head(i);
6800
6801
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
6802
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, 0);
6803
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, 0);
6804
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_v * n_head, n_embd }, 0);
6805
6806
0
                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_ATTN_NORM,  "weight", i), {n_embd}, 0);
6807
0
                        layer.attn_sinks = create_tensor(tn(LLM_TENSOR_ATTN_SINKS, "weight", i), {n_head}, TENSOR_NOT_REQUIRED);
6808
6809
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6810
6811
                        // non-MoE branch
6812
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
6813
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, TENSOR_NOT_REQUIRED);
6814
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
6815
6816
                        // MoE branch
6817
0
                        int64_t n_ff_exp = hparams.n_ff_exp;
6818
0
                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, TENSOR_NOT_REQUIRED);
6819
0
                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp,   n_expert}, TENSOR_NOT_REQUIRED);
6820
0
                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, TENSOR_NOT_REQUIRED);
6821
0
                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff_exp,   n_expert}, TENSOR_NOT_REQUIRED);
6822
0
                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
6823
0
                    }
6824
0
                } break;
6825
0
            case LLM_ARCH_MAINCODER:
6826
0
                {
6827
0
                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
6828
6829
                    // output
6830
0
                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
6831
0
                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
6832
                    // if output is NULL, init from the input token embeddings
6833
0
                    if (output == NULL) {
6834
0
                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
6835
0
                    }
6836
6837
0
                    for (int i = 0; i < n_layer; ++i) {
6838
0
                        auto & layer = layers[i];
6839
6840
0
                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
6841
6842
0
                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
6843
0
                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
6844
0
                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
6845
0
                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
6846
6847
0
                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
6848
0
                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
6849
6850
0
                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6851
0
                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
6852
0
                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
6853
0
                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
6854
0
                    }
6855
0
                } break;
6856
0
            default:
6857
0
                throw std::runtime_error("unknown architecture");
6858
0
        }
6859
6860
0
        if (n_moved_tensors > 0) {
6861
0
            LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n",
6862
0
                __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1,
6863
0
                ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft));
6864
0
        }
6865
0
    }
6866
6867
0
    ml.done_getting_tensors();
6868
6869
0
    ml.init_mappings(true, use_mlock ? &pimpl->mlock_mmaps : nullptr);
6870
0
    pimpl->mappings.reserve(ml.mappings.size());
6871
6872
    // create the backend buffers
6873
0
    std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_buf_maps;
6874
0
    ctx_buf_maps.reserve(ctx_map.size());
6875
6876
    // Ensure we have enough capacity for the maximum number of backend buffers we may potentially create
6877
0
    const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
6878
0
    pimpl->ctxs_bufs.reserve(n_max_backend_buffer);
6879
6880
0
    for (auto & [buft, ctx_ptr] : ctx_map) {
6881
0
        ggml_context * ctx = ctx_ptr.get();
6882
6883
        // skip contexts without tensors
6884
0
        if (ggml_get_first_tensor(ctx) == nullptr) {
6885
0
            continue;
6886
0
        }
6887
6888
0
        llama_buf_map buf_map;
6889
0
        buf_map.reserve(n_max_backend_buffer);
6890
6891
        // check if it is possible to use buffer_from_host_ptr with this buffer type
6892
0
        ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
6893
0
        if (!dev) {
6894
            // FIXME: workaround for CPU backend buft having a NULL device
6895
0
            dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
6896
0
            if (!dev) {
6897
0
                throw std::runtime_error(format("%s: no CPU backend found", __func__));
6898
0
            }
6899
0
        }
6900
0
        ggml_backend_dev_props props;
6901
0
        ggml_backend_dev_get_props(dev, &props);
6902
0
        bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
6903
0
        bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
6904
6905
0
        std::vector<ggml_backend_buffer_ptr> bufs;
6906
0
        if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
6907
0
            GGML_ASSERT(!ml.no_alloc);
6908
0
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
6909
                // only the mmap region containing the tensors in the model is mapped to the backend buffer
6910
                // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer,
6911
                //     then we could just use metal for all layers
6912
                // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
6913
0
                void * addr = nullptr;
6914
0
                size_t first, last; // NOLINT
6915
0
                ml.get_mapping_range(&first, &last, &addr, idx, ctx);
6916
0
                if (first >= last) {
6917
0
                    continue;
6918
0
                }
6919
0
                const size_t max_size = ggml_get_max_tensor_size(ctx);
6920
0
                ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
6921
0
                if (buf == nullptr) {
6922
0
                    throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
6923
0
                }
6924
0
                bufs.emplace_back(buf);
6925
0
                buf_map.emplace(idx, buf);
6926
0
            }
6927
0
        } else {
6928
0
            ggml_backend_buffer_t buf;
6929
0
            if (ml.no_alloc) {
6930
0
                buf = ggml_backend_buft_alloc_buffer(buft, /*size =*/ 0); // dummy buffer
6931
0
                for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
6932
0
                    t->buffer = buf; // set dummy buffer for weights so that the backend scheduler won't try to allocate them
6933
0
                }
6934
0
            } else {
6935
0
                buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); // real buffer
6936
0
            }
6937
0
            if (buf == nullptr) {
6938
0
                throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
6939
0
            }
6940
0
            if (use_mlock && ggml_backend_buffer_is_host(buf)) {
6941
0
                pimpl->mlock_bufs.emplace_back(new llama_mlock);
6942
0
                auto & mlock_buf = pimpl->mlock_bufs.back();
6943
0
                mlock_buf->init   (ggml_backend_buffer_get_base(buf));
6944
0
                mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
6945
0
            }
6946
0
            bufs.emplace_back(buf);
6947
0
            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
6948
0
                buf_map.emplace(idx, buf);
6949
0
            }
6950
0
        }
6951
0
        pimpl->ctxs_bufs.emplace_back(std::move(ctx_ptr), std::move(bufs));
6952
6953
0
        for (auto & buf : buf_map) {
6954
            // indicate that this buffer contains weights
6955
            // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
6956
0
            ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
6957
0
        }
6958
6959
0
        ctx_buf_maps.emplace_back(ctx, buf_map);
6960
0
    }
6961
6962
0
    if (llama_supports_gpu_offload()) {
6963
0
        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
6964
6965
0
        int n_repeating = n_gpu;
6966
0
        if (n_repeating > 0) {
6967
0
            LLAMA_LOG_INFO("%s: offloading output layer to GPU\n", __func__);
6968
0
            n_repeating--;
6969
0
        }
6970
0
        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_repeating);
6971
6972
0
        const int max_backend_supported_layers = hparams.n_layer + 1;
6973
0
        const int max_offloadable_layers       = hparams.n_layer + 1;
6974
6975
0
        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
6976
0
    }
6977
6978
    // print memory requirements per buffer type
6979
0
    for (auto & [_, bufs] : pimpl->ctxs_bufs) {
6980
0
        for (auto & buf: bufs) {
6981
0
            LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n",
6982
0
                __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
6983
0
        }
6984
0
    }
6985
6986
    // populate tensors_by_name
6987
0
    for (auto & [ctx, _] : pimpl->ctxs_bufs) {
6988
0
        for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
6989
0
            tensors_by_name.emplace_back(ggml_get_name(cur), cur);
6990
0
        }
6991
0
    }
6992
6993
0
    if (ml.no_alloc) {
6994
0
        return true;
6995
0
    }
6996
6997
    // load tensor data
6998
0
    for (auto & [ctx, buf_map] : ctx_buf_maps) {
6999
0
        if (!ml.load_all_data(ctx, buf_map, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
7000
0
            return false;
7001
0
        }
7002
0
    }
7003
7004
0
    if (use_mmap_buffer) {
7005
0
        for (auto & mapping : ml.mappings) {
7006
0
            pimpl->mappings.emplace_back(std::move(mapping));
7007
0
        }
7008
0
    }
7009
7010
0
    return true;
7011
0
}
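The loop above forwards params.progress_callback straight into ml.load_all_data, so callers can observe or abort tensor loading. A minimal sketch of such a callback, not from the source: the helper name is hypothetical, and it assumes the llama_progress_callback signature declared in llama.h (bool(float progress, void * user_data)).

static bool example_progress(float progress, void * user_data) {
    (void) user_data;
    // requires <cstdio>; progress is reported in [0.0f, 1.0f]
    fprintf(stderr, "\rloading model: %3.0f %%", progress * 100.0f);
    return true; // returning false requests that loading be aborted
}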
7012
7013
0
std::string llama_model::arch_name() const {
7014
0
    return llm_arch_name(arch);
7015
0
}
7016
7017
0
std::string llama_model::type_name() const {
7018
0
    return llm_type_name(type);
7019
0
}
7020
7021
0
std::string llama_model::desc() const {
7022
0
    return pimpl->desc_str;
7023
0
}
7024
7025
0
size_t llama_model::size() const {
7026
0
    return pimpl->n_bytes;
7027
0
}
7028
7029
0
size_t llama_model::n_tensors() const {
7030
0
    return tensors_by_name.size();
7031
0
}
7032
7033
0
size_t llama_model::n_devices() const {
7034
0
    return devices.size();
7035
0
}
7036
7037
0
uint32_t llama_model::n_gpu_layers() const {
7038
0
    return params.n_gpu_layers >= 0 ? params.n_gpu_layers : hparams.n_layer + 1;
7039
0
}
7040
7041
0
llama_split_mode llama_model::split_mode() const {
7042
0
    return params.split_mode;
7043
0
}
7044
7045
0
std::map<ggml_backend_buffer_type_t, size_t> llama_model::memory_breakdown() const {
7046
0
    std::map<ggml_backend_buffer_type_t, size_t> ret;
7047
0
    for (const auto & [ctx, bufs] : pimpl->ctxs_bufs) {
7048
0
        if (hparams.no_alloc) {
7049
0
            GGML_ASSERT(bufs.size() == 1);
7050
0
            ggml_backend_buffer_t buf = bufs[0].get();
7051
0
            GGML_ASSERT(ggml_backend_buffer_get_base(buf) == nullptr);
7052
0
            ggml_backend_buffer_type_t buft = ggml_backend_buffer_get_type(buf);
7053
0
            ret[buft] += ggml_backend_alloc_ctx_tensors_from_buft_size(ctx.get(), buft);
7054
0
        } else {
7055
0
            for (const auto & buf : bufs) {
7056
                // GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) != nullptr); // multi_buffer does not have a defined base
7057
0
                ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get());
7058
0
            }
7059
0
        }
7060
0
    }
7061
0
    return ret;
7062
0
}
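memory_breakdown() returns the per-buffer-type byte counts computed above. A minimal usage sketch, not from the source (the helper name is hypothetical), that prints them in MiB:

static void print_memory_breakdown(const llama_model & model) {
    for (const auto & [buft, size] : model.memory_breakdown()) {
        LLAMA_LOG_INFO("%12s = %8.2f MiB\n",
                ggml_backend_buft_name(buft), size / 1024.0 / 1024.0);
    }
}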
7063
7064
0
uint64_t llama_model::n_elements() const {
7065
0
    return pimpl->n_elements;
7066
0
}
7067
7068
0
void llama_model::print_info() const {
7069
0
    const std::string rope_scaling_type = llama_rope_scaling_type_name(hparams.rope_scaling_type_train);
7070
7071
0
    auto print_f = [](const std::function<uint32_t(uint32_t)> & f, uint32_t n) {
7072
0
        bool is_var = false;
7073
7074
0
        std::vector<uint32_t> v;
7075
0
        for (uint32_t i = 0; i < n; ++i) {
7076
0
            v.push_back(f(i));
7077
0
            if (v[i] != v[0]) {
7078
0
                is_var = true;
7079
0
            }
7080
0
        }
7081
7082
0
        std::stringstream ss;
7083
7084
0
        if (is_var) {
7085
0
            ss << "[";
7086
0
            for (uint32_t i = 0; i < n; ++i) {
7087
0
                ss << v[i];
7088
0
                if (i < n - 1) {
7089
0
                    ss << ", ";
7090
0
                }
7091
0
            }
7092
0
            ss << "]";
7093
0
        } else {
7094
0
            ss << v[0];
7095
0
        }
7096
7097
0
        return ss.str();
7098
0
    };
7099
7100
    // hparams
7101
0
    LLAMA_LOG_INFO("%s: arch             = %s\n",     __func__, arch_name().c_str());
7102
0
    LLAMA_LOG_INFO("%s: vocab_only       = %d\n",     __func__, hparams.vocab_only);
7103
0
    LLAMA_LOG_INFO("%s: no_alloc         = %d\n",     __func__, hparams.no_alloc);
7104
7105
0
    if (!hparams.vocab_only) {
7106
0
        LLAMA_LOG_INFO("%s: n_ctx_train      = %u\n",     __func__, hparams.n_ctx_train);
7107
0
        LLAMA_LOG_INFO("%s: n_embd           = %u\n",     __func__, hparams.n_embd);
7108
0
        LLAMA_LOG_INFO("%s: n_embd_inp       = %u\n",     __func__, hparams.n_embd_inp());
7109
0
        LLAMA_LOG_INFO("%s: n_layer          = %u\n",     __func__, hparams.n_layer);
7110
0
        LLAMA_LOG_INFO("%s: n_head           = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head(il);    }, hparams.n_layer).c_str());
7111
0
        LLAMA_LOG_INFO("%s: n_head_kv        = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
7112
0
        LLAMA_LOG_INFO("%s: n_rot            = %u\n",     __func__, hparams.n_rot);
7113
0
        LLAMA_LOG_INFO("%s: n_swa            = %u\n",     __func__, hparams.n_swa);
7114
0
        LLAMA_LOG_INFO("%s: is_swa_any       = %u\n",     __func__, hparams.is_swa_any());
7115
0
        LLAMA_LOG_INFO("%s: n_embd_head_k    = %u\n",     __func__, hparams.n_embd_head_k);
7116
0
        LLAMA_LOG_INFO("%s: n_embd_head_v    = %u\n",     __func__, hparams.n_embd_head_v);
7117
0
        LLAMA_LOG_INFO("%s: n_gqa            = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il);        }, hparams.n_layer).c_str());
7118
0
        LLAMA_LOG_INFO("%s: n_embd_k_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
7119
0
        LLAMA_LOG_INFO("%s: n_embd_v_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
7120
0
        LLAMA_LOG_INFO("%s: f_norm_eps       = %.1e\n",   __func__, hparams.f_norm_eps);
7121
0
        LLAMA_LOG_INFO("%s: f_norm_rms_eps   = %.1e\n",   __func__, hparams.f_norm_rms_eps);
7122
0
        LLAMA_LOG_INFO("%s: f_clamp_kqv      = %.1e\n",   __func__, hparams.f_clamp_kqv);
7123
0
        LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n",   __func__, hparams.f_max_alibi_bias);
7124
0
        LLAMA_LOG_INFO("%s: f_logit_scale    = %.1e\n",   __func__, hparams.f_logit_scale);
7125
0
        LLAMA_LOG_INFO("%s: f_attn_scale     = %.1e\n",   __func__, hparams.f_attention_scale);
7126
0
        LLAMA_LOG_INFO("%s: n_ff             = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
7127
0
        LLAMA_LOG_INFO("%s: n_expert         = %u\n",     __func__, hparams.n_expert);
7128
0
        LLAMA_LOG_INFO("%s: n_expert_used    = %u\n",     __func__, hparams.n_expert_used);
7129
0
        LLAMA_LOG_INFO("%s: n_expert_groups  = %d\n",     __func__, hparams.n_expert_groups);
7130
0
        LLAMA_LOG_INFO("%s: n_group_used     = %d\n",     __func__, hparams.n_group_used);
7131
0
        LLAMA_LOG_INFO("%s: causal attn      = %d\n",     __func__, hparams.causal_attn);
7132
0
        LLAMA_LOG_INFO("%s: pooling type     = %d\n",     __func__, hparams.pooling_type);
7133
0
        LLAMA_LOG_INFO("%s: rope type        = %d\n",     __func__, hparams.rope_type);
7134
0
        LLAMA_LOG_INFO("%s: rope scaling     = %s\n",     __func__, rope_scaling_type.c_str());
7135
0
        LLAMA_LOG_INFO("%s: freq_base_train  = %.1f\n",   __func__, hparams.rope_freq_base_train);
7136
0
        LLAMA_LOG_INFO("%s: freq_scale_train = %g\n",     __func__, hparams.rope_freq_scale_train);
7137
0
        if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7138
0
            LLAMA_LOG_INFO("%s: freq_base_swa    = %.1f\n",   __func__, hparams.rope_freq_base_train_swa);
7139
0
            LLAMA_LOG_INFO("%s: freq_scale_swa   = %g\n",     __func__, hparams.rope_freq_scale_train_swa);
7140
0
        }
7141
0
        LLAMA_LOG_INFO("%s: n_ctx_orig_yarn  = %u\n",     __func__, hparams.n_ctx_orig_yarn);
7142
0
        LLAMA_LOG_INFO("%s: rope_yarn_log_mul= %.4f\n",   __func__, hparams.rope_yarn_log_mul);
7143
0
        LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
7144
        // MRoPE (Multi-axis Rotary Position Embedding) sections
7145
0
        if (const auto & s = hparams.rope_sections; s[0] || s[1] || s[2] || s[3]) {
7146
0
            LLAMA_LOG_INFO("%s: mrope sections   = [%d, %d, %d, %d]\n", __func__, s[0], s[1], s[2], s[3]);
7147
0
        }
7148
0
        if (!classifier_labels.empty()) {
7149
0
            LLAMA_LOG_INFO("%s: n_cls_out        = %u\n", __func__, hparams.n_cls_out);
7150
7151
0
            size_t i = 0;
7152
0
            for (auto label : classifier_labels) {
7153
0
                LLAMA_LOG_INFO("%s: cls_label[%2zu]    = %s\n", __func__, i++, label.c_str());
7154
0
            }
7155
0
        }
7156
0
    }
7157
7158
0
    if (arch == LLM_ARCH_MAMBA ||
7159
0
        arch == LLM_ARCH_MAMBA2 ||
7160
0
        arch == LLM_ARCH_JAMBA ||
7161
0
        arch == LLM_ARCH_FALCON_H1 ||
7162
0
        arch == LLM_ARCH_PLAMO2 ||
7163
0
        arch == LLM_ARCH_GRANITE_HYBRID ||
7164
0
        arch == LLM_ARCH_QWEN3NEXT ||
7165
0
        arch == LLM_ARCH_NEMOTRON_H ||
7166
0
        arch == LLM_ARCH_NEMOTRON_H_MOE) {
7167
0
        LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",     __func__, hparams.ssm_d_conv);
7168
0
        LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",     __func__, hparams.ssm_d_inner);
7169
0
        LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",     __func__, hparams.ssm_d_state);
7170
0
        LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",     __func__, hparams.ssm_dt_rank);
7171
0
        LLAMA_LOG_INFO("%s: ssm_n_group      = %u\n",     __func__, hparams.ssm_n_group);
7172
0
        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
7173
0
    }
7174
7175
0
    LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, type_name().c_str());
7176
0
    if (pimpl->n_elements >= 1e12) {
7177
0
        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, pimpl->n_elements*1e-12);
7178
0
    } else if (pimpl->n_elements >= 1e9) {
7179
0
        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, pimpl->n_elements*1e-9);
7180
0
    } else if (pimpl->n_elements >= 1e6) {
7181
0
        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, pimpl->n_elements*1e-6);
7182
0
    } else {
7183
0
        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, pimpl->n_elements*1e-3);
7184
0
    }
7185
7186
    // general kv
7187
0
    LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, name.c_str());
7188
7189
0
    if (arch == LLM_ARCH_DEEPSEEK) {
7190
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
7191
0
        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
7192
0
        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
7193
0
        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
7194
0
    }
7195
7196
0
    if (arch == LLM_ARCH_DEEPSEEK2) {
7197
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
7198
0
        LLAMA_LOG_INFO("%s: n_lora_q             = %d\n",     __func__, hparams.n_lora_q);
7199
0
        LLAMA_LOG_INFO("%s: n_lora_kv            = %d\n",     __func__, hparams.n_lora_kv);
7200
0
        LLAMA_LOG_INFO("%s: n_embd_head_k_mla    = %d\n",     __func__, hparams.n_embd_head_k_mla);
7201
0
        LLAMA_LOG_INFO("%s: n_embd_head_v_mla    = %d\n",     __func__, hparams.n_embd_head_v_mla);
7202
0
        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
7203
0
        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
7204
0
        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
7205
0
        LLAMA_LOG_INFO("%s: expert_weights_norm  = %d\n",     __func__, hparams.expert_weights_norm);
7206
0
        LLAMA_LOG_INFO("%s: expert_gating_func   = %s\n",     __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
7207
0
    }
7208
7209
0
    if (arch == LLM_ARCH_QWEN2MOE) {
7210
0
        LLAMA_LOG_INFO("%s: n_ff_exp         = %d\n",     __func__, hparams.n_ff_exp);
7211
0
        LLAMA_LOG_INFO("%s: n_ff_shexp       = %d\n",     __func__, hparams.n_ff_shexp);
7212
0
    }
7213
7214
0
    if (arch == LLM_ARCH_QWEN3MOE || arch == LLM_ARCH_OPENAI_MOE || arch == LLM_ARCH_QWEN3VLMOE || arch == LLM_ARCH_RND1) {
7215
0
        LLAMA_LOG_INFO("%s: n_ff_exp         = %d\n",     __func__, hparams.n_ff_exp);
7216
0
    }
7217
7218
0
    if (arch == LLM_ARCH_MINICPM ||
7219
0
        arch == LLM_ARCH_GRANITE ||
7220
0
        arch == LLM_ARCH_GRANITE_MOE ||
7221
0
        arch == LLM_ARCH_GRANITE_HYBRID ||
7222
0
        arch == LLM_ARCH_NEMOTRON_H_MOE) {
7223
0
        LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
7224
0
        LLAMA_LOG_INFO("%s: f_residual_scale  = %f\n", __func__, hparams.f_residual_scale);
7225
0
        LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
7226
0
        LLAMA_LOG_INFO("%s: n_ff_shexp        = %d\n", __func__, hparams.n_ff_shexp);
7227
0
    }
7228
7229
0
    if (arch == LLM_ARCH_BAILINGMOE) {
7230
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
7231
0
        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
7232
0
        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
7233
0
        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
7234
0
        LLAMA_LOG_INFO("%s: expert_weights_norm  = %d\n",     __func__, hparams.expert_weights_norm);
7235
0
    }
7236
7237
0
    if (arch == LLM_ARCH_BAILINGMOE2) {
7238
0
        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
7239
0
        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
7240
0
        LLAMA_LOG_INFO("%s: n_ff_shexp           = %d\n",     __func__, hparams.n_ff_shexp);
7241
0
        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
7242
0
        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
7243
0
        LLAMA_LOG_INFO("%s: expert_weights_norm  = %d\n",     __func__, hparams.expert_weights_norm);
7244
0
        LLAMA_LOG_INFO("%s: expert_gating_func   = %s\n",     __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
7245
0
        LLAMA_LOG_INFO("%s: nextn_predict_layers = %d\n",     __func__, hparams.nextn_predict_layers);
7246
0
    }
7247
7248
0
    if (arch == LLM_ARCH_SMALLTHINKER || arch == LLM_ARCH_LFM2MOE) {
7249
0
        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
7250
0
        LLAMA_LOG_INFO("%s: expert_gating_func   = %s\n",     __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
7251
0
    }
7252
7253
0
    if (arch == LLM_ARCH_GROVEMOE) {
7254
0
        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
7255
0
        LLAMA_LOG_INFO("%s: n_ff_chexp           = %d\n",     __func__, hparams.n_ff_chexp);
7256
0
        LLAMA_LOG_INFO("%s: n_group_experts      = %d\n",     __func__, hparams.n_group_experts);
7257
0
        LLAMA_LOG_INFO("%s: expert_group_scale   = %.2f\n",   __func__, hparams.expert_group_scale);
7258
0
    }
7259
7260
0
    vocab.print_info();
7261
0
}
7262
7263
0
ggml_backend_dev_t llama_model::dev_layer(int il) const {
7264
0
    return pimpl->dev_layer.at(il).dev;
7265
0
}
7266
7267
0
ggml_backend_dev_t llama_model::dev_output() const {
7268
0
    return pimpl->dev_output.dev;
7269
0
}
7270
7271
template<typename F>
7272
0
static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
7273
0
    ggml_init_params params = {
7274
0
        /*.mem_size   =*/ ggml_tensor_overhead()*8,
7275
0
        /*.mem_buffer =*/ NULL,
7276
0
        /*.no_alloc   =*/ true,
7277
0
    };
7278
7279
0
    ggml_context_ptr ctx { ggml_init(params) };
7280
0
    if (!ctx) {
7281
0
        throw std::runtime_error(format("failed to create ggml context"));
7282
0
    }
7283
7284
0
    ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
7285
0
    ggml_tensor * op_tensor = fn(ctx.get());
7286
0
    for (int i = 0; i < GGML_MAX_SRC; i++) {
7287
0
        if (op_tensor->src[i] != nullptr) {
7288
0
            assert(op_tensor->src[i]->buffer == nullptr);
7289
0
            op_tensor->src[i]->buffer = buf.get();
7290
0
        }
7291
0
    }
7292
7293
0
    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
7294
7295
0
    return op_supported;
7296
0
}
7297
7298
template<typename F>
7299
0
static ggml_backend_buffer_type_t select_buft(const buft_list_t & buft_list, const F & fn) {
7300
0
    for (const auto & cur : buft_list) {
7301
0
        ggml_backend_dev_t cur_dev = cur.first;
7302
0
        ggml_backend_buffer_type_t cur_buft = cur.second;
7303
0
        if (buft_supported(cur_buft, cur_dev, fn)) {
7304
0
            return cur_buft;
7305
0
        }
7306
0
    }
7307
7308
0
    throw std::runtime_error(format("no suitable buffer type found"));
7309
0
}
7310
7311
0
ggml_backend_buffer_type_t llama_model::select_buft(int il) const {
7312
0
    return ::select_buft(
7313
0
            *pimpl->dev_layer.at(il).buft_list,
7314
0
            [&](ggml_context * ctx) {
7315
0
                ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
7316
0
                ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
7317
0
                return ggml_add(ctx, cur, layer_dir);
7318
0
            });
7319
0
}
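The member function above probes each candidate buffer type with a representative ggml_add over n_embd-sized tensors. The same ::select_buft helper can be driven by any other test op; a minimal sketch, not from the source (the wrapper name and tensor size are placeholders), using a small F32 mat-mul instead:

static ggml_backend_buffer_type_t select_buft_for_mul_mat(const buft_list_t & buft_list, int64_t n) {
    return select_buft(buft_list, [&](ggml_context * ctx) {
        ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n, n);
        ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n, n);
        return ggml_mul_mat(ctx, a, b); // selected only if the device supports this op
    });
}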
7320
7321
0
bool llama_model::has_tensor_overrides() const {
7322
0
    return pimpl->has_tensor_overrides;
7323
0
}
7324
7325
0
const ggml_tensor * llama_model::get_tensor(const char * name) const {
7326
0
    auto it = std::find_if(tensors_by_name.begin(), tensors_by_name.end(),
7327
0
            [name](const std::pair<std::string, ggml_tensor *> & it) {
7328
0
                return it.first == name;
7329
0
            });
7330
0
    if (it == tensors_by_name.end()) {
7331
0
        return nullptr;
7332
0
    }
7333
7334
0
    return it->second;
7335
0
}
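get_tensor() does a linear search over tensors_by_name and returns nullptr on a miss. A minimal sketch, not from the source (the helper name and tensor name are illustrative), that inspects the token-embedding weight:

static void log_embd_info(const llama_model & model) {
    const ggml_tensor * embd = model.get_tensor("token_embd.weight");
    if (embd != nullptr) {
        LLAMA_LOG_INFO("token_embd.weight: %s, %lld elements\n",
                ggml_type_name(embd->type), (long long) ggml_nelements(embd));
    }
}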
7336
7337
0
float llama_model::get_rope_freq_base (const llama_cparams & cparams, int il) const {
7338
0
    return hparams.is_swa(il) ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base;
7339
0
}
7340
7341
0
float llama_model::get_rope_freq_scale(const llama_cparams & cparams, int il) const {
7342
0
    return hparams.is_swa(il) ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale;
7343
0
}
7344
7345
0
ggml_tensor * llama_model::get_rope_factors(const llama_cparams & cparams, int il) const {
7346
0
    const uint32_t n_ctx_seq = cparams.n_ctx_seq;
7347
7348
    // choose long/short freq factors based on the context size
7349
0
    if (layers[il].rope_freqs != nullptr) {
7350
0
        return layers[il].rope_freqs;
7351
0
    }
7352
7353
0
    if (n_ctx_seq > hparams.n_ctx_orig_yarn) {
7354
0
        return layers[il].rope_long;
7355
0
    }
7356
7357
0
    return layers[il].rope_short;
7358
0
}
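A short worked example of the selection above, assuming the layer has no rope_freqs tensor and hparams.n_ctx_orig_yarn is 4096: a per-sequence context of 8192 picks layers[il].rope_long, while 2048 (or anything not exceeding 4096) picks layers[il].rope_short.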
7359
7360
0
llama_memory_i * llama_model::create_memory(const llama_memory_params & params, const llama_cparams & cparams) const {
7361
0
    llama_memory_i * res;
7362
7363
0
    switch (arch) {
7364
        // Models that need specific instantiation should be handled in the
7365
        // switch statement
7366
0
        case LLM_ARCH_BERT:
7367
0
        case LLM_ARCH_JINA_BERT_V2:
7368
0
        case LLM_ARCH_JINA_BERT_V3:
7369
0
        case LLM_ARCH_NOMIC_BERT:
7370
0
        case LLM_ARCH_NOMIC_BERT_MOE:
7371
0
        case LLM_ARCH_NEO_BERT:
7372
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
7373
0
        case LLM_ARCH_MODERN_BERT:
7374
0
        case LLM_ARCH_GEMMA_EMBEDDING:
7375
0
        case LLM_ARCH_DREAM:
7376
0
        case LLM_ARCH_LLADA:
7377
0
        case LLM_ARCH_LLADA_MOE:
7378
0
        case LLM_ARCH_RND1:
7379
0
            {
7380
0
                res = nullptr;
7381
0
            } break;
7382
        // Models that need standard caching should rely on recurrent/hybrid
7383
        // checks
7384
0
        default:
7385
0
            {
7386
0
                if (llm_arch_is_recurrent(arch)) {
7387
0
                    res = new llama_memory_recurrent(
7388
0
                            *this,
7389
0
                            GGML_TYPE_F32,
7390
0
                            GGML_TYPE_F32,
7391
0
                            cparams.offload_kqv,
7392
0
                            std::max((uint32_t) 1, cparams.n_seq_max),
7393
0
                            cparams.n_seq_max,
7394
0
                            nullptr);
7395
0
                } else if (llm_arch_is_hybrid(arch)) {
7396
7397
                    // The main difference between hybrid architectures is the
7398
                    // layer filters, so pick the right one here
7399
0
                    llama_memory_hybrid::layer_filter_cb filter_attn = nullptr;
7400
0
                    llama_memory_hybrid::layer_filter_cb filter_recr = nullptr;
7401
0
                    if (arch == LLM_ARCH_FALCON_H1) {
7402
0
                        filter_attn = [&](int32_t) { return true; };
7403
0
                        filter_recr = [&](int32_t) { return true; };
7404
0
                    } else if (arch == LLM_ARCH_NEMOTRON_H || arch == LLM_ARCH_NEMOTRON_H_MOE) {
7405
0
                        filter_attn = [&](int32_t il) {
7406
0
                            return !hparams.is_recurrent(il) && hparams.n_ff(il) == 0;
7407
0
                        };
7408
0
                        filter_recr = [&](int32_t il) {
7409
0
                            return hparams.is_recurrent(il) && hparams.n_ff(il) == 0;
7410
0
                        };
7411
0
                    }
7412
7413
0
                    res = new llama_memory_hybrid(
7414
0
                        /* model             */ *this,
7415
0
                        /* attn_type_k       */ params.type_k,
7416
0
                        /* attn_type_v       */ params.type_v,
7417
0
                        /* attn_v_trans      */ !cparams.flash_attn,
7418
0
                        /* attn_kv_size      */ cparams.n_ctx,
7419
0
                        /* attn_n_pad        */ 1,
7420
0
                        /* attn_n_swa        */ hparams.n_swa,
7421
0
                        /* attn_swa_type     */ hparams.swa_type,
7422
0
                        /* recurrent_type_k  */ GGML_TYPE_F32,
7423
0
                        /* recurrent_type_v  */ GGML_TYPE_F32,
7424
0
                        /* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max),
7425
0
                        /* n_seq_max         */ cparams.n_seq_max,
7426
0
                        /* offload           */ cparams.offload_kqv,
7427
0
                        /* unified           */ cparams.kv_unified,
7428
0
                        /* filter_attn       */ std::move(filter_attn),
7429
0
                        /* filter_recr       */ std::move(filter_recr));
7430
0
                } else {
7431
0
                    llama_memory_i::layer_reuse_cb reuse = nullptr;
7432
7433
0
                    if (arch == LLM_ARCH_GEMMA3N) {
7434
0
                        reuse = [&](int32_t il) {
7435
0
                            if (il >= (int32_t) hparams.n_layer_kv_from_start) {
7436
0
                                return (int32_t) hparams.n_layer_kv_from_start - (hparams.is_swa(il) ? 2 : 1);
7437
0
                            }
7438
7439
0
                            return -1;
7440
0
                        };
7441
0
                    }
7442
7443
0
                    if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7444
0
                        GGML_ASSERT(hparams.is_swa_any());
7445
7446
0
                        res = new llama_kv_cache_iswa(
7447
0
                                *this,
7448
0
                                params.type_k,
7449
0
                                params.type_v,
7450
0
                                !cparams.flash_attn,
7451
0
                                cparams.offload_kqv,
7452
0
                                params.swa_full,
7453
0
                                cparams.kv_unified,
7454
0
                                cparams.n_ctx_seq,
7455
0
                                cparams.n_seq_max,
7456
0
                                cparams.n_ubatch,
7457
0
                                1,
7458
0
                                nullptr,
7459
0
                                reuse);
7460
0
                    } else {
7461
0
                        GGML_ASSERT(!hparams.is_swa_any());
7462
7463
0
                        res = new llama_kv_cache(
7464
0
                                *this,
7465
0
                                params.type_k,
7466
0
                                params.type_v,
7467
0
                                !cparams.flash_attn,
7468
0
                                cparams.offload_kqv,
7469
0
                                cparams.kv_unified,
7470
0
                                cparams.n_ctx_seq,
7471
0
                                cparams.n_seq_max,
7472
0
                                1,
7473
0
                                hparams.n_swa,
7474
0
                                hparams.swa_type,
7475
0
                                nullptr,
7476
0
                                nullptr);
7477
0
                    }
7478
0
                }
7479
0
            }
7480
0
    }
7481
7482
0
    return res;
7483
0
}
7484
7485
0
ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
7486
0
    std::unique_ptr<llm_graph_context> llm;
7487
7488
0
    switch (arch) {
7489
0
        case LLM_ARCH_LLAMA:
7490
0
            {
7491
0
                llm = std::make_unique<llm_build_llama<false>>(*this, params);
7492
0
            } break;
7493
0
        case LLM_ARCH_LLAMA4:
7494
0
            {
7495
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_NONE) {
7496
0
                    llm = std::make_unique<llm_build_llama<false>>(*this, params);
7497
0
                } else {
7498
0
                    llm = std::make_unique<llm_build_llama_iswa>(*this, params);
7499
0
                }
7500
0
            } break;
7501
0
        case LLM_ARCH_LLAMA_EMBED:
7502
0
            {
7503
0
                llm = std::make_unique<llm_build_llama<true>>(*this, params);
7504
0
            } break;
7505
0
        case LLM_ARCH_MAINCODER:
7506
0
            {
7507
0
                llm = std::make_unique<llm_build_maincoder>(*this, params);
7508
0
            } break;
7509
0
        case LLM_ARCH_DECI:
7510
0
            {
7511
0
                llm = std::make_unique<llm_build_deci>(*this, params);
7512
0
            } break;
7513
0
        case LLM_ARCH_BAICHUAN:
7514
0
            {
7515
0
                llm = std::make_unique<llm_build_baichuan>(*this, params);
7516
0
            } break;
7517
0
        case LLM_ARCH_FALCON:
7518
0
            {
7519
0
                llm = std::make_unique<llm_build_falcon>(*this, params);
7520
0
            } break;
7521
0
        case LLM_ARCH_GROK:
7522
0
            {
7523
0
                llm = std::make_unique<llm_build_grok>(*this, params);
7524
0
            } break;
7525
0
        case LLM_ARCH_STARCODER:
7526
0
            {
7527
0
                llm = std::make_unique<llm_build_starcoder>(*this, params);
7528
0
            } break;
7529
0
        case LLM_ARCH_REFACT:
7530
0
            {
7531
0
                llm = std::make_unique<llm_build_refact>(*this, params);
7532
0
            } break;
7533
0
        case LLM_ARCH_BERT:
7534
0
        case LLM_ARCH_JINA_BERT_V2:
7535
0
        case LLM_ARCH_JINA_BERT_V3:
7536
0
        case LLM_ARCH_NOMIC_BERT:
7537
0
        case LLM_ARCH_NOMIC_BERT_MOE:
7538
0
            {
7539
0
                llm = std::make_unique<llm_build_bert>(*this, params);
7540
0
            } break;
7541
0
        case LLM_ARCH_MODERN_BERT:
7542
0
            {
7543
0
                llm = std::make_unique<llm_build_modern_bert>(*this, params);
7544
0
            } break;
7545
0
        case LLM_ARCH_NEO_BERT:
7546
0
            {
7547
0
                llm = std::make_unique<llm_build_neo_bert>(*this, params);
7548
0
            } break;
7549
0
        case LLM_ARCH_BLOOM:
7550
0
            {
7551
0
                llm = std::make_unique<llm_build_bloom>(*this, params);
7552
0
            } break;
7553
0
        case LLM_ARCH_MPT:
7554
0
            {
7555
0
                llm = std::make_unique<llm_build_mpt>(*this, params);
7556
0
            } break;
7557
0
        case LLM_ARCH_STABLELM:
7558
0
            {
7559
0
                llm = std::make_unique<llm_build_stablelm>(*this, params);
7560
0
            } break;
7561
0
        case LLM_ARCH_QWEN:
7562
0
            {
7563
0
                llm = std::make_unique<llm_build_qwen>(*this, params);
7564
0
            } break;
7565
0
        case LLM_ARCH_QWEN2:
7566
0
            {
7567
0
                llm = std::make_unique<llm_build_qwen2>(*this, params);
7568
0
            } break;
7569
0
        case LLM_ARCH_DREAM:
7570
0
            {
7571
0
                llm = std::make_unique<llm_build_dream>(*this, params);
7572
0
            }
7573
0
            break;
7574
0
        case LLM_ARCH_LLADA:
7575
0
            {
7576
0
                llm = std::make_unique<llm_build_llada>(*this, params);
7577
0
            }
7578
0
            break;
7579
0
        case LLM_ARCH_LLADA_MOE:
7580
0
            {
7581
0
                llm = std::make_unique<llm_build_llada_moe>(*this, params);
7582
0
            }
7583
0
            break;
7584
0
        case LLM_ARCH_RND1:
7585
0
            {
7586
0
                llm = std::make_unique<llm_build_rnd1>(*this, params);
7587
0
            }
7588
0
            break;
7589
0
        case LLM_ARCH_QWEN2VL:
7590
0
            {
7591
0
                llm = std::make_unique<llm_build_qwen2vl>(*this, params);
7592
0
            } break;
7593
0
        case LLM_ARCH_QWEN2MOE:
7594
0
            {
7595
0
                llm = std::make_unique<llm_build_qwen2moe>(*this, params);
7596
0
            } break;
7597
0
        case LLM_ARCH_QWEN3:
7598
0
            {
7599
0
                llm = std::make_unique<llm_build_qwen3>(*this, params);
7600
0
            } break;
7601
0
        case LLM_ARCH_QWEN3MOE:
7602
0
            {
7603
0
                llm = std::make_unique<llm_build_qwen3moe>(*this, params);
7604
0
            } break;
7605
0
        case LLM_ARCH_QWEN3VL:
7606
0
            {
7607
0
                llm = std::make_unique<llm_build_qwen3vl>(*this, params);
7608
0
            } break;
7609
0
        case LLM_ARCH_QWEN3VLMOE:
7610
0
            {
7611
0
                llm = std::make_unique<llm_build_qwen3vlmoe>(*this, params);
7612
0
            } break;
7613
0
        case LLM_ARCH_PHI2:
7614
0
            {
7615
0
                llm = std::make_unique<llm_build_phi2>(*this, params);
7616
0
            } break;
7617
0
        case LLM_ARCH_PHI3:
7618
0
        case LLM_ARCH_PHIMOE:
7619
0
            {
7620
0
                if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7621
0
                    llm = std::make_unique<llm_build_phi3<true>> (*this, params);
7622
0
                } else {
7623
0
                    llm = std::make_unique<llm_build_phi3<false>>(*this, params);
7624
0
                }
7625
0
            } break;
7626
0
        case LLM_ARCH_PLAMO:
7627
0
            {
7628
0
                llm = std::make_unique<llm_build_plamo>(*this, params);
7629
0
            } break;
7630
0
        case LLM_ARCH_PLAMO2:
7631
0
            {
7632
0
                llm = std::make_unique<llm_build_plamo2>(*this, params);
7633
0
            } break;
7634
0
        case LLM_ARCH_PLAMO3:
7635
0
            {
7636
0
                if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
7637
0
                    llm = std::make_unique<llm_build_plamo3<true>> (*this, params);
7638
0
                } else {
7639
0
                    llm = std::make_unique<llm_build_plamo3<false>>(*this, params);
7640
0
                }
7641
0
            } break;
7642
0
        case LLM_ARCH_GPT2:
7643
0
            {
7644
0
                llm = std::make_unique<llm_build_gpt2>(*this, params);
7645
0
            } break;
7646
0
        case LLM_ARCH_CODESHELL:
7647
0
            {
7648
0
                llm = std::make_unique<llm_build_codeshell>(*this, params);
7649
0
            } break;
7650
0
        case LLM_ARCH_ORION:
7651
0
            {
7652
0
                llm = std::make_unique<llm_build_orion>(*this, params);
7653
0
            } break;
7654
0
        case LLM_ARCH_INTERNLM2:
7655
0
            {
7656
0
                llm = std::make_unique<llm_build_internlm2>(*this, params);
7657
0
            } break;
7658
0
        case LLM_ARCH_MINICPM3:
7659
0
            {
7660
0
                llm = std::make_unique<llm_build_minicpm3>(*this, params);
7661
0
            } break;
7662
0
        case LLM_ARCH_GEMMA:
7663
0
            {
7664
0
                llm = std::make_unique<llm_build_gemma>(*this, params);
7665
0
            } break;
7666
0
        case LLM_ARCH_GEMMA2:
7667
0
            {
7668
0
                llm = std::make_unique<llm_build_gemma2_iswa>(*this, params);
7669
0
            } break;
7670
0
        case LLM_ARCH_GEMMA3:
7671
0
            {
7672
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
7673
0
                    llm = std::make_unique<llm_build_gemma3<true>>(*this, params);
7674
0
                } else {
7675
0
                    llm = std::make_unique<llm_build_gemma3<false>>(*this, params);
7676
0
                }
7677
0
            } break;
7678
0
        case LLM_ARCH_GEMMA3N:
7679
0
            {
7680
0
                llm = std::make_unique<llm_build_gemma3n_iswa>(*this, params);
7681
0
            } break;
7682
0
        case LLM_ARCH_GEMMA_EMBEDDING:
7683
0
            {
7684
0
                llm = std::make_unique<llm_build_gemma_embedding>(*this, params);
7685
0
            } break;
7686
0
        case LLM_ARCH_STARCODER2:
7687
0
            {
7688
0
                llm = std::make_unique<llm_build_starcoder2>(*this, params);
7689
0
            } break;
7690
0
        case LLM_ARCH_MAMBA:
7691
0
        case LLM_ARCH_MAMBA2:
7692
0
            {
7693
0
                llm = std::make_unique<llm_build_mamba>(*this, params);
7694
0
            } break;
7695
0
        case LLM_ARCH_JAMBA:
7696
0
            {
7697
0
                llm = std::make_unique<llm_build_jamba>(*this, params);
7698
0
            } break;
7699
0
        case LLM_ARCH_XVERSE:
7700
0
            {
7701
0
                llm = std::make_unique<llm_build_xverse>(*this, params);
7702
0
            } break;
7703
0
        case LLM_ARCH_COMMAND_R:
7704
0
            {
7705
0
                llm = std::make_unique<llm_build_command_r>(*this, params);
7706
0
            } break;
7707
0
        case LLM_ARCH_COHERE2:
7708
0
            {
7709
0
                llm = std::make_unique<llm_build_cohere2_iswa>(*this, params);
7710
0
            } break;
7711
0
        case LLM_ARCH_DBRX:
7712
0
            {
7713
0
                llm = std::make_unique<llm_build_dbrx>(*this, params);
7714
0
            } break;
7715
0
        case LLM_ARCH_OLMO:
7716
0
            {
7717
0
                llm = std::make_unique<llm_build_olmo>(*this, params);
7718
0
            } break;
7719
0
        case LLM_ARCH_OLMO2:
7720
0
            {
7721
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
7722
0
                    llm = std::make_unique<llm_build_olmo2<true>>(*this, params);
7723
0
                } else {
7724
0
                    llm = std::make_unique<llm_build_olmo2<false>>(*this, params);
7725
0
                }
7726
0
            } break;
7727
0
        case LLM_ARCH_OLMOE:
7728
0
            {
7729
0
                llm = std::make_unique<llm_build_olmoe>(*this, params);
7730
0
            } break;
7731
0
        case LLM_ARCH_OPENELM:
7732
0
            {
7733
0
                llm = std::make_unique<llm_build_openelm>(*this, params);
7734
0
            } break;
7735
0
        case LLM_ARCH_GPTNEOX:
7736
0
            {
7737
0
                llm = std::make_unique<llm_build_gptneox>(*this, params);
7738
0
            } break;
7739
0
        case LLM_ARCH_ARCTIC:
7740
0
            {
7741
0
                llm = std::make_unique<llm_build_arctic>(*this, params);
7742
0
            } break;
7743
0
        case LLM_ARCH_DEEPSEEK:
7744
0
            {
7745
0
                llm = std::make_unique<llm_build_deepseek>(*this, params);
7746
0
            } break;
7747
0
        case LLM_ARCH_DEEPSEEK2:
7748
0
            {
7749
0
                llm = std::make_unique<llm_build_deepseek2>(*this, params);
7750
0
            } break;
7751
0
        case LLM_ARCH_CHATGLM:
7752
0
            {
7753
0
                llm = std::make_unique<llm_build_chatglm>(*this, params);
7754
0
            } break;
7755
0
        case LLM_ARCH_GLM4:
7756
0
            {
7757
0
                llm = std::make_unique<llm_build_glm4>(*this, params);
7758
0
            } break;
7759
0
        case LLM_ARCH_GLM4_MOE:
7760
0
            {
7761
0
                llm = std::make_unique<llm_build_glm4_moe>(*this, params);
7762
0
            } break;
7763
0
        case LLM_ARCH_BITNET:
7764
0
            {
7765
0
                llm = std::make_unique<llm_build_bitnet>(*this, params);
7766
0
            } break;
7767
0
        case LLM_ARCH_T5:
7768
0
            {
7769
0
                switch (params.gtype) {
7770
0
                    case LLM_GRAPH_TYPE_ENCODER:
7771
0
                        llm = std::make_unique<llm_build_t5_enc>(*this, params);
7772
0
                        break;
7773
0
                    case LLM_GRAPH_TYPE_DEFAULT:
7774
0
                    case LLM_GRAPH_TYPE_DECODER:
7775
0
                        llm = std::make_unique<llm_build_t5_dec>(*this, params);
7776
0
                        break;
7777
0
                    default:
7778
0
                        GGML_ABORT("invalid graph type");
7779
0
                };
7780
0
            } break;
7781
0
        case LLM_ARCH_T5ENCODER:
7782
0
            {
7783
0
                llm = std::make_unique<llm_build_t5_enc>(*this, params);
7784
0
            }
7785
0
            break;
7786
0
        case LLM_ARCH_JAIS:
7787
0
            {
7788
0
                llm = std::make_unique<llm_build_jais>(*this, params);
7789
0
            } break;
7790
0
        case LLM_ARCH_NEMOTRON:
7791
0
            {
7792
0
                llm = std::make_unique<llm_build_nemotron>(*this, params);
7793
0
            } break;
7794
0
        case LLM_ARCH_NEMOTRON_H:
7795
0
        case LLM_ARCH_NEMOTRON_H_MOE:
7796
0
            {
7797
0
                llm = std::make_unique<llm_build_nemotron_h>(*this, params);
7798
0
            } break;
7799
0
        case LLM_ARCH_EXAONE:
7800
0
            {
7801
0
                llm = std::make_unique<llm_build_exaone>(*this, params);
7802
0
            } break;
7803
0
        case LLM_ARCH_EXAONE4:
7804
0
            {
7805
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
7806
0
                    llm = std::make_unique<llm_build_exaone4<true>>(*this, params);
7807
0
                } else {
7808
0
                    llm = std::make_unique<llm_build_exaone4<false>>(*this, params);
7809
0
                }
7810
0
            } break;
7811
0
        case LLM_ARCH_RWKV6:
7812
0
            {
7813
0
                llm = std::make_unique<llm_build_rwkv6>(*this, params);
7814
0
            } break;
7815
0
        case LLM_ARCH_RWKV6QWEN2:
7816
0
            {
7817
0
                llm = std::make_unique<llm_build_rwkv6qwen2>(*this, params);
7818
0
            } break;
7819
0
        case LLM_ARCH_RWKV7:
7820
0
            {
7821
0
                llm = std::make_unique<llm_build_rwkv7>(*this, params);
7822
0
            } break;
7823
0
        case LLM_ARCH_ARWKV7:
7824
0
            {
7825
0
                llm = std::make_unique<llm_build_arwkv7>(*this, params);
7826
0
            } break;
7827
0
        case LLM_ARCH_GRANITE:
7828
0
        case LLM_ARCH_GRANITE_MOE:
7829
0
        case LLM_ARCH_MINICPM:
7830
0
            {
7831
0
                llm = std::make_unique<llm_build_granite>(*this, params);
7832
0
            } break;
7833
0
        case LLM_ARCH_GRANITE_HYBRID:
7834
0
            {
7835
0
                llm = std::make_unique<llm_build_granite_hybrid>(*this, params);
7836
0
            } break;
7837
0
        case LLM_ARCH_CHAMELEON:
7838
0
            {
7839
0
                llm = std::make_unique<llm_build_chameleon>(*this, params);
7840
0
            } break;
7841
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
7842
0
            {
7843
0
                llm = std::make_unique<llm_build_wavtokenizer_dec>(*this, params);
7844
0
            } break;
7845
0
        case LLM_ARCH_PLM:
7846
0
            {
7847
0
                llm = std::make_unique<llm_build_plm>(*this, params);
7848
0
            } break;
7849
0
        case LLM_ARCH_BAILINGMOE:
7850
0
            {
7851
0
                llm = std::make_unique<llm_build_bailingmoe>(*this, params);
7852
0
            } break;
7853
0
        case LLM_ARCH_BAILINGMOE2:
7854
0
            {
7855
0
                llm = std::make_unique<llm_build_bailingmoe2>(*this, params);
7856
0
            } break;
7857
0
        case LLM_ARCH_SEED_OSS:
7858
0
            {
7859
0
                llm = std::make_unique<llm_build_seed_oss>(*this, params);
7860
0
            } break;
7861
0
        case LLM_ARCH_DOTS1:
7862
0
            {
7863
0
                llm = std::make_unique<llm_build_dots1>(*this, params);
7864
0
            } break;
7865
0
        case LLM_ARCH_ARCEE:
7866
0
            {
7867
0
                llm = std::make_unique<llm_build_arcee>(*this, params);
7868
0
            } break;
7869
0
        case LLM_ARCH_AFMOE:
7870
0
            {
7871
0
                llm = std::make_unique<llm_build_afmoe>(*this, params);
7872
0
            } break;
7873
0
        case LLM_ARCH_ERNIE4_5:
7874
0
            {
7875
0
                llm = std::make_unique<llm_build_ernie4_5>(*this, params);
7876
0
            } break;
7877
0
        case LLM_ARCH_ERNIE4_5_MOE:
7878
0
            {
7879
0
                llm = std::make_unique<llm_build_ernie4_5_moe>(*this, params);
7880
0
            } break;
7881
0
        case LLM_ARCH_HUNYUAN_MOE:
7882
0
            {
7883
0
                llm = std::make_unique<llm_build_hunyuan_moe>(*this, params);
7884
0
            } break;
7885
0
        case LLM_ARCH_HUNYUAN_DENSE:
7886
0
            {
7887
0
                llm = std::make_unique<llm_build_hunyuan_dense>(*this, params);
7888
0
            } break;
7889
0
        case LLM_ARCH_SMOLLM3:
7890
0
            {
7891
0
                llm = std::make_unique<llm_build_smollm3>(*this, params);
7892
0
            } break;
7893
0
        case LLM_ARCH_OPENAI_MOE:
7894
0
            {
7895
0
                llm = std::make_unique<llm_build_openai_moe_iswa>(*this, params);
7896
0
            } break;
7897
0
        case LLM_ARCH_FALCON_H1:
7898
0
            {
7899
0
                llm = std::make_unique<llm_build_falcon_h1>(*this, params);
7900
0
            } break;
7901
0
        case LLM_ARCH_LFM2:
7902
0
        case LLM_ARCH_LFM2MOE:
7903
0
            {
7904
0
                llm = std::make_unique<llm_build_lfm2>(*this, params);
7905
0
            } break;
7906
0
        case LLM_ARCH_SMALLTHINKER:
7907
0
            {
7908
0
                if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) {
7909
0
                    llm = std::make_unique<llm_build_smallthinker<true>> (*this, params);
7910
0
                } else {
7911
0
                    llm = std::make_unique<llm_build_smallthinker<false>>(*this, params);
7912
0
                }
7913
0
            } break;
7914
0
        case LLM_ARCH_GROVEMOE:
7915
0
            {
7916
0
                llm = std::make_unique<llm_build_grovemoe>(*this, params);
7917
0
            } break;
7918
0
        case LLM_ARCH_APERTUS:
7919
0
            {
7920
0
                llm = std::make_unique<llm_build_apertus>(*this, params);
7921
0
            } break;
7922
0
        case LLM_ARCH_MINIMAX_M2:
7923
0
            {
7924
0
                llm = std::make_unique<llm_build_minimax_m2>(*this, params);
7925
0
            } break;
7926
0
        case LLM_ARCH_COGVLM:
7927
0
            {
7928
0
                llm = std::make_unique<llm_build_cogvlm>(*this, params);
7929
0
            } break;
7930
0
        case LLM_ARCH_PANGU_EMBED:
7931
0
            {
7932
0
                llm = std::make_unique<llm_build_pangu_embedded>(*this, params);
7933
0
            } break;
7934
0
        case LLM_ARCH_QWEN3NEXT:
7935
0
            {
7936
0
                llm = std::make_unique<llm_build_qwen3next>(*this, params);
7937
0
            } break;
7938
0
        case LLM_ARCH_MISTRAL3:
7939
0
            {
7940
0
                llm = std::make_unique<llm_build_mistral3>(*this, params);
7941
0
            } break;
7942
0
        case LLM_ARCH_MIMO2:
7943
0
            {
7944
0
                llm = std::make_unique<llm_build_mimo2_iswa>(*this, params);
7945
0
            } break;
7946
0
        default:
7947
0
            GGML_ABORT("fatal error");
7948
0
    }
7949
7950
    // add on pooling layer
7951
0
    llm->build_pooling(cls, cls_b, cls_out, cls_out_b);
7952
7953
    // add backend sampling layers (if any)
7954
0
    llm->build_sampling();
7955
7956
    // if the gguf model was converted with --sentence-transformers-dense-modules
7957
    // there will be two additional dense projection layers
7958
    // dense linear projections are applied after pooling
7959
    // TODO: move reranking logic here and generalize
7960
0
    llm->build_dense_out(dense_2_out_layers, dense_3_out_layers);
7961
7962
0
    llm->res->set_outputs();
7963
7964
0
    return llm->res->get_gf();
7965
0
}
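The switch above is the single dispatch point from LLM_ARCH_* values to their graph-builder classes. As an illustration only (LLM_ARCH_MY_ARCH and llm_build_my_arch are hypothetical names, not symbols in the tree, and the enum entry plus builder class would also have to be registered elsewhere), a new architecture would be wired in with one more case following the same pattern:

        // hypothetical architecture, shown only to illustrate the dispatch pattern
        case LLM_ARCH_MY_ARCH:
            {
                llm = std::make_unique<llm_build_my_arch>(*this, params);
            } break;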
7966
7967
7968
//
7969
// interface implementation
7970
//
7971
7972
0
llama_model_params llama_model_default_params() {
7973
0
    llama_model_params result = {
7974
0
        /*.devices                     =*/ nullptr,
7975
0
        /*.tensor_buft_overrides       =*/ nullptr,
7976
0
        /*.n_gpu_layers                =*/ -1,
7977
0
        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
7978
0
        /*.main_gpu                    =*/ 0,
7979
0
        /*.tensor_split                =*/ nullptr,
7980
0
        /*.progress_callback           =*/ nullptr,
7981
0
        /*.progress_callback_user_data =*/ nullptr,
7982
0
        /*.kv_overrides                =*/ nullptr,
7983
0
        /*.vocab_only                  =*/ false,
7984
0
        /*.use_mmap                    =*/ true,
7985
0
        /*.use_direct_io               =*/ true,
7986
0
        /*.use_mlock                   =*/ false,
7987
0
        /*.check_tensors               =*/ false,
7988
0
        /*.use_extra_bufts             =*/ true,
7989
0
        /*.no_host                     =*/ false,
7990
0
        /*.no_alloc                    =*/ false,
7991
0
    };
7992
7993
0
    return result;
7994
0
}
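A minimal usage sketch of the defaults above, assuming the usual llama.h loading entry point (llama_model_load_from_file; the path and the overridden fields are illustrative): take the default parameters, tweak a couple of fields, and load a model. The returned model is released with llama_model_free, defined just below.

#include "llama.h"

llama_model * load_example(const char * path /* e.g. a local .gguf file */) {
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 0;    // force CPU-only for this example
    mparams.use_mmap     = true; // already the default, shown for clarity
    // returns nullptr on failure
    return llama_model_load_from_file(path, mparams);
}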
7995
7996
0
const llama_vocab * llama_model_get_vocab(const llama_model * model) {
7997
0
    return &model->vocab;
7998
0
}
7999
8000
0
void llama_free_model(llama_model * model) {
8001
0
    llama_model_free(model);
8002
0
}
8003
8004
0
void llama_model_free(llama_model * model) {
8005
0
    delete model;
8006
0
}
8007
8008
0
int32_t llama_model_n_ctx_train(const llama_model * model) {
8009
0
    return model->hparams.n_ctx_train;
8010
0
}
8011
8012
0
int32_t llama_model_n_embd(const llama_model * model) {
8013
0
    return model->hparams.n_embd;
8014
0
}
8015
8016
0
int32_t llama_model_n_embd_inp(const llama_model * model) {
8017
0
    return model->hparams.n_embd_inp();
8018
0
}
8019
8020
0
int32_t llama_model_n_embd_out(const llama_model * model) {
8021
0
    return model->hparams.get_n_embd_out();
8022
0
}
8023
8024
0
int32_t llama_model_n_layer(const llama_model * model) {
8025
0
    return model->hparams.n_layer;
8026
0
}
8027
8028
0
int32_t llama_model_n_head(const llama_model * model) {
8029
0
    return model->hparams.n_head();
8030
0
}
8031
8032
0
int32_t llama_model_n_head_kv(const llama_model * model) {
8033
0
    return model->hparams.n_head_kv();
8034
0
}
8035
8036
0
int32_t llama_model_n_swa(const llama_model * model) {
8037
0
    return model->hparams.n_swa;
8038
0
}
8039
8040
0
uint32_t llama_model_n_cls_out(const struct llama_model * model) {
8041
0
    return model->hparams.n_cls_out;
8042
0
}
8043
8044
0
const char * llama_model_cls_label(const struct llama_model * model, uint32_t i) {
8045
0
    if (i < model->classifier_labels.size()) {
8046
0
        return model->classifier_labels[i].c_str();
8047
0
    }
8048
8049
0
    return nullptr;
8050
0
}
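For classifier models the two accessors above pair naturally: llama_model_n_cls_out gives the number of output classes and llama_model_cls_label maps an index to its label, returning nullptr when the model carries no label for that index. A minimal sketch:

#include "llama.h"
#include <cstdio>

void print_cls_labels(const llama_model * model) {
    const uint32_t n = llama_model_n_cls_out(model);
    for (uint32_t i = 0; i < n; ++i) {
        const char * label = llama_model_cls_label(model, i);
        printf("class %u: %s\n", i, label ? label : "(unnamed)");
    }
}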
8051
8052
// deprecated
8053
0
int32_t llama_n_ctx_train(const llama_model * model) {
8054
0
    return llama_model_n_ctx_train(model);
8055
0
}
8056
8057
// deprecated
8058
0
int32_t llama_n_embd(const llama_model * model) {
8059
0
    return llama_model_n_embd(model);
8060
0
}
8061
8062
// deprecated
8063
0
int32_t llama_n_layer(const llama_model * model) {
8064
0
    return llama_model_n_layer(model);
8065
0
}
8066
8067
// deprecated
8068
0
int32_t llama_n_head(const llama_model * model) {
8069
0
    return llama_model_n_head(model);
8070
0
}
8071
8072
0
llama_rope_type llama_model_rope_type(const llama_model * model) {
8073
0
    switch (model->arch) {
8074
        // these models do not use RoPE
8075
0
        case LLM_ARCH_CLIP:
8076
0
        case LLM_ARCH_GPT2:
8077
0
        case LLM_ARCH_GPTJ:
8078
0
        case LLM_ARCH_MPT:
8079
0
        case LLM_ARCH_REFACT:
8080
0
        case LLM_ARCH_BLOOM:
8081
0
        case LLM_ARCH_MAMBA:
8082
0
        case LLM_ARCH_MAMBA2:
8083
0
        case LLM_ARCH_JAMBA:
8084
0
        case LLM_ARCH_JINA_BERT_V2:
8085
0
        case LLM_ARCH_T5:
8086
0
        case LLM_ARCH_T5ENCODER:
8087
0
        case LLM_ARCH_JAIS:
8088
0
        case LLM_ARCH_RWKV6:
8089
0
        case LLM_ARCH_RWKV6QWEN2:
8090
0
        case LLM_ARCH_RWKV7:
8091
0
        case LLM_ARCH_ARWKV7:
8092
0
        case LLM_ARCH_WAVTOKENIZER_DEC:
8093
0
        case LLM_ARCH_NEMOTRON_H:
8094
0
        case LLM_ARCH_NEMOTRON_H_MOE:
8095
0
            return LLAMA_ROPE_TYPE_NONE;
8096
8097
        // use what we call a normal RoPE, operating on pairs of consecutive head values
8098
0
        case LLM_ARCH_LLAMA:
8099
0
        case LLM_ARCH_LLADA:
8100
0
        case LLM_ARCH_LLAMA4:
8101
0
        case LLM_ARCH_DECI:
8102
0
        case LLM_ARCH_BAICHUAN:
8103
0
        case LLM_ARCH_STARCODER:
8104
0
        case LLM_ARCH_INTERNLM2:
8105
0
        case LLM_ARCH_MINICPM:
8106
0
        case LLM_ARCH_XVERSE:
8107
0
        case LLM_ARCH_COMMAND_R:
8108
0
        case LLM_ARCH_COHERE2:
8109
0
        case LLM_ARCH_OLMO:
8110
0
        case LLM_ARCH_ARCTIC:
8111
0
        case LLM_ARCH_DEEPSEEK:
8112
0
        case LLM_ARCH_DEEPSEEK2:
8113
0
        case LLM_ARCH_PLM:
8114
0
        case LLM_ARCH_CHATGLM:
8115
0
        case LLM_ARCH_GRANITE:
8116
0
        case LLM_ARCH_GRANITE_MOE:
8117
0
        case LLM_ARCH_GRANITE_HYBRID:
8118
0
        case LLM_ARCH_CHAMELEON:
8119
0
        case LLM_ARCH_BAILINGMOE:
8120
0
        case LLM_ARCH_NEO_BERT:
8121
0
        case LLM_ARCH_SMOLLM3:
8122
0
        case LLM_ARCH_ARCEE:
8123
0
        case LLM_ARCH_ERNIE4_5:
8124
0
        case LLM_ARCH_ERNIE4_5_MOE:
8125
0
        case LLM_ARCH_MISTRAL3:
8126
0
        case LLM_ARCH_LLAMA_EMBED:
8127
0
        case LLM_ARCH_MAINCODER:
8128
0
            return LLAMA_ROPE_TYPE_NORM;
8129
8130
        // the pairs of head values are offset by n_rot/2
8131
0
        case LLM_ARCH_FALCON:
8132
0
        case LLM_ARCH_FALCON_H1:
8133
0
        case LLM_ARCH_GROK:
8134
0
        case LLM_ARCH_DBRX:
8135
0
        case LLM_ARCH_BERT:
8136
0
        case LLM_ARCH_JINA_BERT_V3:
8137
0
        case LLM_ARCH_MODERN_BERT:
8138
0
        case LLM_ARCH_NOMIC_BERT:
8139
0
        case LLM_ARCH_NOMIC_BERT_MOE:
8140
0
        case LLM_ARCH_STABLELM:
8141
0
        case LLM_ARCH_BITNET:
8142
0
        case LLM_ARCH_QWEN:
8143
0
        case LLM_ARCH_QWEN2:
8144
0
        case LLM_ARCH_DREAM:
8145
0
        case LLM_ARCH_QWEN2MOE:
8146
0
        case LLM_ARCH_QWEN3:
8147
0
        case LLM_ARCH_QWEN3MOE:
8148
0
        case LLM_ARCH_LLADA_MOE:
8149
0
        case LLM_ARCH_RND1:
8150
0
        case LLM_ARCH_OLMO2:
8151
0
        case LLM_ARCH_OLMOE:
8152
0
        case LLM_ARCH_PHI2:
8153
0
        case LLM_ARCH_PHI3:
8154
0
        case LLM_ARCH_PHIMOE:
8155
0
        case LLM_ARCH_PLAMO:
8156
0
        case LLM_ARCH_PLAMO2:
8157
0
        case LLM_ARCH_PLAMO3:
8158
0
        case LLM_ARCH_GEMMA:
8159
0
        case LLM_ARCH_GEMMA2:
8160
0
        case LLM_ARCH_GEMMA3:
8161
0
        case LLM_ARCH_GEMMA3N:
8162
0
        case LLM_ARCH_GEMMA_EMBEDDING:
8163
0
        case LLM_ARCH_STARCODER2:
8164
0
        case LLM_ARCH_OPENELM:
8165
0
        case LLM_ARCH_GPTNEOX:
8166
0
        case LLM_ARCH_CODESHELL:
8167
0
        case LLM_ARCH_ORION:
8168
0
        case LLM_ARCH_NEMOTRON:
8169
0
        case LLM_ARCH_EXAONE:
8170
0
        case LLM_ARCH_EXAONE4:
8171
0
        case LLM_ARCH_MINICPM3:
8172
0
        case LLM_ARCH_BAILINGMOE2:
8173
0
        case LLM_ARCH_DOTS1:
8174
0
        case LLM_ARCH_HUNYUAN_MOE:
8175
0
        case LLM_ARCH_OPENAI_MOE:
8176
0
        case LLM_ARCH_HUNYUAN_DENSE:
8177
0
        case LLM_ARCH_LFM2:
8178
0
        case LLM_ARCH_LFM2MOE:
8179
0
        case LLM_ARCH_SMALLTHINKER:
8180
0
        case LLM_ARCH_SEED_OSS:
8181
0
        case LLM_ARCH_GROVEMOE:
8182
0
        case LLM_ARCH_APERTUS:
8183
0
        case LLM_ARCH_MINIMAX_M2:
8184
0
        case LLM_ARCH_COGVLM:
8185
0
        case LLM_ARCH_PANGU_EMBED:
8186
0
        case LLM_ARCH_AFMOE:
8187
0
        case LLM_ARCH_QWEN3NEXT:
8188
0
        case LLM_ARCH_MIMO2:
8189
0
            return LLAMA_ROPE_TYPE_NEOX;
8190
8191
0
        case LLM_ARCH_QWEN2VL:
8192
0
            return LLAMA_ROPE_TYPE_MROPE;
8193
0
        case LLM_ARCH_QWEN3VL:
8194
0
        case LLM_ARCH_QWEN3VLMOE:
8195
0
            return LLAMA_ROPE_TYPE_IMROPE;
8196
8197
0
        case LLM_ARCH_GLM4:
8198
0
            return model->hparams.use_mrope() ? LLAMA_ROPE_TYPE_MROPE : LLAMA_ROPE_TYPE_NORM;
8199
0
        case LLM_ARCH_GLM4_MOE:
8200
0
            return model->hparams.use_mrope() ? LLAMA_ROPE_TYPE_MROPE : LLAMA_ROPE_TYPE_NEOX;
8201
8202
        // all model arches should be listed explicitly here
8203
0
        case LLM_ARCH_UNKNOWN:
8204
0
            GGML_ABORT("unknown architecture");
8205
0
    }
8206
8207
0
    return LLAMA_ROPE_TYPE_NONE;
8208
0
}
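Callers can branch on the reported RoPE family without knowing the architecture. A small sketch using only the enum values returned above (the printed descriptions follow the comments in the switch):

#include "llama.h"
#include <cstdio>

void print_rope_type(const llama_model * model) {
    switch (llama_model_rope_type(model)) {
        case LLAMA_ROPE_TYPE_NONE:   printf("no RoPE\n");                               break;
        case LLAMA_ROPE_TYPE_NORM:   printf("normal RoPE (consecutive pairs)\n");       break;
        case LLAMA_ROPE_TYPE_NEOX:   printf("NeoX-style RoPE (pairs offset n_rot/2)\n");break;
        case LLAMA_ROPE_TYPE_MROPE:  printf("M-RoPE\n");                                break;
        case LLAMA_ROPE_TYPE_IMROPE: printf("interleaved M-RoPE\n");                    break;
        default:                     printf("unknown\n");                               break;
    }
}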
8209
8210
0
float llama_model_rope_freq_scale_train(const llama_model * model) {
8211
0
    return model->hparams.rope_freq_scale_train;
8212
0
}
8213
8214
0
int32_t llama_model_meta_val_str(const llama_model * model, const char * key, char * buf, size_t buf_size) {
8215
0
    const auto & it = model->gguf_kv.find(key);
8216
0
    if (it == model->gguf_kv.end()) {
8217
0
        if (buf_size > 0) {
8218
0
            buf[0] = '\0';
8219
0
        }
8220
0
        return -1;
8221
0
    }
8222
0
    return snprintf(buf, buf_size, "%s", it->second.c_str());
8223
0
}
8224
8225
0
int32_t llama_model_meta_count(const llama_model * model) {
8226
0
    return (int)model->gguf_kv.size();
8227
0
}
8228
8229
0
const char * llama_model_meta_key_str(llama_model_meta_key key) {
8230
0
    switch (key) {
8231
0
        case LLAMA_MODEL_META_KEY_SAMPLING_SEQUENCE:        return "general.sampling.sequence";
8232
0
        case LLAMA_MODEL_META_KEY_SAMPLING_TOP_K:           return "general.sampling.top_k";
8233
0
        case LLAMA_MODEL_META_KEY_SAMPLING_TOP_P:           return "general.sampling.top_p";
8234
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIN_P:           return "general.sampling.min_p";
8235
0
        case LLAMA_MODEL_META_KEY_SAMPLING_XTC_PROBABILITY: return "general.sampling.xtc_probability";
8236
0
        case LLAMA_MODEL_META_KEY_SAMPLING_XTC_THRESHOLD:   return "general.sampling.xtc_threshold";
8237
0
        case LLAMA_MODEL_META_KEY_SAMPLING_TEMP:            return "general.sampling.temp";
8238
0
        case LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_LAST_N:  return "general.sampling.penalty_last_n";
8239
0
        case LLAMA_MODEL_META_KEY_SAMPLING_PENALTY_REPEAT:  return "general.sampling.penalty_repeat";
8240
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT:        return "general.sampling.mirostat";
8241
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_TAU:    return "general.sampling.mirostat_tau";
8242
0
        case LLAMA_MODEL_META_KEY_SAMPLING_MIROSTAT_ETA:    return "general.sampling.mirostat_eta";
8243
0
        default:                                            return nullptr;
8244
0
    }
8245
0
}
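The enum-to-key mapping above lets tools read suggested sampling defaults from model metadata without hard-coding GGUF key strings. A hedged sketch that reads the suggested temperature if present, assuming the value is stored as a numeric string:

#include "llama.h"
#include <cstdio>
#include <cstdlib>

// returns `fallback` when the model carries no suggested temperature
float suggested_temp(const llama_model * model, float fallback) {
    const char * key = llama_model_meta_key_str(LLAMA_MODEL_META_KEY_SAMPLING_TEMP);
    char buf[128];
    if (key && llama_model_meta_val_str(model, key, buf, sizeof(buf)) >= 0) {
        return (float) atof(buf);
    }
    return fallback;
}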
8246
8247
0
int32_t llama_model_meta_key_by_index(const llama_model * model, int i, char * buf, size_t buf_size) {
8248
0
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
8249
0
        if (buf_size > 0) {
8250
0
            buf[0] = '\0';
8251
0
        }
8252
0
        return -1;
8253
0
    }
8254
0
    auto it = model->gguf_kv.begin();
8255
0
    std::advance(it, i);
8256
0
    return snprintf(buf, buf_size, "%s", it->first.c_str());
8257
0
}
8258
8259
0
int32_t llama_model_meta_val_str_by_index(const llama_model * model, int32_t i, char * buf, size_t buf_size) {
8260
0
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
8261
0
        if (buf_size > 0) {
8262
0
            buf[0] = '\0';
8263
0
        }
8264
0
        return -1;
8265
0
    }
8266
0
    auto it = model->gguf_kv.begin();
8267
0
    std::advance(it, i);
8268
0
    return snprintf(buf, buf_size, "%s", it->second.c_str());
8269
0
}
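Together, llama_model_meta_count and the two index-based accessors support a full dump of the model's GGUF key/value metadata; note the -1 return and the empty-string write-back on out-of-range indices. A minimal sketch:

#include "llama.h"
#include <cstdio>

void dump_meta(const llama_model * model) {
    char key[256];
    char val[256];
    const int32_t n = llama_model_meta_count(model);
    for (int32_t i = 0; i < n; ++i) {
        if (llama_model_meta_key_by_index    (model, i, key, sizeof(key)) >= 0 &&
            llama_model_meta_val_str_by_index(model, i, val, sizeof(val)) >= 0) {
            printf("%s = %s\n", key, val);
        }
    }
}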
8270
8271
0
int32_t llama_model_desc(const llama_model * model, char * buf, size_t buf_size) {
8272
0
    return snprintf(buf, buf_size, "%s", model->desc().c_str());
8273
0
}
8274
8275
0
uint64_t llama_model_size(const llama_model * model) {
8276
0
    return model->size();
8277
0
}
8278
8279
0
const char * llama_model_chat_template(const llama_model * model, const char * name) {
8280
0
    const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE)
8281
0
        : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE);
8282
0
    const auto & it = model->gguf_kv.find(key);
8283
0
    if (it == model->gguf_kv.end()) {
8284
        // one-off fix for very popular models (so we are not flooded with issues)
8285
        // do not extend this list unless absolutely necessary
8286
        // Mistral-Small-2503 does not have built-in chat template
8287
0
        llama_vocab_pre_type pre_type = model->vocab.get_pre_type();
8288
0
        if (!name && pre_type == LLAMA_VOCAB_PRE_TYPE_TEKKEN && model->layers.size() == 40) {
8289
0
            return "mistral-v7-tekken";
8290
0
        }
8291
8292
0
        return nullptr;
8293
0
    }
8294
8295
0
    return it->second.c_str();
8296
0
}
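The name parameter selects a named template variant; passing nullptr asks for the default one, and the function returns nullptr when the model ships no template at all (modulo the Mistral-Small one-off above). A minimal sketch:

#include "llama.h"
#include <cstdio>

void show_chat_template(const llama_model * model) {
    const char * tmpl = llama_model_chat_template(model, /*name =*/ nullptr);
    if (tmpl == nullptr) {
        printf("model has no built-in chat template\n");
        return;
    }
    // the string is either a template body or a known template id
    // such as "mistral-v7-tekken"
    printf("chat template: %s\n", tmpl);
}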
8297
8298
0
uint64_t llama_model_n_params(const llama_model * model) {
8299
0
    return model->n_elements();
8300
0
}
8301
8302
0
bool llama_model_has_encoder(const llama_model * model) {
8303
0
    switch (model->arch) {
8304
0
        case LLM_ARCH_T5:        return true;
8305
0
        case LLM_ARCH_T5ENCODER: return true;
8306
0
        default:                 return false;
8307
0
    }
8308
0
}
8309
8310
0
bool llama_model_has_decoder(const llama_model * model) {
8311
0
    switch (model->arch) {
8312
0
        case LLM_ARCH_T5ENCODER: return false;
8313
0
        default:                 return true;
8314
0
    }
8315
0
}
8316
8317
0
llama_token llama_model_decoder_start_token(const llama_model * model) {
8318
0
    return model->hparams.dec_start_token_id;
8319
0
}
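Encoder-decoder models (the T5 family here) need an explicit encode pass and a decoder start token, while decoder-only models skip both. A sketch of the capability checks, using only the accessors defined above:

#include "llama.h"
#include <cstdio>

void print_enc_dec_info(const llama_model * model) {
    const bool has_enc = llama_model_has_encoder(model);
    const bool has_dec = llama_model_has_decoder(model);
    printf("encoder: %s, decoder: %s\n", has_enc ? "yes" : "no", has_dec ? "yes" : "no");
    if (has_enc && has_dec) {
        // for encoder-decoder models, decoding starts from this token after the encode pass
        printf("decoder start token: %d\n", llama_model_decoder_start_token(model));
    }
}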
8320
8321
0
bool llama_model_is_recurrent(const llama_model * model) {
8322
0
    return llm_arch_is_recurrent(model->arch);
8323
0
}
8324
8325
0
bool llama_model_is_hybrid(const llama_model * model) {
8326
0
    return llm_arch_is_hybrid(model->arch);
8327
0
}
8328
8329
0
bool llama_model_is_diffusion(const llama_model * model) {
8330
0
    return llm_arch_is_diffusion(model->arch);
8331
0
}
8332
8333
0
const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model) {
8334
0
    return model->tensors_by_name;
8335
0
}