/src/llama.cpp/src/llama-model-saver.cpp
#include "llama-model-saver.h"

#include "gguf.h"

#include "llama.h"
#include "llama-hparams.h"
#include "llama-model.h"
#include "llama-vocab.h"

#include <cstdint>
#include <string>
#include <type_traits>
#include <vector>

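// llama_model_saver accumulates model metadata (as GGUF KV pairs) and tensor
// references in a gguf_context, then writes everything out as a single GGUF
// file. The context is owned by this object: created here, freed in the destructor.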
llama_model_saver::llama_model_saver(const struct llama_model & model) : model(model), llm_kv(model.arch) {
    gguf_ctx = gguf_init_empty();
}

llama_model_saver::~llama_model_saver() {
    gguf_free(gguf_ctx);
}

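// Scalar overloads: llm_kv(key) resolves the enum to its canonical GGUF key
// string, and the value is stored with the matching gguf_set_val_* setter.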
void llama_model_saver::add_kv(const enum llm_kv key, const uint32_t value) {
    gguf_set_val_u32(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const int32_t value) {
    gguf_set_val_i32(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const float value) {
    gguf_set_val_f32(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const bool value) {
    gguf_set_val_bool(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const char * value) {
    gguf_set_val_str(gguf_ctx, llm_kv(key).c_str(), value);
}

[[noreturn]]
void llama_model_saver::add_kv(const enum llm_kv key, const char value) {
    GGML_UNUSED(key);
    GGML_UNUSED(value);
    GGML_ABORT("fatal error"); // this should never be called, only needed to make the template below compile
}

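// Container overload. With per_layer == true the container is expected to hold
// one value per layer; if every layer agrees, the array is collapsed into a
// single scalar KV. Element types are dispatched at runtime to the matching
// GGUF array type. Note that add_kv(key, value[0]) must compile for every
// instantiation, including std::string whose value_type is char; that is the
// sole reason the aborting char overload above exists.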
template <typename Container>
void llama_model_saver::add_kv(const enum llm_kv key, const Container & value, const bool per_layer) {
    const size_t n_values = per_layer ? size_t(model.hparams.n_layer) : value.size();
    GGML_ASSERT(n_values <= value.size());

    if (n_values == 0) {
        return;
    }

    if (per_layer) {
        bool all_values_the_same = true;
        for (size_t i = 1; i < n_values; ++i) {
            if (value[i] != value[0]) {
                all_values_the_same = false;
                break;
            }
        }
        if (all_values_the_same) {
            add_kv(key, value[0]);
            return;
        }
    }

    if (std::is_same<typename Container::value_type, uint8_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_UINT8, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, int8_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_INT8, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, uint32_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_UINT32, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, int32_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_INT32, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, float>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_FLOAT32, value.data(), n_values);
    } else if (std::is_same<Container, std::string>::value) {
        gguf_set_val_str(gguf_ctx, llm_kv(key).c_str(), reinterpret_cast<const char *>(value.data()));
    } else {
        GGML_ABORT("fatal error");
    }
}

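// String arrays go through gguf_set_arr_str, which expects a contiguous array
// of C-string pointers, so build a temporary pointer view first.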
void llama_model_saver::add_kv(const enum llm_kv key, const std::vector<std::string> & value) {
    std::vector<const char *> tmp(value.size());
    for (size_t i = 0; i < value.size(); ++i) {
        tmp[i] = value[i].c_str();
    }
    gguf_set_arr_str(gguf_ctx, llm_kv(key).c_str(), tmp.data(), tmp.size());
}

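// Adds a tensor to the GGUF context, skipping null pointers and duplicates.
// The only duplicate currently tolerated is rope_freqs.weight (see FIXME).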
void llama_model_saver::add_tensor(const struct ggml_tensor * tensor) {
    if (!tensor) {
        return;
    }
    if (gguf_find_tensor(gguf_ctx, tensor->name) >= 0) {
        GGML_ASSERT(std::string(tensor->name) == "rope_freqs.weight"); // FIXME
        return;
    }
    gguf_add_tensor(gguf_ctx, tensor);
}

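// Writes the model's hyperparameters and vocabulary as GGUF KV pairs, using
// the same LLM_KV_* keys the model loader reads; commented-out entries are
// metadata this saver cannot reconstruct (yet).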
void llama_model_saver::add_kv_from_model() {
    const llama_hparams & hparams = model.hparams;
    const llama_vocab   & vocab   = model.vocab;

    const int32_t n_vocab = vocab.n_tokens();
    std::vector<std::string> tokens(n_vocab);
    std::vector<float>       scores(n_vocab);
    std::vector<int32_t>     token_types(n_vocab);

    for (int32_t id = 0; id < n_vocab; ++id) {
        const llama_vocab::token_data & token_data = vocab.get_token_data(id);

        tokens[id] = token_data.text;
        scores[id] = token_data.score;

        switch (token_data.attr) {
            case LLAMA_TOKEN_ATTR_UNKNOWN:      token_types[id] = LLAMA_TOKEN_TYPE_UNKNOWN;      break;
            case LLAMA_TOKEN_ATTR_UNUSED:       token_types[id] = LLAMA_TOKEN_TYPE_UNUSED;       break;
            case LLAMA_TOKEN_ATTR_NORMAL:       token_types[id] = LLAMA_TOKEN_TYPE_NORMAL;       break;
            case LLAMA_TOKEN_ATTR_CONTROL:      token_types[id] = LLAMA_TOKEN_TYPE_CONTROL;      break;
            case LLAMA_TOKEN_ATTR_USER_DEFINED: token_types[id] = LLAMA_TOKEN_TYPE_USER_DEFINED; break;
            case LLAMA_TOKEN_ATTR_BYTE:         token_types[id] = LLAMA_TOKEN_TYPE_BYTE;         break;
            case LLAMA_TOKEN_ATTR_UNDEFINED:
            default:                            token_types[id] = LLAMA_TOKEN_TYPE_UNDEFINED;    break;
        }
    }

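    // general metadata; keys still marked ??? have no counterpart in llama_model yet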
    // add_kv(LLM_KV_GENERAL_TYPE, ???);
    add_kv(LLM_KV_GENERAL_ARCHITECTURE, model.arch_name());
    // add_kv(LLM_KV_GENERAL_QUANTIZATION_VERSION, ???);
    // add_kv(LLM_KV_GENERAL_ALIGNMENT, ???);
    add_kv(LLM_KV_GENERAL_NAME, model.name);
    // add_kv(LLM_KV_GENERAL_AUTHOR, ???);
    // add_kv(LLM_KV_GENERAL_VERSION, ???);
    // add_kv(LLM_KV_GENERAL_URL, ???);
    // add_kv(LLM_KV_GENERAL_DESCRIPTION, ???);
    // add_kv(LLM_KV_GENERAL_LICENSE, ???);
    // add_kv(LLM_KV_GENERAL_SOURCE_URL, ???);
    // add_kv(LLM_KV_GENERAL_SOURCE_HF_REPO, ???);

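    // model hyperparameters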
    add_kv(LLM_KV_VOCAB_SIZE, vocab.n_tokens());
    add_kv(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
    add_kv(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
    add_kv(LLM_KV_BLOCK_COUNT, hparams.n_layer);
    add_kv(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
    add_kv(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, true);
    add_kv(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
    add_kv(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp); // was hparams.n_ff_exp, which looks like a copy-paste slip: the loader reads this key into n_ff_shexp
    add_kv(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
    // add_kv(LLM_KV_TENSOR_DATA_LAYOUT, ???);
    add_kv(LLM_KV_EXPERT_COUNT, hparams.n_expert);
    add_kv(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used);
    add_kv(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
    add_kv(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
    add_kv(LLM_KV_POOLING_TYPE, uint32_t(hparams.pooling_type));
    add_kv(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
    add_kv(LLM_KV_DECODER_START_TOKEN_ID, hparams.dec_start_token_id);
    add_kv(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping);
    add_kv(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping);
    add_kv(LLM_KV_SWIN_NORM, hparams.swin_norm);
    add_kv(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers);
    add_kv(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
    add_kv(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
    add_kv(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
    add_kv(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);

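    // attention hyperparameters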
    add_kv(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, true);
    add_kv(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, true);
    add_kv(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
    add_kv(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
    add_kv(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k);
    add_kv(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v);
    add_kv(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
    add_kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
    add_kv(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
    add_kv(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
    add_kv(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
    add_kv(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
    add_kv(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
    add_kv(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);

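    // RoPE: GGUF stores the scaling factor as the inverse of the trained
    // frequency scale, with 0.0f marking a model trained without scaling
    // (a frequency scale of exactly 1.0f)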
    const float rope_scaling_factor = hparams.rope_freq_scale_train == 1.0f ? 0.0f : 1.0f/hparams.rope_freq_scale_train;

    add_kv(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot);
    add_kv(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train);
    // add_kv(LLM_KV_ROPE_SCALE_LINEAR, rope_scaling_factor); // old name
    add_kv(LLM_KV_ROPE_SCALING_TYPE, llama_rope_scaling_type_name(hparams.rope_scaling_type_train));
    add_kv(LLM_KV_ROPE_SCALING_FACTOR, rope_scaling_factor);
    add_kv(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor);
    add_kv(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn);
    add_kv(LLM_KV_ROPE_SCALING_FINETUNED, hparams.rope_finetuned);
    add_kv(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);

    // TODO: implement split file support
    // add_kv(LLM_KV_SPLIT_NO, ???);
    // add_kv(LLM_KV_SPLIT_COUNT, ???);
    // add_kv(LLM_KV_SPLIT_TENSORS_COUNT, ???);

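    // recurrent architectures: SSM (Mamba-style) and RWKV parameters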
    add_kv(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
    add_kv(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
    add_kv(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
    add_kv(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
    add_kv(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms);

    add_kv(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);

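    // tokenizer and vocabulary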
    add_kv(LLM_KV_TOKENIZER_MODEL, vocab.get_tokenizer_model());
    add_kv(LLM_KV_TOKENIZER_PRE, vocab.get_tokenizer_pre());
    add_kv(LLM_KV_TOKENIZER_LIST, tokens);
    add_kv(LLM_KV_TOKENIZER_TOKEN_TYPE, token_types);
    add_kv(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, vocab.n_token_types());
    add_kv(LLM_KV_TOKENIZER_SCORES, scores);
    add_kv(LLM_KV_TOKENIZER_MERGES, vocab.get_bpe_merges());
    // FIXME: llama_token is i32, but u32 is expected when reading a GGUF file; not an issue for writing, though
    add_kv(LLM_KV_TOKENIZER_BOS_ID, uint32_t(vocab.token_bos()));
    add_kv(LLM_KV_TOKENIZER_EOS_ID, uint32_t(vocab.token_eos()));
    add_kv(LLM_KV_TOKENIZER_EOT_ID, uint32_t(vocab.token_eot()));
    add_kv(LLM_KV_TOKENIZER_EOM_ID, uint32_t(vocab.token_eom()));
    add_kv(LLM_KV_TOKENIZER_UNK_ID, uint32_t(vocab.token_unk()));
    add_kv(LLM_KV_TOKENIZER_SEP_ID, uint32_t(vocab.token_sep()));
    add_kv(LLM_KV_TOKENIZER_PAD_ID, uint32_t(vocab.token_pad()));
    // add_kv(LLM_KV_TOKENIZER_CLS_ID, uint32_t(vocab.token_bos())); // deprecated
    // add_kv(LLM_KV_TOKENIZER_MASK_ID, ???);
    add_kv(LLM_KV_TOKENIZER_ADD_BOS, vocab.get_add_bos());
    add_kv(LLM_KV_TOKENIZER_ADD_EOS, vocab.get_add_eos());
    add_kv(LLM_KV_TOKENIZER_ADD_SEP, vocab.get_add_sep());
    add_kv(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.get_add_space_prefix());
    add_kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.get_remove_extra_whitespaces());
    add_kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, vocab.get_precompiled_charsmap());
    // add_kv(LLM_KV_TOKENIZER_HF_JSON, ???);
    // add_kv(LLM_KV_TOKENIZER_RWKV, ???);
    add_kv(LLM_KV_TOKENIZER_FIM_PRE_ID, uint32_t(vocab.token_fim_pre()));
    add_kv(LLM_KV_TOKENIZER_FIM_SUF_ID, uint32_t(vocab.token_fim_suf()));
    add_kv(LLM_KV_TOKENIZER_FIM_MID_ID, uint32_t(vocab.token_fim_mid()));
    add_kv(LLM_KV_TOKENIZER_FIM_PAD_ID, uint32_t(vocab.token_fim_pad()));
    add_kv(LLM_KV_TOKENIZER_FIM_REP_ID, uint32_t(vocab.token_fim_rep()));
    add_kv(LLM_KV_TOKENIZER_FIM_SEP_ID, uint32_t(vocab.token_fim_sep()));

    // TODO: implement LoRA support
    // add_kv(LLM_KV_ADAPTER_TYPE, ???);
    // add_kv(LLM_KV_ADAPTER_LORA_ALPHA, ???);

    // deprecated
    // add_kv(LLM_KV_TOKENIZER_PREFIX_ID, ???);
    // add_kv(LLM_KV_TOKENIZER_SUFFIX_ID, ???);
    // add_kv(LLM_KV_TOKENIZER_MIDDLE_ID, ???);
}

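// Registers all model tensors with the GGUF context. Top-level tensors are
// listed explicitly; per-layer tensors are picked up generically by treating
// each llama_layer as a flat array of ggml_tensor pointers.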
void llama_model_saver::add_tensors_from_model() {
    if (std::string(model.output->name) != std::string(model.tok_embd->name)) {
        add_tensor(model.tok_embd); // some models use the same tensor for tok_embd and output
    }
    add_tensor(model.type_embd);
    add_tensor(model.pos_embd);
    add_tensor(model.tok_norm);
    add_tensor(model.tok_norm_b);
    add_tensor(model.output_norm);
    add_tensor(model.output_norm_b);
    add_tensor(model.output);
    add_tensor(model.output_b);
    add_tensor(model.output_norm_enc);
    add_tensor(model.cls);
    add_tensor(model.cls_b);
    add_tensor(model.cls_out);
    add_tensor(model.cls_out_b);

    for (const struct llama_layer & layer : model.layers) {
        for (size_t i = 0; i < sizeof(layer)/sizeof(struct ggml_tensor *); ++i) {
            // relies on llama_layer being a plain struct of ggml_tensor pointers:
            // reinterpret it as a flat array and add each entry (nulls are skipped by add_tensor)
            add_tensor(reinterpret_cast<const struct ggml_tensor * const *>(&layer)[i]);
        }
    }
}

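// Writes the accumulated KV pairs and tensor data to path_model in GGUF format.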
void llama_model_saver::save(const std::string & path_model) {
    gguf_write_to_file(gguf_ctx, path_model.c_str(), false);
}

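// Usage sketch (not part of this file): upstream llama.cpp drives this class
// from llama_model_save_to_file(); assuming a loaded model, the expected call
// sequence is:
//
//     llama_model_saver ms(*model);
//     ms.add_kv_from_model();
//     ms.add_tensors_from_model();
//     ms.save(path_model);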