Coverage Report

Created: 2026-01-18 06:10

/src/llama.cpp/src/llama-chat.cpp
Line | Count | Source
1
#include "llama-chat.h"
2
3
#include "llama.h"
4
5
#include <map>
6
#include <sstream>
7
#include <algorithm>
8
9
#if __cplusplus >= 202000L
10
    #define LU8(x) (const char*)(u8##x)
11
#else
12
0
    #define LU8(x) u8##x
13
#endif
14
15
// trim whitespace from the beginning and end of a string
16
0
static std::string trim(const std::string & str) {
17
0
    size_t start = 0;
18
0
    size_t end = str.size();
19
0
    while (start < end && isspace(static_cast<unsigned char>(str[start]))) {
20
0
        start += 1;
21
0
    }
22
0
    while (end > start && isspace(static_cast<unsigned char>(str[end - 1]))) {
23
0
        end -= 1;
24
0
    }
25
0
    return str.substr(start, end - start);
26
0
}
27
28
static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
29
    { "chatml",            LLM_CHAT_TEMPLATE_CHATML            },
30
    { "llama2",            LLM_CHAT_TEMPLATE_LLAMA_2           },
31
    { "llama2-sys",        LLM_CHAT_TEMPLATE_LLAMA_2_SYS       },
32
    { "llama2-sys-bos",    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS   },
33
    { "llama2-sys-strip",  LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP },
34
    { "mistral-v1",        LLM_CHAT_TEMPLATE_MISTRAL_V1        },
35
    { "mistral-v3",        LLM_CHAT_TEMPLATE_MISTRAL_V3        },
36
    { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
37
    { "mistral-v7",        LLM_CHAT_TEMPLATE_MISTRAL_V7        },
38
    { "mistral-v7-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN },
39
    { "phi3",              LLM_CHAT_TEMPLATE_PHI_3             },
40
    { "phi4",              LLM_CHAT_TEMPLATE_PHI_4             },
41
    { "falcon3",           LLM_CHAT_TEMPLATE_FALCON_3          },
42
    { "zephyr",            LLM_CHAT_TEMPLATE_ZEPHYR            },
43
    { "monarch",           LLM_CHAT_TEMPLATE_MONARCH           },
44
    { "gemma",             LLM_CHAT_TEMPLATE_GEMMA             },
45
    { "orion",             LLM_CHAT_TEMPLATE_ORION             },
46
    { "openchat",          LLM_CHAT_TEMPLATE_OPENCHAT          },
47
    { "vicuna",            LLM_CHAT_TEMPLATE_VICUNA            },
48
    { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
49
    { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
50
    { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
51
    { "deepseek3",         LLM_CHAT_TEMPLATE_DEEPSEEK_3        },
52
    { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
53
    { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
54
    { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGLM_3         },
55
    { "chatglm4",          LLM_CHAT_TEMPLATE_CHATGLM_4         },
56
    { "glmedge",           LLM_CHAT_TEMPLATE_GLMEDGE           },
57
    { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM           },
58
    { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3          },
59
    { "exaone4",           LLM_CHAT_TEMPLATE_EXAONE_4          },
60
    { "exaone-moe",        LLM_CHAT_TEMPLATE_EXAONE_MOE        },
61
    { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD        },
62
    { "granite",           LLM_CHAT_TEMPLATE_GRANITE           },
63
    { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT          },
64
    { "megrez",            LLM_CHAT_TEMPLATE_MEGREZ            },
65
    { "yandex",            LLM_CHAT_TEMPLATE_YANDEX            },
66
    { "bailing",           LLM_CHAT_TEMPLATE_BAILING           },
67
    { "bailing-think",     LLM_CHAT_TEMPLATE_BAILING_THINK     },
68
    { "bailing2",          LLM_CHAT_TEMPLATE_BAILING2          },
69
    { "llama4",            LLM_CHAT_TEMPLATE_LLAMA4            },
70
    { "smolvlm",           LLM_CHAT_TEMPLATE_SMOLVLM           },
71
    { "hunyuan-moe",       LLM_CHAT_TEMPLATE_HUNYUAN_MOE       },
72
    { "gpt-oss",           LLM_CHAT_TEMPLATE_OPENAI_MOE        },
73
    { "hunyuan-dense",     LLM_CHAT_TEMPLATE_HUNYUAN_DENSE     },
74
    { "kimi-k2",           LLM_CHAT_TEMPLATE_KIMI_K2           },
75
    { "seed_oss",          LLM_CHAT_TEMPLATE_SEED_OSS          },
76
    { "grok-2",            LLM_CHAT_TEMPLATE_GROK_2            },
77
    { "pangu-embedded",    LLM_CHAT_TEMPLATE_PANGU_EMBED       },
78
    { "solar-open",        LLM_CHAT_TEMPLATE_SOLAR_OPEN        },
79
};
80
81
0
llm_chat_template llm_chat_template_from_str(const std::string & name) {
82
0
    return LLM_CHAT_TEMPLATES.at(name);
83
0
}
84
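
A minimal caller-side sketch (not part of the covered file): llm_chat_template_from_str is a plain map lookup, so an unknown name throws std::out_of_range, which is exactly what llm_chat_detect_template below catches before falling back to substring heuristics.

    // Hypothetical caller; assumes llama-chat.h is on the include path.
    #include <cstdio>
    #include <stdexcept>
    #include "llama-chat.h"

    static void lookup_example() {
        llm_chat_template t = llm_chat_template_from_str("chatml"); // exact-name hit
        (void) t;
        try {
            llm_chat_template_from_str("no-such-template");
        } catch (const std::out_of_range &) {
            std::printf("unknown template name, fall back to heuristics\n");
        }
    }
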
85
0
llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
86
0
    try {
87
0
        return llm_chat_template_from_str(tmpl);
88
0
    } catch (const std::out_of_range &) {
89
        // ignore
90
0
    }
91
92
0
    auto tmpl_contains = [&tmpl](const char * haystack) -> bool {
93
0
        return tmpl.find(haystack) != std::string::npos;
94
0
    };
95
0
    if (tmpl_contains("<|im_start|>")) {
96
0
        return tmpl_contains("<|im_sep|>")
97
0
            ? LLM_CHAT_TEMPLATE_PHI_4
98
0
            : tmpl_contains("<end_of_utterance>")
99
0
                ? LLM_CHAT_TEMPLATE_SMOLVLM // SmolVLM uses <|im_start|> as BOS, but it is NOT chatml
100
0
                : LLM_CHAT_TEMPLATE_CHATML;
101
0
    } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) {
102
0
        if (tmpl_contains("[SYSTEM_PROMPT]")) {
103
0
            return LLM_CHAT_TEMPLATE_MISTRAL_V7;
104
0
        } else if (
105
            // catches official 'v1' template
106
0
            tmpl_contains("' [INST] ' + system_message")
107
            // catches official 'v3' and 'v3-tekken' templates
108
0
            || tmpl_contains("[AVAILABLE_TOOLS]")
109
0
        ) {
110
            // Official mistral 'v1', 'v3' and 'v3-tekken' templates
111
            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
112
            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
113
0
            if (tmpl_contains(" [INST]")) {
114
0
                return LLM_CHAT_TEMPLATE_MISTRAL_V1;
115
0
            } else if (tmpl_contains("\"[INST]\"")) {
116
0
                return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN;
117
0
            }
118
0
            return LLM_CHAT_TEMPLATE_MISTRAL_V3;
119
0
        } else {
120
            // llama2 template and its variants
121
            // [variant] support system message
122
            // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
123
0
            bool support_system_message = tmpl_contains("<<SYS>>");
124
0
            bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]");
125
0
            bool strip_message = tmpl_contains("content.strip()");
126
0
            if (strip_message) {
127
0
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
128
0
            } else if (add_bos_inside_history) {
129
0
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
130
0
            } else if (support_system_message) {
131
0
                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS;
132
0
            } else {
133
0
                return LLM_CHAT_TEMPLATE_LLAMA_2;
134
0
            }
135
0
        }
136
0
    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
137
0
        return LLM_CHAT_TEMPLATE_PHI_3;
138
0
    } else if (tmpl_contains("[gMASK]<sop>")) {
139
0
        return LLM_CHAT_TEMPLATE_CHATGLM_4;
140
0
    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
141
0
        if (tmpl_contains("<|tool_declare|>")) {
142
0
            return LLM_CHAT_TEMPLATE_EXAONE_MOE;
143
0
        }
144
0
        return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
145
0
    } else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) {
146
0
        return LLM_CHAT_TEMPLATE_GLMEDGE;
147
0
    } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
148
0
        return LLM_CHAT_TEMPLATE_ZEPHYR;
149
0
    } else if (tmpl_contains("bos_token + message['role']")) {
150
0
        return LLM_CHAT_TEMPLATE_MONARCH;
151
0
    } else if (tmpl_contains("<start_of_turn>")) {
152
0
        return LLM_CHAT_TEMPLATE_GEMMA;
153
0
    } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) {
154
        // OrionStarAI/Orion-14B-Chat
155
0
        return LLM_CHAT_TEMPLATE_ORION;
156
0
    } else if (tmpl_contains("GPT4 Correct ")) {
157
        // openchat/openchat-3.5-0106
158
0
        return LLM_CHAT_TEMPLATE_OPENCHAT;
159
0
    } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) {
160
        // eachadea/vicuna-13b-1.1 (and Orca variant)
161
0
        if (tmpl_contains("SYSTEM: ")) {
162
0
            return LLM_CHAT_TEMPLATE_VICUNA_ORCA;
163
0
        }
164
0
        return LLM_CHAT_TEMPLATE_VICUNA;
165
0
    } else if (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>")) {
166
        // deepseek-ai/deepseek-coder-33b-instruct
167
0
        return LLM_CHAT_TEMPLATE_DEEPSEEK;
168
0
    } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) {
169
        // CohereForAI/c4ai-command-r-plus
170
0
        return LLM_CHAT_TEMPLATE_COMMAND_R;
171
0
    } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) {
172
0
        return LLM_CHAT_TEMPLATE_LLAMA_3;
173
0
    } else if (tmpl_contains("[gMASK]sop")) {
174
        // chatglm3-6b
175
0
        return LLM_CHAT_TEMPLATE_CHATGLM_3;
176
0
    } else if (tmpl_contains(LU8("<用户>"))) {
177
        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
178
0
        return LLM_CHAT_TEMPLATE_MINICPM;
179
0
    } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
180
0
        return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
181
0
    } else if (tmpl_contains(LU8("<|Assistant|>")) && tmpl_contains(LU8("<|User|>")) && tmpl_contains(LU8("<|end▁of▁sentence|>"))) {
182
0
        return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
183
0
    } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
184
0
        if (tmpl_contains("[|tool|]")) {
185
0
            return LLM_CHAT_TEMPLATE_EXAONE_4;
186
0
        }
187
        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
188
        // EXAONE-3.0-7.8B-Instruct
189
0
        return LLM_CHAT_TEMPLATE_EXAONE_3;
190
0
    } else if (tmpl_contains("rwkv-world") || tmpl_contains("{{- 'User: ' + message['content']|trim + '\\n\\n' -}}")) {
191
0
        return LLM_CHAT_TEMPLATE_RWKV_WORLD;
192
0
    } else if (tmpl_contains("<|start_of_role|>")) {
193
0
        return LLM_CHAT_TEMPLATE_GRANITE;
194
0
    } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) {
195
0
        return LLM_CHAT_TEMPLATE_GIGACHAT;
196
0
    } else if (tmpl_contains("<|role_start|>")) {
197
0
        return LLM_CHAT_TEMPLATE_MEGREZ;
198
0
    } else if (tmpl_contains(" Ассистент:")) {
199
0
        return LLM_CHAT_TEMPLATE_YANDEX;
200
0
    } else if (tmpl_contains("<role>ASSISTANT</role>") && tmpl_contains("'HUMAN'")) {
201
0
        return LLM_CHAT_TEMPLATE_BAILING;
202
0
    } else if (tmpl_contains("<role>ASSISTANT</role>") && tmpl_contains("\"HUMAN\"") && tmpl_contains("<think>")) {
203
0
        return LLM_CHAT_TEMPLATE_BAILING_THINK;
204
0
    } else if (tmpl_contains("<role>ASSISTANT</role>") && tmpl_contains("<role>HUMAN</role>") && tmpl_contains("<|role_end|>")) {
205
0
        return LLM_CHAT_TEMPLATE_BAILING2;
206
0
    } else if (tmpl_contains("<|header_start|>") && tmpl_contains("<|header_end|>")) {
207
0
        return LLM_CHAT_TEMPLATE_LLAMA4;
208
0
    } else if (tmpl_contains("<|endofuserprompt|>")) {
209
0
        return LLM_CHAT_TEMPLATE_DOTS1;
210
0
    } else if (tmpl_contains("<|extra_0|>") && tmpl_contains("<|extra_4|>")) {
211
0
        return LLM_CHAT_TEMPLATE_HUNYUAN_MOE;
212
0
    } else if (tmpl_contains("<|start|>") && tmpl_contains("<|channel|>")) {
213
0
        return LLM_CHAT_TEMPLATE_OPENAI_MOE;
214
0
    } else if (tmpl_contains("<|hy_Assistant|>") && tmpl_contains("<|hy_place▁holder▁no▁3|>")) {
215
0
        return LLM_CHAT_TEMPLATE_HUNYUAN_DENSE;
216
0
    } else if (tmpl_contains("<|im_assistant|>assistant<|im_middle|>")) {
217
0
        return LLM_CHAT_TEMPLATE_KIMI_K2;
218
0
    } else if (tmpl_contains("<seed:bos>")) {
219
0
        return LLM_CHAT_TEMPLATE_SEED_OSS;
220
0
    } else if (tmpl_contains("'Assistant: '  + message['content'] + '<|separator|>")) {
221
0
        return LLM_CHAT_TEMPLATE_GROK_2;
222
0
    } else if (tmpl_contains(LU8("[unused9]系统:[unused10]"))) {
223
0
        return LLM_CHAT_TEMPLATE_PANGU_EMBED;
224
0
    } else if (tmpl_contains("<|begin|>") && tmpl_contains("<|end|>") && tmpl_contains("<|content|>")) {
225
0
        return LLM_CHAT_TEMPLATE_SOLAR_OPEN;
226
0
    }
227
0
    return LLM_CHAT_TEMPLATE_UNKNOWN;
228
0
}
229
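
A short sketch of how the detection heuristics behave; the template fragments below are invented for illustration, not real model templates.

    // Illustrative only; assumes the enums declared in llama-chat.h.
    #include "llama-chat.h"

    static void detect_example() {
        // contains "<|im_start|>" but none of the special markers -> chatml
        llm_chat_template a = llm_chat_detect_template(
            "{% for m in messages %}<|im_start|>{{ m.role }}\n{{ m.content }}<|im_end|>\n{% endfor %}");
        // a == LLM_CHAT_TEMPLATE_CHATML

        // contains "<|start_header_id|>" and "<|end_header_id|>" -> llama3
        llm_chat_template b = llm_chat_detect_template(
            "<|start_header_id|>{{ role }}<|end_header_id|>\n\n{{ content }}<|eot_id|>");
        // b == LLM_CHAT_TEMPLATE_LLAMA_3

        // nothing recognizable -> LLM_CHAT_TEMPLATE_UNKNOWN
        llm_chat_template c = llm_chat_detect_template("{{ messages }}");
        (void) a; (void) b; (void) c;
    }
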
230
// Simple version of "llama_apply_chat_template" that only works with strings
231
// This function uses heuristic checks to determine commonly used templates. It is not a Jinja parser.
232
int32_t llm_chat_apply_template(
233
    llm_chat_template tmpl,
234
    const std::vector<const llama_chat_message *> & chat,
235
0
    std::string & dest, bool add_ass) {
236
    // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
237
0
    std::stringstream ss;
238
0
    if (tmpl == LLM_CHAT_TEMPLATE_CHATML) {
239
        // chatml template
240
0
        for (auto message : chat) {
241
0
            ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
242
0
        }
243
0
        if (add_ass) {
244
0
            ss << "<|im_start|>assistant\n";
245
0
        }
246
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7 || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN) {
247
        // Official mistral 'v7' template
248
        // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7
249
        //      https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503#basic-instruct-template-v7-tekken
250
0
        const char * trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7 ? " " : "";
251
0
        for (auto message : chat) {
252
0
            std::string role(message->role);
253
0
            std::string content(message->content);
254
0
            if (role == "system") {
255
0
                ss << "[SYSTEM_PROMPT]" << trailing_space << content << "[/SYSTEM_PROMPT]";
256
0
            } else if (role == "user") {
257
0
                ss << "[INST]" << trailing_space << content << "[/INST]";
258
0
            } else {
259
0
                ss << trailing_space << content << "</s>";
260
0
            }
261
0
        }
262
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1
263
0
            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3
264
0
            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) {
265
        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
266
        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
267
0
        std::string leading_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 ? " " : "";
268
0
        std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? "" : " ";
269
0
        bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3;
270
0
        bool is_inside_turn = false;
271
0
        for (auto message : chat) {
272
0
            if (!is_inside_turn) {
273
0
                ss << leading_space << "[INST]" << trailing_space;
274
0
                is_inside_turn = true;
275
0
            }
276
0
            std::string role(message->role);
277
0
            std::string content(message->content);
278
0
            if (role == "system") {
279
0
                ss << content << "\n\n";
280
0
            } else if (role == "user") {
281
0
                ss << content << leading_space << "[/INST]";
282
0
            } else {
283
0
                ss << trailing_space << (trim_assistant_message ? trim(content) : content) << "</s>";
284
0
                is_inside_turn = false;
285
0
            }
286
0
        }
287
0
    } else if (
288
0
            tmpl == LLM_CHAT_TEMPLATE_LLAMA_2
289
0
            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS
290
0
            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS
291
0
            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) {
292
        // llama2 template and its variants
293
        // [variant] support system message
294
        // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
295
0
        bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2;
296
        // [variant] add BOS inside history
297
0
        bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
298
        // [variant] trim spaces from the input message
299
0
        bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
300
        // construct the prompt
301
0
        bool is_inside_turn = true; // skip BOS at the beginning
302
0
        ss << "[INST] ";
303
0
        for (auto message : chat) {
304
0
            std::string content = strip_message ? trim(message->content) : message->content;
305
0
            std::string role(message->role);
306
0
            if (!is_inside_turn) {
307
0
                is_inside_turn = true;
308
0
                ss << (add_bos_inside_history ? "<s>[INST] " : "[INST] ");
309
0
            }
310
0
            if (role == "system") {
311
0
                if (support_system_message) {
312
0
                    ss << "<<SYS>>\n" << content << "\n<</SYS>>\n\n";
313
0
                } else {
314
                    // if the model does not support system message, we still include it in the first message, but without <<SYS>>
315
0
                    ss << content << "\n";
316
0
                }
317
0
            } else if (role == "user") {
318
0
                ss << content << " [/INST]";
319
0
            } else {
320
0
                ss << content << "</s>";
321
0
                is_inside_turn = false;
322
0
            }
323
0
        }
324
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) {
325
        // Phi 3
326
0
        for (auto message : chat) {
327
0
            std::string role(message->role);
328
0
            ss << "<|" << role << "|>\n" << message->content << "<|end|>\n";
329
0
        }
330
0
        if (add_ass) {
331
0
            ss << "<|assistant|>\n";
332
0
        }
333
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_4) {
334
        // chatml template
335
0
        for (auto message : chat) {
336
0
            ss << "<|im_start|>" << message->role << "<|im_sep|>" << message->content << "<|im_end|>";
337
0
        }
338
0
        if (add_ass) {
339
0
            ss << "<|im_start|>assistant<|im_sep|>";
340
0
        }
341
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) {
342
        // Falcon 3
343
0
        for (auto message : chat) {
344
0
            std::string role(message->role);
345
0
            ss << "<|" << role << "|>\n" << message->content << "\n";
346
0
        }
347
0
        if (add_ass) {
348
0
            ss << "<|assistant|>\n";
349
0
        }
350
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) {
351
        // zephyr template
352
0
        for (auto message : chat) {
353
0
            ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
354
0
        }
355
0
        if (add_ass) {
356
0
            ss << "<|assistant|>\n";
357
0
        }
358
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) {
359
        // mlabonne/AlphaMonarch-7B template (the <s> is included inside history)
360
0
        for (auto message : chat) {
361
0
            std::string bos = (message == chat.front()) ? "" : "<s>"; // skip BOS for first message
362
0
            ss << bos << message->role << "\n" << message->content << "</s>\n";
363
0
        }
364
0
        if (add_ass) {
365
0
            ss << "<s>assistant\n";
366
0
        }
367
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) {
368
        // google/gemma-7b-it
369
0
        std::string system_prompt = "";
370
0
        for (auto message : chat) {
371
0
            std::string role(message->role);
372
0
            if (role == "system") {
373
                // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
374
0
                system_prompt += trim(message->content);
375
0
                continue;
376
0
            }
377
            // in gemma, "assistant" is "model"
378
0
            role = role == "assistant" ? "model" : message->role;
379
0
            ss << "<start_of_turn>" << role << "\n";
380
0
            if (!system_prompt.empty() && role != "model") {
381
0
                ss << system_prompt << "\n\n";
382
0
                system_prompt = "";
383
0
            }
384
0
            ss << trim(message->content) << "<end_of_turn>\n";
385
0
        }
386
0
        if (add_ass) {
387
0
            ss << "<start_of_turn>model\n";
388
0
        }
389
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_ORION) {
390
        // OrionStarAI/Orion-14B-Chat
391
0
        std::string system_prompt = "";
392
0
        for (auto message : chat) {
393
0
            std::string role(message->role);
394
0
            if (role == "system") {
395
                // there is no system message support, we will merge it with user prompt
396
0
                system_prompt += message->content;
397
0
                continue;
398
0
            } else if (role == "user") {
399
0
                ss << "Human: ";
400
0
                if (!system_prompt.empty()) {
401
0
                    ss << system_prompt << "\n\n";
402
0
                    system_prompt = "";
403
0
                }
404
0
                ss << message->content << "\n\nAssistant: </s>";
405
0
            } else {
406
0
                ss << message->content << "</s>";
407
0
            }
408
0
        }
409
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) {
410
        // openchat/openchat-3.5-0106,
411
0
        for (auto message : chat) {
412
0
            std::string role(message->role);
413
0
            if (role == "system") {
414
0
                ss << message->content << "<|end_of_turn|>";
415
0
            } else {
416
0
                role[0] = toupper(role[0]);
417
0
                ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
418
0
            }
419
0
        }
420
0
        if (add_ass) {
421
0
            ss << "GPT4 Correct Assistant:";
422
0
        }
423
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
424
        // eachadea/vicuna-13b-1.1 (and Orca variant)
425
0
        for (auto message : chat) {
426
0
            std::string role(message->role);
427
0
            if (role == "system") {
428
                // Orca-Vicuna variant uses a system prefix
429
0
                if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
430
0
                    ss << "SYSTEM: " << message->content << "\n";
431
0
                } else {
432
0
                    ss << message->content << "\n\n";
433
0
                }
434
0
            } else if (role == "user") {
435
0
                ss << "USER: " << message->content << "\n";
436
0
            } else if (role == "assistant") {
437
0
                ss << "ASSISTANT: " << message->content << "</s>\n";
438
0
            }
439
0
        }
440
0
        if (add_ass) {
441
0
            ss << "ASSISTANT:";
442
0
        }
443
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) {
444
        // deepseek-ai/deepseek-coder-33b-instruct
445
0
        for (auto message : chat) {
446
0
            std::string role(message->role);
447
0
            if (role == "system") {
448
0
                ss << message->content;
449
0
            } else if (role == "user") {
450
0
                ss << "### Instruction:\n" << message->content << "\n";
451
0
            } else if (role == "assistant") {
452
0
                ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
453
0
            }
454
0
        }
455
0
        if (add_ass) {
456
0
            ss << "### Response:\n";
457
0
        }
458
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) {
459
        // CohereForAI/c4ai-command-r-plus
460
0
        for (auto message : chat) {
461
0
            std::string role(message->role);
462
0
            if (role == "system") {
463
0
                ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
464
0
            } else if (role == "user") {
465
0
                ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
466
0
            } else if (role == "assistant") {
467
0
                ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
468
0
            }
469
0
        }
470
0
        if (add_ass) {
471
0
            ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
472
0
        }
473
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) {
474
        // Llama 3
475
0
        for (auto message : chat) {
476
0
            std::string role(message->role);
477
0
            ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
478
0
        }
479
0
        if (add_ass) {
480
0
            ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
481
0
        }
482
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_3) {
483
        // chatglm3-6b
484
0
        ss << "[gMASK]" << "sop";
485
0
        for (auto message : chat) {
486
0
            std::string role(message->role);
487
0
            ss << "<|" << role << "|>" << "\n " << message->content;
488
0
        }
489
0
        if (add_ass) {
490
0
            ss << "<|assistant|>";
491
0
        }
492
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGLM_4) {
493
0
        ss << "[gMASK]" << "<sop>";
494
0
        for (auto message : chat) {
495
0
            std::string role(message->role);
496
0
            ss << "<|" << role << "|>" << "\n" << message->content;
497
0
        }
498
0
        if (add_ass) {
499
0
            ss << "<|assistant|>\n";
500
0
        }
501
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
502
0
        for (auto message : chat) {
503
0
            std::string role(message->role);
504
0
            ss << "<|" << role << "|>" << "\n" << message->content;
505
0
        }
506
0
        if (add_ass) {
507
0
            ss << "<|assistant|>";
508
0
        }
509
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
510
        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
511
0
        for (auto message : chat) {
512
0
            std::string role(message->role);
513
0
            if (role == "user") {
514
0
                ss << LU8("<用户>");
515
0
                ss << trim(message->content);
516
0
                ss << "<AI>";
517
0
            } else {
518
0
                ss << trim(message->content);
519
0
            }
520
0
        }
521
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) {
522
        // DeepSeek-V2
523
0
        for (auto message : chat) {
524
0
            std::string role(message->role);
525
0
            if (role == "system") {
526
0
                ss << message->content << "\n\n";
527
0
            } else if (role == "user") {
528
0
                ss << "User: " << message->content << "\n\n";
529
0
            } else if (role == "assistant") {
530
0
                ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>");
531
0
            }
532
0
        }
533
0
        if (add_ass) {
534
0
            ss << "Assistant:";
535
0
        }
536
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_3) {
537
        // DeepSeek-V3
538
0
        for (auto message : chat) {
539
0
            std::string role(message->role);
540
0
            if (role == "system") {
541
0
                ss << message->content << "\n\n";
542
0
            } else if (role == "user") {
543
0
                ss << LU8("<|User|>") << message->content;
544
0
            } else if (role == "assistant") {
545
0
                ss << LU8("<|Assistant|>") << message->content << LU8("<|end▁of▁sentence|>");
546
0
            }
547
0
        }
548
0
        if (add_ass) {
549
0
            ss << LU8("<|Assistant|>");
550
0
        }
551
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) {
552
        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
553
        // EXAONE-3.0-7.8B-Instruct
554
0
        for (auto message : chat) {
555
0
            std::string role(message->role);
556
0
            if (role == "system") {
557
0
                ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
558
0
            } else if (role == "user") {
559
0
                ss << "[|user|]" << trim(message->content) << "\n";
560
0
            } else if (role == "assistant") {
561
0
                ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
562
0
            }
563
0
        }
564
0
        if (add_ass) {
565
0
            ss << "[|assistant|]";
566
0
        }
567
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_4) {
568
0
        for (auto message : chat) {
569
0
            std::string role(message->role);
570
0
            if (role == "system") {
571
0
                ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
572
0
            } else if (role == "user") {
573
0
                ss << "[|user|]" << trim(message->content) << "\n";
574
0
            } else if (role == "assistant") {
575
0
                ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
576
0
            } else if (role == "tool") {
577
0
                ss << "[|tool|]" << trim(message->content) << "[|endofturn|]\n";
578
0
            }
579
0
        }
580
0
        if (add_ass) {
581
0
            ss << "[|assistant|]";
582
0
        }
583
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_MOE) {
584
0
        for (auto message : chat) {
585
0
            std::string role(message->role);
586
0
            if (role == "system") {
587
0
                ss << "<|system|>\n" << trim(message->content) << "<|endofturn|>\n";
588
0
            } else if (role == "user") {
589
0
                ss << "<|user|>\n" << trim(message->content) << "<|endofturn|>\n";
590
0
            } else if (role == "assistant") {
591
0
                ss << "<|assistant|>\n" << trim(message->content) << "<|endofturn|>\n";
592
0
            } else if (role == "tool") {
593
0
                ss << "<|tool|>\n" << trim(message->content) << "<|endofturn|>\n";
594
0
            }
595
0
        }
596
0
        if (add_ass) {
597
0
            ss << "<|assistant|>\n";
598
0
        }
599
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) {
600
        // this template requires the model to have "\n\n" as EOT token
601
0
        for (size_t i = 0; i < chat.size(); i++) {
602
0
            std::string role(chat[i]->role);
603
0
            if (role == "system") {
604
0
                ss << "System: " << trim(chat[i]->content) << "\n\n";
605
0
            } else if (role == "user") {
606
0
                ss << "User: " << trim(chat[i]->content) << "\n\n";
607
0
                if (i == chat.size() - 1) {
608
0
                    ss << "Assistant:";
609
0
                }
610
0
            } else if (role == "assistant") {
611
0
                ss << "Assistant: " << trim(chat[i]->content) << "\n\n";
612
0
            }
613
0
        }
614
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) {
615
        // IBM Granite template
616
0
        for (const auto & message : chat) {
617
0
            std::string role(message->role);
618
0
            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
619
0
            if (role == "assistant_tool_call") {
620
0
                ss << "<|tool_call|>";
621
0
            }
622
0
            ss << message->content << "<|end_of_text|>\n";
623
0
        }
624
0
        if (add_ass) {
625
0
            ss << "<|start_of_role|>assistant<|end_of_role|>";
626
0
        }
627
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) {
628
        // GigaChat template
629
0
        bool has_system = !chat.empty() && std::string(chat[0]->role) == "system";
630
631
        // Handle system message if present
632
0
        if (has_system) {
633
0
            ss << "<s>" << chat[0]->content << "<|message_sep|>";
634
0
        } else {
635
0
            ss << "<s>";
636
0
        }
637
638
        // Process remaining messages
639
0
        for (size_t i = has_system ? 1 : 0; i < chat.size(); i++) {
640
0
            std::string role(chat[i]->role);
641
0
            if (role == "user") {
642
0
                ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>"
643
0
                << "available functions<|role_sep|>[]<|message_sep|>";
644
0
            } else if (role == "assistant") {
645
0
                ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>";
646
0
            }
647
0
        }
648
649
        // Add generation prompt if needed
650
0
        if (add_ass) {
651
0
            ss << "assistant<|role_sep|>";
652
0
        }
653
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) {
654
        // Megrez template
655
0
        for (auto message : chat) {
656
0
            std::string role(message->role);
657
0
            ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>";
658
0
        }
659
660
0
        if (add_ass) {
661
0
            ss << "<|role_start|>assistant<|role_end|>";
662
0
        }
663
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_YANDEX) {
664
        // Yandex template ("\n\n" is defined as EOT token)
665
666
0
        for (size_t i = 0; i < chat.size(); i++) {
667
0
            std::string role(chat[i]->role);
668
0
            if (role == "user") {
669
0
                ss << " Пользователь: " << chat[i]->content << "\n\n";
670
0
            } else if (role == "assistant") {
671
0
                ss << " Ассистент: " << chat[i]->content << "\n\n";
672
0
            }
673
0
        }
674
675
        // Add generation prompt if needed
676
0
        if (add_ass) {
677
0
            ss << " Ассистент:[SEP]";
678
0
        }
679
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_BAILING || tmpl == LLM_CHAT_TEMPLATE_BAILING_THINK) {
680
        // Bailing (Ling/Ring) template
681
0
        for (auto message : chat) {
682
0
            std::string role(message->role);
683
684
0
            if (role == "user") {
685
0
                role = "HUMAN";
686
0
            } else {
687
0
                std::transform(role.begin(), role.end(), role.begin(), ::toupper);
688
0
            }
689
690
0
            ss << "<role>" << role << "</role>" << message->content;
691
0
        }
692
693
0
        if (add_ass) {
694
0
            ss << "<role>ASSISTANT</role>";
695
696
0
            if (tmpl == LLM_CHAT_TEMPLATE_BAILING_THINK) {
697
0
                ss << "<think>";
698
0
            }
699
0
        }
700
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_BAILING2) {
701
        // Bailing2 (Ling 2.0) template
702
0
        bool has_system = !chat.empty() && std::string(chat[0]->role) == "system";
703
704
0
        if (!has_system) {
705
0
            ss << "<role>SYSTEM</role>detailed thinking off<|role_end|>";
706
0
        }
707
708
0
        for (auto message : chat) {
709
0
            std::string role(message->role);
710
711
0
            if (role == "user") {
712
0
                role = "HUMAN";
713
0
            } else {
714
0
                std::transform(role.begin(), role.end(), role.begin(), ::toupper);
715
0
            }
716
717
0
            ss << "<role>" << role << "</role>" << message->content << "<|role_end|>";
718
0
        }
719
720
0
        if (add_ass) {
721
0
            ss << "<role>ASSISTANT</role>";
722
0
        }
723
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA4) {
724
        // Llama 4
725
0
        for (auto message : chat) {
726
0
            std::string role(message->role);
727
0
            ss << "<|header_start|>" << role << "<|header_end|>\n\n" << trim(message->content) << "<|eot|>";
728
0
        }
729
0
        if (add_ass) {
730
0
            ss << "<|header_start|>assistant<|header_end|>\n\n";
731
0
        }
732
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_SMOLVLM) {
733
        // SmolVLM
734
0
        ss << "<|im_start|>"; // uses <|im_start|> as BOS, but the actual content is NOT chatml
735
0
        for (auto message : chat) {
736
0
            std::string role(message->role);
737
0
            if (role == "system") {
738
0
                ss << message->content << "\n\n";
739
0
            } else if (role == "user") {
740
0
                ss << "User: " << message->content << "<end_of_utterance>\n";
741
0
            } else {
742
0
                ss << "Assistant: " << message->content << "<end_of_utterance>\n";
743
0
            }
744
0
        }
745
0
        if (add_ass) {
746
0
            ss << "Assistant:";
747
0
        }
748
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_DOTS1) {
749
        // dots.llm1.inst (DOTS1)
750
0
        for (auto message : chat) {
751
0
            std::string role(message->role);
752
0
            if (role == "system") {
753
0
                ss << "<|system|>" << message->content << "<|endofsystem|>";
754
0
            } else if (role == "user") {
755
0
                ss << "<|userprompt|>" << message->content << "<|endofuserprompt|>";
756
0
            } else {
757
0
                ss << "<|response|>" << message->content << "<|endofresponse|>";
758
0
            }
759
0
        }
760
0
        if (add_ass) {
761
0
            ss << "<|response|>";
762
0
        }
763
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_HUNYUAN_MOE) {
764
        // tencent/Hunyuan-A13B-Instruct
765
0
        for (auto message : chat) {
766
0
            std::string role(message->role);
767
0
            if (role == "system") {
768
0
                ss << "<|startoftext|>" << message->content << "<|extra_4|>";
769
0
            } else if (role == "assistant") {
770
0
                ss << message->content << "<|eos|>";
771
0
            } else {
772
0
                ss << "<|startoftext|>" << message->content << "<|extra_0|>";
773
0
            }
774
0
        }
775
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_OPENAI_MOE) {
776
        // OpenAI MoE (based on Harmony chat template)
777
0
        for (auto message : chat) {
778
0
            std::string role(message->role);
779
0
            ss << "<|start|>" << role << "<|message|>" << message->content;
780
0
            ss << (role == "assistant" ? "<|return|>" : "<|end|>");
781
0
        }
782
0
        if (add_ass) {
783
0
            ss << "<|start|>assistant";
784
0
        }
785
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_HUNYUAN_DENSE) {
786
        // tencent/Hunyuan-4B-Instruct
787
0
        for (size_t i = 0; i < chat.size(); i++) {
788
0
            std::string role(chat[i]->role);
789
0
            if (i == 0) {
790
0
                if (role == "system") {
791
0
                    ss << chat[i]->content << "<|hy_place▁holder▁no▁3|>";
792
0
                }
793
0
            }
794
795
0
            if (role == "assistant") {
796
0
                ss << "<|hy_Assistant|>" << chat[i]->content << "<|hy_place▁holder▁no▁2|>";
797
0
            } else if (role == "user") {
798
0
                ss << "<|hy_User|>" << chat[i]->content << "<|hy_Assistant|>";
799
0
            }
800
0
        }
801
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_KIMI_K2) {
802
        // moonshotai/Kimi-K2-Instruct
803
0
        for (auto message : chat) {
804
0
            std::string role(message->role);
805
0
            if (role == "system") {
806
0
                ss << "<|im_system|>system<|im_middle|>";
807
0
            } else if (role == "user") {
808
0
                ss << "<|im_user|>user<|im_middle|>";
809
0
            } else if (role == "assistant") {
810
0
                ss << "<|im_assistant|>assistant<|im_middle|>";
811
0
            } else if (role == "tool") {
812
0
                ss << "<|im_system|>tool<|im_middle|>";
813
0
            }
814
815
0
            ss << message->content << "<|im_end|>";
816
0
        }
817
0
        if (add_ass) {
818
0
            ss << "<|im_assistant|>assistant<|im_middle|>";
819
0
        }
820
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_SEED_OSS) {
821
0
        for (auto message: chat) {
822
0
            std::string role(message->role);
823
0
            ss << "<seed:bos>" << role << "\n" << (role == "assistant" ? trim(message->content) : message->content) << "<seed:eos>";
824
0
        }
825
0
        if (add_ass) {
826
0
            ss << "<seed:bos>assistant\n";
827
0
        }
828
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_GROK_2) {
829
0
        for (auto message : chat) {
830
0
            std::string role(message->role);
831
0
            if (role == "system") {
832
0
                ss << "System: " << trim(message->content) << "<|separator|>\n\n";
833
0
            } else if (role == "user") {
834
0
                ss << "Human: " << trim(message->content) << "<|separator|>\n\n";
835
0
            } else if (role == "assistant") {
836
0
                ss << "Assistant: " << message->content << "<|separator|>\n\n";
837
0
            }
838
0
        }
839
0
        if (add_ass) {
840
0
            ss << "Assistant:";
841
0
        }
842
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_PANGU_EMBED) {
843
        // [unused9]系统:xxx[unused10]
844
        // [unused9]用户:xxx[unused10]
845
        // [unused9]助手:xxx[unused10]
846
        // ...
847
0
        for (size_t i = 0; i < chat.size(); ++i) {
848
0
            const auto & msg = chat[i];
849
0
            const std::string & role = msg->role;
850
0
            const std::string & content = msg->content;
851
852
0
            if (i == 0 && role != "system") {
853
0
                ss << "[unused9]系统:[unused10]";
854
0
            }
855
856
0
            if (role == "system") {
857
0
                ss << "[unused9]系统:" << content << "[unused10]";
858
0
            } else if (role == "user") {
859
0
                ss << "[unused9]用户:" << content << "[unused10]";
860
0
            } else if (role == "assistant") {
861
0
                ss << "[unused9]助手:" << content << "[unused10]";
862
0
            } else if (role == "tool") {
863
0
                ss << "[unused9]工具:" << content << "[unused10]";
864
0
            } else if (role == "function") {
865
0
                ss << "[unused9]方法:" << content << "[unused10]";
866
0
            }
867
0
        }
868
0
        if (add_ass) {
869
0
            ss << "[unused9]助手:";
870
0
        }
871
0
    } else if (tmpl == LLM_CHAT_TEMPLATE_SOLAR_OPEN) {
872
0
        for (auto message : chat) {
873
0
            std::string role(message->role);
874
0
            ss << "<|begin|>" << role << "<|content|>" << message->content << "<|end|>";
875
0
        }
876
0
        if (add_ass) {
877
0
            ss << "<|begin|>assistant";
878
0
        }
879
0
    } else {
880
        // template not supported
881
0
        return -1;
882
0
    }
883
0
    dest = ss.str();
884
0
    return dest.size();
885
0
}
886
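
A caller-side usage sketch for llm_chat_apply_template (not part of the covered file), assuming llama_chat_message carries {role, content} C strings as declared in llama.h; with LLM_CHAT_TEMPLATE_CHATML and add_ass = true the buffer comes out as the chatml-framed prompt shown in the comment.

    #include <string>
    #include <vector>
    #include "llama.h"
    #include "llama-chat.h"

    static std::string apply_example() {
        llama_chat_message sys = { "system", "You are helpful." };
        llama_chat_message usr = { "user",   "Hi there"         };
        std::vector<const llama_chat_message *> chat = { &sys, &usr };

        std::string prompt;
        int32_t n = llm_chat_apply_template(LLM_CHAT_TEMPLATE_CHATML, chat, prompt, /*add_ass=*/true);
        // n < 0 would mean "template not supported"; here prompt becomes:
        // <|im_start|>system\nYou are helpful.<|im_end|>\n<|im_start|>user\nHi there<|im_end|>\n<|im_start|>assistant\n
        return n < 0 ? std::string() : prompt;
    }
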
887
// public interface
888
889
0
int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
890
0
    auto it = LLM_CHAT_TEMPLATES.begin();
891
0
    for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
892
0
        output[i] = it->first.c_str();
893
0
        std::advance(it, 1);
894
0
    }
895
0
    return (int32_t) LLM_CHAT_TEMPLATES.size();
896
0
}
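
A sketch of the enumeration pattern llama_chat_builtin_templates supports: since the function always returns the total number of built-in templates and only fills up to len entries, a caller can ask for the count first and then fill a buffer. Illustrative caller code, not part of the report.

    #include <cstdio>
    #include <vector>
    #include "llama.h"

    static void list_builtin_templates() {
        int32_t count = llama_chat_builtin_templates(nullptr, 0);  // just ask for the count
        std::vector<const char *> names(count);
        llama_chat_builtin_templates(names.data(), names.size());  // fill the buffer
        for (const char * name : names) {
            std::printf("%s\n", name);                              // e.g. "chatml", "llama2", ...
        }
    }
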