/src/llama.cpp/src/unicode.cpp
Line | Count | Source |
1 | | #if defined(_MSC_VER) |
2 | | #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING |
3 | | #endif |
4 | | |
5 | | #include "unicode.h" |
6 | | #include "unicode-data.h" |
7 | | |
8 | | #include <algorithm> |
9 | | #include <cassert> |
10 | | #include <codecvt> |
11 | | #include <cstddef> |
12 | | #include <cstdint> |
13 | | #include <locale> |
14 | | #include <map> |
15 | | #include <regex> |
16 | | #include <stdexcept> |
17 | | #include <string> |
18 | | #include <unordered_map> |
19 | | #include <utility> |
20 | | #include <vector> |
21 | | |
22 | 0 | size_t unicode_len_utf8(char src) { |
23 | 0 | const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 }; |
24 | 0 | uint8_t highbits = static_cast<uint8_t>(src) >> 4; |
25 | 0 | return lookup[highbits]; |
26 | 0 | } |
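// Illustrative values, assuming the argument is a UTF-8 lead byte: the table is
// indexed by the top four bits of `src`, which encode the sequence length:
//   unicode_len_utf8('a')    == 1   // 0x61 >> 4 == 0x6  (ASCII)
//   unicode_len_utf8('\xC3') == 2   // 0xC3 >> 4 == 0xC  (2-byte lead)
//   unicode_len_utf8('\xE2') == 3   // 0xE2 >> 4 == 0xE  (3-byte lead)
//   unicode_len_utf8('\xF0') == 4   // 0xF0 >> 4 == 0xF  (4-byte lead)
// Continuation bytes (0x80..0xBF) also map to 1, so the result is only meaningful
// for lead bytes.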
27 | | |
28 | 0 | static std::string unicode_cpts_to_utf8(const std::vector<uint32_t> & cps) { |
29 | 0 | std::string result; |
30 | 0 | for (size_t i = 0; i < cps.size(); ++i) { |
31 | 0 | result.append(unicode_cpt_to_utf8(cps[i])); |
32 | 0 | } |
33 | 0 | return result; |
34 | 0 | } |
35 | | |
36 | 0 | uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { |
37 | 0 | assert(offset < utf8.size()); |
38 | 0 | if (!(utf8[offset + 0] & 0x80)) { |
39 | 0 | auto result = utf8[offset + 0]; |
40 | 0 | offset += 1; |
41 | 0 | return result; |
42 | 0 | } |
43 | 0 | if (!(utf8[offset + 0] & 0x40)) { |
44 | 0 | throw std::invalid_argument("invalid character"); |
45 | 0 | } |
46 | 0 | if (!(utf8[offset + 0] & 0x20)) { |
47 | 0 | if (offset + 1 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80)) { |
48 | 0 | throw std::invalid_argument("invalid character"); |
49 | 0 | } |
50 | 0 | auto result = ((utf8[offset + 0] & 0x1f) << 6) | (utf8[offset + 1] & 0x3f); |
51 | 0 | offset += 2; |
52 | 0 | return result; |
53 | 0 | } |
54 | 0 | if (!(utf8[offset + 0] & 0x10)) { |
55 | 0 | if (offset + 2 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80)) { |
56 | 0 | throw std::invalid_argument("invalid character"); |
57 | 0 | } |
58 | 0 | auto result = ((utf8[offset + 0] & 0x0f) << 12) | ((utf8[offset + 1] & 0x3f) << 6) | (utf8[offset + 2] & 0x3f); |
59 | 0 | offset += 3; |
60 | 0 | return result; |
61 | 0 | } |
62 | 0 | if (!(utf8[offset + 0] & 0x08)) { |
63 | 0 | if (offset + 3 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80) || !((utf8[offset + 3] & 0xc0) == 0x80)) { |
64 | 0 | throw std::invalid_argument("invalid character"); |
65 | 0 | } |
66 | 0 | auto result = ((utf8[offset + 0] & 0x07) << 18) | ((utf8[offset + 1] & 0x3f) << 12) | ((utf8[offset + 2] & 0x3f) << 6) | (utf8[offset + 3] & 0x3f); |
67 | 0 | offset += 4; |
68 | 0 | return result; |
69 | 0 | } |
70 | 0 | throw std::invalid_argument("failed to convert utf8 to codepoint"); |
71 | 0 | } |
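// Usage sketch (illustrative): decoding advances `offset` past the whole sequence
// and throws std::invalid_argument on malformed input.
//
//   std::string s = "a\xC3\xA9";                    // "aé": 1-byte + 2-byte sequence
//   size_t off = 0;
//   uint32_t c0 = unicode_cpt_from_utf8(s, off);    // c0 == 0x61 ('a'), off == 1
//   uint32_t c1 = unicode_cpt_from_utf8(s, off);    // c1 == 0xE9 ('é'), off == 3
//
// unicode_cpts_from_utf8() further below catches the exception and substitutes U+FFFD.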
72 | | |
73 | | //static std::vector<uint16_t> unicode_cpt_to_utf16(uint32_t cpt) { |
74 | | // std::vector<uint16_t> result; |
75 | | // if (/* 0x0000 <= cpt && */ cpt <= 0xffff) { |
76 | | // result.emplace_back(cpt); |
77 | | // return result; |
78 | | // } |
79 | | // if (0x10000 <= cpt && cpt <= 0x10ffff) { |
80 | | // result.emplace_back(0xd800 | ((cpt - 0x10000) >> 10)); |
81 | | // result.emplace_back(0xdc00 | ((cpt - 0x10000) & 0x03ff)); |
82 | | // return result; |
83 | | // } |
84 | | // throw std::invalid_argument("failed to convert codepoint to utf16"); |
85 | | //} |
86 | | |
87 | | //static std::vector<uint16_t> unicode_cpts_to_utf16(const std::vector<uint32_t> & cps) { |
88 | | // std::vector<uint16_t> result; |
89 | | // for (size_t i = 0; i < cps.size(); ++i) { |
90 | | // auto temp = unicode_cpt_to_utf16(cps[i]); |
91 | | // result.insert(result.end(), temp.begin(), temp.end()); |
92 | | // } |
93 | | // return result; |
94 | | //} |
95 | | |
96 | | //static uint32_t unicode_cpt_from_utf16(const std::vector<uint16_t> & utf16, size_t & offset) { |
97 | | // assert(offset < utf16.size()); |
98 | | // if (((utf16[0] >> 10) << 10) != 0xd800) { |
99 | | // auto result = utf16[offset + 0]; |
100 | | // offset += 1; |
101 | | // return result; |
102 | | // } |
103 | | // |
104 | | // if (offset + 1 >= utf16.size() || !((utf16[1] & 0xdc00) == 0xdc00)) { |
105 | | // throw std::invalid_argument("invalid character"); |
106 | | // } |
107 | | // |
108 | | // auto result = 0x10000 + (((utf16[0] & 0x03ff) << 10) | (utf16[1] & 0x03ff)); |
109 | | // offset += 2; |
110 | | // return result; |
111 | | //} |
112 | | |
113 | | //static std::vector<uint32_t> unicode_cpts_from_utf16(const std::vector<uint16_t> & utf16) { |
114 | | // std::vector<uint32_t> result; |
115 | | // size_t offset = 0; |
116 | | // while (offset < utf16.size()) { |
117 | | // result.push_back(unicode_cpt_from_utf16(utf16, offset)); |
118 | | // } |
119 | | // return result; |
120 | | //} |
121 | | |
122 | 0 | static std::vector<unicode_cpt_flags> unicode_cpt_flags_array() { |
123 | 0 | std::vector<unicode_cpt_flags> cpt_flags(MAX_CODEPOINTS, unicode_cpt_flags::UNDEFINED); |
124 | |
125 | 0 | assert (unicode_ranges_flags.begin()[0].first == 0); |
126 | 0 | assert (unicode_ranges_flags.begin()[unicode_ranges_flags.size()-1].first == MAX_CODEPOINTS); |
127 | 0 | for (size_t i = 1; i < unicode_ranges_flags.size(); ++i) { |
128 | 0 | const auto range_ini = unicode_ranges_flags.begin()[i-1]; // codepoint_ini, flags |
129 | 0 | const auto range_end = unicode_ranges_flags.begin()[i]; // codepoint_end, flags |
130 | 0 | for (uint32_t cpt = range_ini.first; cpt < range_end.first; ++cpt) { |
131 | 0 | cpt_flags[cpt] = range_ini.second; |
132 | 0 | } |
133 | 0 | } |
134 | |
135 | 0 | for (auto cpt : unicode_set_whitespace) { |
136 | 0 | cpt_flags[cpt].is_whitespace = true; |
137 | 0 | } |
138 | |
139 | 0 | for (auto p : unicode_map_lowercase) { |
140 | 0 | cpt_flags[p.second].is_lowercase = true; |
141 | 0 | } |
142 | |
143 | 0 | for (auto p : unicode_map_uppercase) { |
144 | 0 | cpt_flags[p.second].is_uppercase = true; |
145 | 0 | } |
146 | |
147 | 0 | for (auto &range : unicode_ranges_nfd) { // start, last, nfd |
148 | 0 | cpt_flags[range.nfd].is_nfd = true; |
149 | 0 | } |
150 | |
151 | 0 | return cpt_flags; |
152 | 0 | } |
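// How the table above is built (sketch): unicode_ranges_flags stores
// (first codepoint, flags) pairs sorted by codepoint, with a sentinel entry at
// MAX_CODEPOINTS (see the asserts), so each entry's flags apply up to the start
// of the next entry. The whitespace/lowercase/uppercase/NFD bits are then OR-ed
// in from the separate tables. After construction one would expect, for example:
//   cpt_flags['A'].is_letter     == true
//   cpt_flags['A'].is_uppercase  == true
//   cpt_flags[' '].is_whitespace == true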
153 | | |
154 | 0 | static std::unordered_map<uint8_t, std::string> unicode_byte_to_utf8_map() { |
155 | 0 | std::unordered_map<uint8_t, std::string> map; |
156 | 0 | for (int ch = 0x21; ch <= 0x7E; ++ch) { // u'!' to u'~' |
157 | 0 | assert(0 <= ch && ch < 256); |
158 | 0 | map[ch] = unicode_cpt_to_utf8(ch); |
159 | 0 | } |
160 | 0 | for (int ch = 0xA1; ch <= 0xAC; ++ch) { // u'¡' to u'¬' |
161 | 0 | assert(0 <= ch && ch < 256); |
162 | 0 | map[ch] = unicode_cpt_to_utf8(ch); |
163 | 0 | } |
164 | 0 | for (int ch = 0xAE; ch <= 0xFF; ++ch) { // u'®' to u'ÿ' |
165 | 0 | assert(0 <= ch && ch < 256); |
166 | 0 | map[ch] = unicode_cpt_to_utf8(ch); |
167 | 0 | } |
168 | 0 | auto n = 0; |
169 | 0 | for (int ch = 0; ch < 256; ++ch) { |
170 | 0 | if (map.find(ch) == map.end()) { |
171 | 0 | map[ch] = unicode_cpt_to_utf8(256 + n); |
172 | 0 | ++n; |
173 | 0 | } |
174 | 0 | } |
175 | 0 | return map; |
176 | 0 | } |
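// Sketch of the resulting table (GPT-2 style byte-to-unicode mapping): the 188
// "printable" bytes in [0x21,0x7E], [0xA1,0xAC] and [0xAE,0xFF] map to themselves
// as codepoints, and the remaining 68 bytes (controls, space, 0x7F, 0xA0, 0xAD)
// are assigned the otherwise unused codepoints 256, 257, ... in byte order, so
// every byte gets a printable, reversible UTF-8 string. For example:
//   unicode_byte_to_utf8('!') == "!"        // 0x21 maps to itself
//   unicode_byte_to_utf8(' ') == "\u0120"   // space is the 33rd remapped byte -> U+0120 "Ġ"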
177 | | |
178 | 0 | static std::unordered_map<std::string, uint8_t> unicode_utf8_to_byte_map() { |
179 | 0 | std::unordered_map<std::string, uint8_t> map; |
180 | 0 | for (int ch = 0x21; ch <= 0x7E; ++ch) { // u'!' to u'~' |
181 | 0 | assert(0 <= ch && ch < 256); |
182 | 0 | map[unicode_cpt_to_utf8(ch)] = ch; |
183 | 0 | } |
184 | 0 | for (int ch = 0xA1; ch <= 0xAC; ++ch) { // u'¡' to u'¬' |
185 | 0 | assert(0 <= ch && ch < 256); |
186 | 0 | map[unicode_cpt_to_utf8(ch)] = ch; |
187 | 0 | } |
188 | 0 | for (int ch = 0xAE; ch <= 0xFF; ++ch) { // u'®' to u'ÿ' |
189 | 0 | assert(0 <= ch && ch < 256); |
190 | 0 | map[unicode_cpt_to_utf8(ch)] = ch; |
191 | 0 | } |
192 | 0 | auto n = 0; |
193 | 0 | for (int ch = 0; ch < 256; ++ch) { |
194 | 0 | if (map.find(unicode_cpt_to_utf8(ch)) == map.end()) { |
195 | 0 | map[unicode_cpt_to_utf8(256 + n)] = ch; |
196 | 0 | ++n; |
197 | 0 | } |
198 | 0 | } |
199 | 0 | return map; |
200 | 0 | } |
201 | | |
202 | 0 | static inline std::wstring unicode_wstring_from_utf8(const std::string & s) { |
203 | 0 | #if defined(__clang__) |
204 | | // disable C++17 deprecation warning for std::codecvt_utf8 |
205 | 0 | # pragma clang diagnostic push |
206 | 0 | # pragma clang diagnostic ignored "-Wdeprecated-declarations" |
207 | | #elif defined(__GNUC__) |
208 | | # pragma GCC diagnostic push |
209 | | # pragma GCC diagnostic ignored "-Wdeprecated-declarations" |
210 | | #endif |
211 | |
212 | 0 | std::wstring_convert<std::codecvt_utf8<wchar_t>> conv; |
213 | |
214 | 0 | #if defined(__clang__) |
215 | 0 | # pragma clang diagnostic pop |
216 | | #elif defined(__GNUC__) |
217 | | # pragma GCC diagnostic pop |
218 | | #endif |
219 | |
220 | 0 | return conv.from_bytes(s); |
221 | 0 | } |
222 | | |
223 | 0 | static std::vector<std::string> unicode_byte_encoding_process(const std::vector<std::string> & bpe_words) { |
224 | 0 | std::vector<std::string> bpe_encoded_words; |
225 | 0 | for (const auto & word : bpe_words) { |
226 | 0 | std::string text_utf; |
227 | 0 | auto utf_word = unicode_cpts_from_utf8(word); |
228 | 0 | for (size_t i = 0; i < utf_word.size(); ++i) { |
229 | 0 | text_utf += unicode_cpt_to_utf8(utf_word[i]); |
230 | 0 | } |
231 | |
232 | 0 | std::string encoded_token; |
233 | 0 | for (char & c : text_utf) { |
234 | 0 | encoded_token += unicode_byte_to_utf8(c); |
235 | 0 | } |
236 | 0 | bpe_encoded_words.emplace_back(encoded_token); |
237 | 0 | } |
238 | 0 | return bpe_encoded_words; |
239 | 0 | } |
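// Illustrative effect: every byte of each word's UTF-8 encoding is replaced by its
// byte-level BPE symbol from unicode_byte_to_utf8(), so, assuming the mapping built
// above, the word " hello" would come back as "Ġhello" (space -> U+0120).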
240 | | |
241 | | // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+ |
242 | 0 | static std::vector<size_t> unicode_regex_split_custom_gpt2(const std::string & text, const std::vector<size_t> & offsets) { |
243 | 0 | std::vector<size_t> bpe_offsets; // store the offset of each word |
244 | 0 | bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size |
245 | |
246 | 0 | const auto cpts = unicode_cpts_from_utf8(text); |
247 | |
248 | 0 | size_t start = 0; |
249 | 0 | for (auto offset : offsets) { |
250 | 0 | const size_t offset_ini = start; |
251 | 0 | const size_t offset_end = start + offset; |
252 | 0 | assert(offset_end <= cpts.size()); |
253 | 0 | start = offset_end; |
254 | |
255 | 0 | static const uint32_t OUT_OF_RANGE = 0xFFFFFFFF; |
256 | 0 | auto _get_cpt = [&] (const size_t pos) -> uint32_t { |
257 | 0 | return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE; |
258 | 0 | }; |
259 | |
260 | 0 | auto _get_flags = [&] (const size_t pos) -> unicode_cpt_flags { |
261 | 0 | return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags_from_cpt(cpts[pos]) : unicode_cpt_flags{}; |
262 | 0 | }; |
263 | |
264 | 0 | size_t _prev_end = offset_ini; |
265 | 0 | auto _add_token = [&] (const size_t end) -> size_t { |
266 | 0 | assert(_prev_end <= end && end <= offset_end); |
267 | 0 | size_t len = end - _prev_end; |
268 | 0 | if (len > 0) { |
269 | 0 | bpe_offsets.push_back(len); |
270 | 0 | } |
271 | 0 | _prev_end = end; |
272 | | //if (len > 0) { |
273 | | // std::string s = ""; |
274 | | // for(size_t p = end-len; p < end; p++) |
275 | | // s += unicode_cpt_to_utf8(cpts[p]); |
276 | | // printf(">>> '%s'\n", s.c_str()); |
277 | | //} |
278 | 0 | return len; |
279 | 0 | }; |
280 | |
281 | 0 | for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) { |
282 | 0 | const uint32_t cpt = _get_cpt(pos); |
283 | 0 | const auto flags = _get_flags(pos); |
284 | | |
285 | | // regex: 's|'t|'re|'ve|'m|'ll|'d |
286 | 0 | if (cpt == '\'' && pos+1 < offset_end) { |
287 | 0 | uint32_t cpt_next = _get_cpt(pos+1); |
288 | 0 | if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') { |
289 | 0 | pos += _add_token(pos+2); |
290 | 0 | continue; |
291 | 0 | } |
292 | 0 | if (pos+2 < offset_end) { |
293 | 0 | uint32_t cpt_next_next = _get_cpt(pos+2); |
294 | 0 | if ((cpt_next == 'r' && cpt_next_next == 'e') || |
295 | 0 | (cpt_next == 'v' && cpt_next_next == 'e') || |
296 | 0 | (cpt_next == 'l' && cpt_next_next == 'l')) { |
297 | 0 | pos += _add_token(pos+3); |
298 | 0 | continue; |
299 | 0 | } |
300 | 0 | } |
301 | 0 | } |
302 | | |
303 | 0 | auto flags2 = (cpt == ' ' ? _get_flags(pos+1) : flags); |
304 | | // regex: <space>?\p{L}+ |
305 | 0 | if (flags2.is_letter) { |
306 | 0 | pos += (cpt == ' '); |
307 | 0 | while (flags2.is_letter) { |
308 | 0 | flags2 = _get_flags(++pos); |
309 | 0 | } |
310 | 0 | _add_token(pos); |
311 | 0 | continue; |
312 | 0 | } |
313 | | // regex: <space>?\p{N}+ |
314 | 0 | if (flags2.is_number) { |
315 | 0 | pos += (cpt == ' '); |
316 | 0 | while (flags2.is_number) { |
317 | 0 | flags2 = _get_flags(++pos); |
318 | 0 | } |
319 | 0 | _add_token(pos); |
320 | 0 | continue; |
321 | 0 | } |
322 | | // regex: <space>?[^\s\p{L}\p{N}]+ |
323 | 0 | if (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags2.as_uint()) { |
324 | 0 | pos += (cpt == ' '); |
325 | 0 | while (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags2.as_uint()) { |
326 | 0 | flags2 = _get_flags(++pos); |
327 | 0 | } |
328 | 0 | _add_token(pos); |
329 | 0 | continue; |
330 | 0 | } |
331 | | |
332 | 0 | size_t num_whitespaces = 0; |
333 | 0 | while (_get_flags(pos+num_whitespaces).is_whitespace) { |
334 | 0 | num_whitespaces++; |
335 | 0 | } |
336 | | |
337 | | // regex: \s+(?!\S) |
338 | 0 | if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != OUT_OF_RANGE) { |
339 | 0 | pos += num_whitespaces - 1; |
340 | 0 | _add_token(pos); |
341 | 0 | continue; |
342 | 0 | } |
343 | | |
344 | | // regex: \s+ |
345 | 0 | if (num_whitespaces > 0) { |
346 | 0 | pos += num_whitespaces; |
347 | 0 | _add_token(pos); |
348 | 0 | continue; |
349 | 0 | } |
350 | | |
351 | | // no matches |
352 | 0 | _add_token(++pos); |
353 | 0 | } |
354 | 0 | } |
355 | |
356 | 0 | return bpe_offsets; |
357 | 0 | } |
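// Worked example (illustrative): with a single offset covering the whole string,
// the hand-rolled matcher above splits "Hello world, it's 2024" into the pieces
//   "Hello" | " world" | "," | " it" | "'s" | " 2024"
// and returns their codepoint lengths {5, 6, 1, 3, 2, 5}, mirroring what the GPT-2
// pre-tokenizer regex quoted in the comment above would produce.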
358 | | |
359 | | // LLAMA3 system regex: "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+" |
360 | 0 | static std::vector<size_t> unicode_regex_split_custom_llama3(const std::string & text, const std::vector<size_t> & offsets) { |
361 | 0 | std::vector<size_t> bpe_offsets; // store the offset of each word |
362 | 0 | bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size |
363 | |
364 | 0 | const auto cpts = unicode_cpts_from_utf8(text); |
365 | |
366 | 0 | size_t start = 0; |
367 | 0 | for (auto offset : offsets) { |
368 | 0 | const size_t offset_ini = start; |
369 | 0 | const size_t offset_end = start + offset; |
370 | 0 | assert(offset_end <= cpts.size()); |
371 | 0 | start = offset_end; |
372 | |
373 | 0 | static const uint32_t OUT_OF_RANGE = 0xFFFFFFFF; |
374 | 0 | auto _get_cpt = [&] (const size_t pos) -> uint32_t { |
375 | 0 | return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE; |
376 | 0 | }; |
377 | |
378 | 0 | auto _get_flags = [&] (const size_t pos) -> unicode_cpt_flags { |
379 | 0 | return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags_from_cpt(cpts[pos]) : unicode_cpt_flags{}; |
380 | 0 | }; |
381 | |
382 | 0 | size_t _prev_end = offset_ini; |
383 | 0 | auto _add_token = [&] (const size_t end) -> size_t { |
384 | 0 | assert(_prev_end <= end && end <= offset_end); |
385 | 0 | size_t len = end - _prev_end; |
386 | 0 | if (len > 0) { |
387 | 0 | bpe_offsets.push_back(len); |
388 | 0 | } |
389 | 0 | _prev_end = end; |
390 | | //if (len > 0) { |
391 | | // std::string s = ""; |
392 | | // for(size_t p = end-len; p < end; p++) |
393 | | // s += unicode_cpt_to_utf8(cpts[p]); |
394 | | // printf(">>> '%s'\n", s.c_str()); |
395 | | //} |
396 | 0 | return len; |
397 | 0 | }; |
398 | |
399 | 0 | for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) { |
400 | 0 | const uint32_t cpt = _get_cpt(pos); |
401 | 0 | const auto flags = _get_flags(pos); |
402 | | |
403 | | // regex: (?i:'s|'t|'re|'ve|'m|'ll|'d) // case insensitive |
404 | 0 | if (cpt == '\'' && pos+1 < offset_end) { |
405 | 0 | uint32_t cpt_next = unicode_tolower(_get_cpt(pos+1)); |
406 | 0 | if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') { |
407 | 0 | pos += _add_token(pos+2); |
408 | 0 | continue; |
409 | 0 | } |
410 | 0 | if (pos+2 < offset_end) { |
411 | 0 | uint32_t cpt_next_next = unicode_tolower(_get_cpt(pos+2)); |
412 | 0 | if ((cpt_next == 'r' && cpt_next_next == 'e') || |
413 | 0 | (cpt_next == 'v' && cpt_next_next == 'e') || |
414 | 0 | (cpt_next == 'l' && cpt_next_next == 'l')) { |
415 | 0 | pos += _add_token(pos+3); |
416 | 0 | continue; |
417 | 0 | } |
418 | 0 | } |
419 | 0 | } |
420 | | |
421 | | // regex: [^\r\n\p{L}\p{N}]?\p{L}+ |
422 | 0 | if (!(cpt == '\r' || cpt == '\n' || flags.is_number)) { |
423 | 0 | if (flags.is_letter || _get_flags(pos+1).is_letter) { // one or more letters |
424 | 0 | pos++; |
425 | 0 | while (_get_flags(pos).is_letter) { |
426 | 0 | pos++; |
427 | 0 | } |
428 | 0 | _add_token(pos); |
429 | 0 | continue; |
430 | 0 | } |
431 | 0 | } |
432 | | |
433 | | // regex: \p{N}{1,3} |
434 | 0 | if (flags.is_number) { |
435 | 0 | size_t ini = pos; |
436 | 0 | while (_get_flags(pos).is_number) { |
437 | 0 | if (++pos - ini >= 3 ) { |
438 | 0 | _add_token(pos); |
439 | 0 | ini = pos; |
440 | 0 | } |
441 | 0 | } |
442 | 0 | _add_token(pos); |
443 | 0 | continue; |
444 | 0 | } |
445 | | |
446 | | // regex: <space>?[^\s\p{L}\p{N}]+[\r\n]* |
447 | 0 | auto flags2 = (cpt == ' ' ? _get_flags(pos+1) : flags); |
448 | 0 | if (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags.as_uint()) { |
449 | 0 | pos += (cpt == ' '); |
450 | 0 | while (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags2.as_uint()) { |
451 | 0 | flags2 = _get_flags(++pos); |
452 | 0 | } |
453 | 0 | uint32_t cpt2 = _get_cpt(pos); |
454 | 0 | while (cpt2 == '\r' || cpt2 == '\n') { |
455 | 0 | cpt2 = _get_cpt(++pos); |
456 | 0 | } |
457 | 0 | _add_token(pos); |
458 | 0 | continue; |
459 | 0 | } |
460 | | |
461 | 0 | size_t num_whitespaces = 0; |
462 | 0 | size_t last_end_r_or_n = 0; |
463 | 0 | while (_get_flags(pos+num_whitespaces).is_whitespace) { |
464 | 0 | uint32_t cpt2 = _get_cpt(pos+num_whitespaces); |
465 | 0 | if (cpt2 == '\r' || cpt2 == '\n') { |
466 | 0 | last_end_r_or_n = pos + num_whitespaces + 1; |
467 | 0 | } |
468 | 0 | num_whitespaces++; |
469 | 0 | } |
470 | | |
471 | | // regex: \s*[\r\n]+ |
472 | 0 | if (last_end_r_or_n > 0) { |
473 | 0 | pos = last_end_r_or_n; |
474 | 0 | _add_token(pos); |
475 | 0 | continue; |
476 | 0 | } |
477 | | |
478 | | // regex: \s+(?!\S) |
479 | 0 | if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != OUT_OF_RANGE) { |
480 | 0 | pos += num_whitespaces - 1; |
481 | 0 | _add_token(pos); |
482 | 0 | continue; |
483 | 0 | } |
484 | | |
485 | | // regex: \s+ |
486 | 0 | if (num_whitespaces > 0) { |
487 | 0 | pos += num_whitespaces; |
488 | 0 | _add_token(pos); |
489 | 0 | continue; |
490 | 0 | } |
491 | | |
492 | | // no matches |
493 | 0 | _add_token(++pos); |
494 | 0 | } |
495 | 0 | } |
496 | |
497 | 0 | return bpe_offsets; |
498 | 0 | } |
499 | | |
500 | | // use std::wregex to split the text |
501 | 0 | static std::vector<size_t> unicode_regex_split_stl(const std::wstring & wtext, const std::wstring & regex_expr, const std::vector<size_t> & offsets) { |
502 | 0 | std::wregex expr(regex_expr); |
503 | 0 | std::vector<size_t> bpe_offsets; // store the offset of each word |
504 | 0 | bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size |
505 | 0 | size_t start = 0; |
506 | 0 | for (auto offset : offsets) { |
507 | 0 | std::wcregex_iterator it(wtext.data() + start, wtext.data() + start + offset, expr); |
508 | 0 | std::wcregex_iterator end; |
509 | |
510 | 0 | int64_t start_idx = 0; |
511 | 0 | while (it != end) { |
512 | 0 | std::wcmatch match = *it; |
513 | 0 | if (match.position() > start_idx) { |
514 | 0 | bpe_offsets.emplace_back(match.position() - start_idx); |
515 | 0 | } |
516 | 0 | bpe_offsets.emplace_back(match.length()); |
517 | 0 | start_idx = match.position() + match.length(); |
518 | 0 | ++it; |
519 | 0 | } |
520 | |
521 | 0 | if (start_idx < (int64_t) offset) { |
522 | 0 | bpe_offsets.emplace_back(offset - start_idx); |
523 | 0 | } |
524 | 0 | start += offset; |
525 | 0 | } |
526 | |
527 | 0 | return bpe_offsets; |
528 | 0 | } |
529 | | |
530 | | // use std::regex to split the text |
531 | 0 | static std::vector<size_t> unicode_regex_split_stl(const std::string & text, const std::string & regex_expr, const std::vector<size_t> & offsets) { |
532 | 0 | std::regex expr(regex_expr); |
533 | 0 | std::vector<size_t> bpe_offsets; // store the offset of each word |
534 | 0 | bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size |
535 | 0 | size_t start = 0; |
536 | 0 | for (auto offset : offsets) { |
537 | 0 | std::cregex_iterator it(text.data() + start, text.data() + start + offset, expr); |
538 | 0 | std::cregex_iterator end; |
539 | |
540 | 0 | int64_t start_idx = 0; |
541 | 0 | while (it != end) { |
542 | 0 | std::cmatch match = *it; |
543 | 0 | if (match.position() > start_idx) { |
544 | 0 | bpe_offsets.emplace_back(match.position() - start_idx); |
545 | 0 | } |
546 | 0 | bpe_offsets.emplace_back(match.length()); |
547 | 0 | start_idx = match.position() + match.length(); |
548 | 0 | ++it; |
549 | 0 | } |
550 | |
551 | 0 | if (start_idx < (int64_t) offset) { |
552 | 0 | bpe_offsets.emplace_back(offset - start_idx); |
553 | 0 | } |
554 | 0 | start += offset; |
555 | 0 | } |
556 | |
557 | 0 | return bpe_offsets; |
558 | 0 | } |
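// Note on both overloads above: they return the *lengths* of consecutive pieces,
// not positions. Text between matches (the gap before a match and any trailing
// tail) is also emitted as a piece, so the returned lengths always sum to the
// length of the chunk being split, and the next regex in the chain sees every
// piece, matched or not.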
559 | | |
560 | | // K2 system regex patterns (from tokenization_kimi.py): |
561 | | // [\p{Han}]+|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+(?i:'s|'t|'re|'ve|'m|'ll|'d)?|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*(?i:'s|'t|'re|'ve|'m|'ll|'d)?|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+ |
562 | 0 | static std::vector<size_t> unicode_regex_split_custom_kimi_k2(const std::string & text, const std::vector<size_t> & offsets) { |
563 | 0 | std::vector<size_t> bpe_offsets; |
564 | 0 | bpe_offsets.reserve(offsets.size()); |
565 | |
566 | 0 | const auto cpts = unicode_cpts_from_utf8(text); |
567 | |
568 | 0 | size_t start = 0; |
569 | 0 | for (auto offset : offsets) { |
570 | 0 | const size_t offset_ini = start; |
571 | 0 | const size_t offset_end = start + offset; |
572 | 0 | assert(offset_end <= cpts.size()); |
573 | 0 | start = offset_end; |
574 | |
575 | 0 | static const uint32_t OUT_OF_RANGE = 0xFFFFFFFF; |
576 | 0 | auto _get_cpt = [&] (const size_t pos) -> uint32_t { |
577 | 0 | return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE; |
578 | 0 | }; |
579 | |
580 | 0 | auto _get_flags = [&] (const size_t pos) -> unicode_cpt_flags { |
581 | 0 | return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags_from_cpt(cpts[pos]) : unicode_cpt_flags{}; |
582 | 0 | }; |
583 | |
584 | 0 | size_t _prev_end = offset_ini; |
585 | 0 | auto _add_token = [&] (const size_t end) -> size_t { |
586 | 0 | assert(_prev_end <= end && end <= offset_end); |
587 | 0 | size_t len = end - _prev_end; |
588 | 0 | if (len > 0) { |
589 | 0 | bpe_offsets.push_back(len); |
590 | 0 | } |
591 | 0 | _prev_end = end; |
592 | 0 | return len; |
593 | 0 | }; |
594 | |
595 | 0 | for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) { |
596 | 0 | const uint32_t cpt = _get_cpt(pos); |
597 | 0 | const auto flags = _get_flags(pos); |
598 | | |
599 | | // Pattern 1: [\p{Han}]+ (Chinese characters) |
600 | 0 | if (unicode_cpt_is_han(cpt)) { |
601 | 0 | while (unicode_cpt_is_han(_get_cpt(pos))) { |
602 | 0 | pos++; |
603 | 0 | } |
604 | 0 | _add_token(pos); |
605 | 0 | continue; |
606 | 0 | } |
607 | | |
608 | | // Pattern 2 & 3: Letter words excluding Han characters with optional contractions |
609 | | // [^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+(?:'s|'t|'re|'ve|'m|'ll|'d)? |
610 | | // [^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*(?:'s|'t|'re|'ve|'m|'ll|'d)? |
611 | | // Check if current char is a letter OR if current char could be a leading char and next char is a letter |
612 | 0 | bool is_letter_pattern = (flags.is_letter && !unicode_cpt_is_han(cpt)) || |
613 | 0 | (!(cpt == '\r' || cpt == '\n' || flags.is_letter || flags.is_number) && |
614 | 0 | _get_flags(pos + 1).is_letter && !unicode_cpt_is_han(_get_cpt(pos + 1))); |
615 | |
616 | 0 | if (is_letter_pattern) { |
617 | | // Handle optional leading non-letter/non-number character |
618 | 0 | bool has_leading_char = false; |
619 | 0 | if (!(cpt == '\r' || cpt == '\n' || flags.is_letter || flags.is_number)) { |
620 | 0 | has_leading_char = true; |
621 | 0 | pos++; |
622 | 0 | } |
623 | | |
624 | | // Match letter sequence (excluding Han characters) |
625 | 0 | bool has_letters = false; |
626 | 0 | while (_get_flags(pos).is_letter && !unicode_cpt_is_han(_get_cpt(pos))) { |
627 | 0 | has_letters = true; |
628 | 0 | pos++; |
629 | 0 | } |
630 | | |
631 | | // Only proceed if we found letters (after potentially skipping leading char) |
632 | 0 | if (has_letters || (!has_leading_char && _get_flags(pos).is_letter && !unicode_cpt_is_han(_get_cpt(pos)))) { |
633 | 0 | if (!has_letters) pos++; // consume the first letter if we didn't already |
634 | | |
635 | | // Continue consuming letters |
636 | 0 | while (_get_flags(pos).is_letter && !unicode_cpt_is_han(_get_cpt(pos))) { |
637 | 0 | pos++; |
638 | 0 | } |
639 | | |
640 | | // Check for optional contractions (?:'s|'t|'re|'ve|'m|'ll|'d) |
641 | 0 | if (_get_cpt(pos) == '\'' && pos + 1 < offset_end) { |
642 | 0 | uint32_t cpt_next = unicode_tolower(_get_cpt(pos + 1)); |
643 | 0 | if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') { |
644 | 0 | pos += 2; |
645 | 0 | } else if (pos + 2 < offset_end) { |
646 | 0 | uint32_t cpt_next_next = unicode_tolower(_get_cpt(pos + 2)); |
647 | 0 | if ((cpt_next == 'r' && cpt_next_next == 'e') || |
648 | 0 | (cpt_next == 'v' && cpt_next_next == 'e') || |
649 | 0 | (cpt_next == 'l' && cpt_next_next == 'l')) { |
650 | 0 | pos += 3; |
651 | 0 | } |
652 | 0 | } |
653 | 0 | } |
654 | |
655 | 0 | _add_token(pos); |
656 | 0 | continue; |
657 | 0 | } else if (has_leading_char) { |
658 | | // We consumed a leading char but found no letters, backtrack |
659 | 0 | pos--; |
660 | 0 | } |
661 | 0 | } |
662 | | |
663 | | // Pattern 4: \p{N}{1,3} (numbers 1-3 digits) |
664 | 0 | if (flags.is_number) { |
665 | 0 | size_t ini = pos; |
666 | 0 | while (_get_flags(pos).is_number) { |
667 | 0 | if (++pos - ini >= 3) { |
668 | 0 | _add_token(pos); |
669 | 0 | ini = pos; |
670 | 0 | } |
671 | 0 | } |
672 | 0 | _add_token(pos); |
673 | 0 | continue; |
674 | 0 | } |
675 | | |
676 | | // Pattern 5: <space>?[^\s\p{L}\p{N}]+[\r\n]* (optional space + non-word chars + optional newlines) |
677 | 0 | auto flags2 = (cpt == ' ' ? _get_flags(pos + 1) : flags); |
678 | 0 | if (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number) && flags2.as_uint()) { |
679 | 0 | pos += (cpt == ' '); |
680 | 0 | while (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number) && flags2.as_uint()) { |
681 | 0 | flags2 = _get_flags(++pos); |
682 | 0 | } |
683 | | // Match optional [\r\n]* |
684 | 0 | uint32_t cpt2 = _get_cpt(pos); |
685 | 0 | while (cpt2 == '\r' || cpt2 == '\n') { |
686 | 0 | cpt2 = _get_cpt(++pos); |
687 | 0 | } |
688 | 0 | _add_token(pos); |
689 | 0 | continue; |
690 | 0 | } |
691 | | |
692 | | // Count whitespace characters |
693 | 0 | size_t num_whitespaces = 0; |
694 | 0 | size_t last_end_r_or_n = 0; |
695 | 0 | while (_get_flags(pos + num_whitespaces).is_whitespace) { |
696 | 0 | uint32_t cpt2 = _get_cpt(pos + num_whitespaces); |
697 | 0 | if (cpt2 == '\r' || cpt2 == '\n') { |
698 | 0 | last_end_r_or_n = pos + num_whitespaces + 1; |
699 | 0 | } |
700 | 0 | num_whitespaces++; |
701 | 0 | } |
702 | | |
703 | | // Pattern 6: \s*[\r\n]+ (whitespace with newlines) |
704 | 0 | if (last_end_r_or_n > 0) { |
705 | 0 | pos = last_end_r_or_n; |
706 | 0 | _add_token(pos); |
707 | 0 | continue; |
708 | 0 | } |
709 | | |
710 | | // Pattern 7: \s+(?!\S) (trailing whitespace) |
711 | 0 | if (num_whitespaces > 1 && _get_cpt(pos + num_whitespaces) != OUT_OF_RANGE) { |
712 | 0 | pos += num_whitespaces - 1; |
713 | 0 | _add_token(pos); |
714 | 0 | continue; |
715 | 0 | } |
716 | | |
717 | | // Pattern 8: \s+ (general whitespace) |
718 | 0 | if (num_whitespaces > 0) { |
719 | 0 | pos += num_whitespaces; |
720 | 0 | _add_token(pos); |
721 | 0 | continue; |
722 | 0 | } |
723 | | |
724 | | // No matches - consume single character |
725 | 0 | _add_token(++pos); |
726 | 0 | } |
727 | 0 | } |
728 | |
729 | 0 | return bpe_offsets; |
730 | 0 | } |
731 | | |
732 | | // AFMOE digit handling: split digit runs into groups of three, emitting a leading group of 1-2 digits when the total length is not a multiple of 3 |
733 | 0 | static std::vector<size_t> unicode_regex_split_custom_afmoe(const std::string & text, const std::vector<size_t> & offsets) { |
734 | 0 | std::vector<size_t> bpe_offsets; |
735 | 0 | bpe_offsets.reserve(offsets.size()); |
736 | |
737 | 0 | const auto cpts = unicode_cpts_from_utf8(text); |
738 | |
739 | 0 | size_t start = 0; |
740 | 0 | for (auto offset : offsets) { |
741 | 0 | const size_t offset_ini = start; |
742 | 0 | const size_t offset_end = start + offset; |
743 | 0 | assert(offset_end <= cpts.size()); |
744 | 0 | start = offset_end; |
745 | |
746 | 0 | auto _get_flags = [&] (const size_t pos) -> unicode_cpt_flags { |
747 | 0 | return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags_from_cpt(cpts[pos]) : unicode_cpt_flags{}; |
748 | 0 | }; |
749 | |
750 | 0 | size_t _prev_end = offset_ini; |
751 | 0 | auto _add_token = [&] (const size_t end) -> size_t { |
752 | 0 | assert(_prev_end <= end && end <= offset_end); |
753 | 0 | size_t len = end - _prev_end; |
754 | 0 | if (len > 0) { |
755 | 0 | bpe_offsets.push_back(len); |
756 | 0 | } |
757 | 0 | _prev_end = end; |
758 | 0 | return len; |
759 | 0 | }; |
760 | |
761 | 0 | for (size_t pos = offset_ini; pos < offset_end; ) { |
762 | 0 | const auto flags = _get_flags(pos); |
763 | | |
764 | | // Handle digit sequences with special splitting logic |
765 | 0 | if (flags.is_number) { |
766 | 0 | size_t digit_start = pos; |
767 | 0 | size_t digit_count = 0; |
768 | | |
769 | | // Count consecutive digits |
770 | 0 | while (_get_flags(pos).is_number && pos < offset_end) { |
771 | 0 | digit_count++; |
772 | 0 | pos++; |
773 | 0 | } |
774 | | |
775 | | // Split based on total length modulo 3 |
776 | 0 | size_t remainder = digit_count % 3; |
777 | 0 | size_t current = digit_start; |
778 | | |
779 | | // Emit leading 1-2 digits if needed |
780 | 0 | if (remainder > 0) { |
781 | 0 | _add_token(current + remainder); |
782 | 0 | current += remainder; |
783 | 0 | } |
784 | | |
785 | | // Emit groups of 3 |
786 | 0 | while (current < digit_start + digit_count) { |
787 | 0 | _add_token(current + 3); |
788 | 0 | current += 3; |
789 | 0 | } |
790 | 0 | continue; |
791 | 0 | } |
792 | | |
793 | | // For non-digits, just move forward |
794 | 0 | pos++; |
795 | 0 | } |
796 | | |
797 | | // Add any remaining content |
798 | 0 | if (_prev_end < offset_end) { |
799 | 0 | _add_token(offset_end); |
800 | 0 | } |
801 | 0 | } |
802 | |
803 | 0 | return bpe_offsets; |
804 | 0 | } |
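// Worked example (illustrative), for an offset that covers only digits:
//   "1234567" -> digit_count = 7, remainder = 1 -> pieces "1" | "234" | "567"
//   "123456"  -> digit_count = 6, remainder = 0 -> pieces "123" | "456"
// i.e. any leftover 1-2 digits are emitted at the front, unlike the \p{N}{1,3}
// handling in the llama3/K2 splitters above, which groups from the front and
// leaves the short group at the end.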
805 | | |
806 | 0 | static std::vector<size_t> unicode_regex_split_custom(const std::string & text, const std::string & regex_expr, const std::vector<size_t> & offsets) { |
807 | 0 | std::vector<size_t> bpe_offsets; |
808 | |
809 | 0 | if (regex_expr == "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)") { |
810 | 0 | bpe_offsets = unicode_regex_split_custom_gpt2(text, offsets); |
811 | 0 | } else if ( |
812 | 0 | regex_expr == "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" || |
813 | 0 | regex_expr == "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+") { |
814 | |
815 | 0 | bpe_offsets = unicode_regex_split_custom_llama3(text, offsets); |
816 | 0 | } else if (regex_expr == "\\p{Han}+") { |
817 | | // K2's first pattern - handle all K2 patterns together |
818 | 0 | bpe_offsets = unicode_regex_split_custom_kimi_k2(text, offsets); |
819 | 0 | } else if (regex_expr == "\\p{AFMoE_digits}") { |
820 | | // AFMOE digit pattern - use custom implementation for proper splitting |
821 | 0 | bpe_offsets = unicode_regex_split_custom_afmoe(text, offsets); |
822 | 0 | } |
823 | |
824 | 0 | return bpe_offsets; |
825 | 0 | } |
826 | | |
827 | | // |
828 | | // interface |
829 | | // |
830 | | |
831 | 0 | std::string unicode_cpt_to_utf8(uint32_t cpt) { |
832 | 0 | std::string result; |
833 | |
834 | 0 | if (/* 0x00 <= cpt && */ cpt <= 0x7f) { |
835 | 0 | result.push_back(cpt); |
836 | 0 | return result; |
837 | 0 | } |
838 | 0 | if (0x80 <= cpt && cpt <= 0x7ff) { |
839 | 0 | result.push_back(0xc0 | ((cpt >> 6) & 0x1f)); |
840 | 0 | result.push_back(0x80 | (cpt & 0x3f)); |
841 | 0 | return result; |
842 | 0 | } |
843 | 0 | if (0x800 <= cpt && cpt <= 0xffff) { |
844 | 0 | result.push_back(0xe0 | ((cpt >> 12) & 0x0f)); |
845 | 0 | result.push_back(0x80 | ((cpt >> 6) & 0x3f)); |
846 | 0 | result.push_back(0x80 | (cpt & 0x3f)); |
847 | 0 | return result; |
848 | 0 | } |
849 | 0 | if (0x10000 <= cpt && cpt <= 0x10ffff) { |
850 | 0 | result.push_back(0xf0 | ((cpt >> 18) & 0x07)); |
851 | 0 | result.push_back(0x80 | ((cpt >> 12) & 0x3f)); |
852 | 0 | result.push_back(0x80 | ((cpt >> 6) & 0x3f)); |
853 | 0 | result.push_back(0x80 | (cpt & 0x3f)); |
854 | 0 | return result; |
855 | 0 | } |
856 | | |
857 | 0 | throw std::invalid_argument("invalid codepoint"); |
858 | 0 | } |
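// Illustrative encodings produced by the branches above:
//   unicode_cpt_to_utf8(0x41)    == "A"                  // 1 byte
//   unicode_cpt_to_utf8(0xE9)    == "\xC3\xA9"           // 2 bytes, 'é'
//   unicode_cpt_to_utf8(0x20AC)  == "\xE2\x82\xAC"       // 3 bytes, '€'
//   unicode_cpt_to_utf8(0x1F600) == "\xF0\x9F\x98\x80"   // 4 bytes, emoji
// Codepoints above 0x10FFFF throw std::invalid_argument. Note that surrogate
// values (0xD800-0xDFFF) are not rejected here; they take the 3-byte branch.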
859 | | |
860 | 0 | std::vector<uint32_t> unicode_cpts_normalize_nfd(const std::vector<uint32_t> & cpts) { |
861 | 0 | auto comp = [] (const uint32_t cpt, const range_nfd & range) { |
862 | 0 | return cpt < range.first; |
863 | 0 | }; |
864 | 0 | std::vector<uint32_t> result(cpts.size()); |
865 | 0 | for (size_t i = 0; i < cpts.size(); ++i) { |
866 | 0 | const uint32_t cpt = cpts[i]; |
867 | 0 | auto it = std::upper_bound(unicode_ranges_nfd.begin(), unicode_ranges_nfd.end(), cpt, comp) - 1; |
868 | 0 | result[i] = (it->first <= cpt && cpt <= it->last) ? it->nfd : cpt; |
869 | 0 | } |
870 | 0 | return result; |
871 | 0 | } |
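// Sketch of the lookup above: unicode_ranges_nfd is sorted by `first`, and each
// entry maps every codepoint in [first, last] to a single replacement codepoint
// `nfd`; anything outside a range passes through unchanged. This is therefore a
// one-codepoint-per-codepoint approximation of NFD (a precomposed letter would
// presumably map to its base letter, e.g. 'é' -> 'e'), not a full decomposition
// into base character plus combining marks.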
872 | | |
873 | 0 | std::vector<uint32_t> unicode_cpts_from_utf8(const std::string & utf8) { |
874 | 0 | std::vector<uint32_t> result; |
875 | 0 | result.reserve(utf8.size()); |
876 | 0 | size_t offset = 0; |
877 | 0 | while (offset < utf8.size()) { |
878 | 0 | try { |
879 | 0 | result.push_back(unicode_cpt_from_utf8(utf8, offset)); |
880 | 0 | } |
881 | 0 | catch (const std::invalid_argument & /*ex*/) { |
882 | | // Replace invalid UTF-8 with U+FFFD so the exception does not leak beyond llama_tokenize |
883 | 0 | ++offset; |
884 | 0 | result.emplace_back(0xFFFD); // replacement character |
885 | 0 | } |
886 | 0 | } |
887 | 0 | return result; |
888 | 0 | } |
889 | | |
890 | 0 | unicode_cpt_flags unicode_cpt_flags_from_cpt(const uint32_t cpt) { |
891 | 0 | static const unicode_cpt_flags undef(unicode_cpt_flags::UNDEFINED); |
892 | 0 | static const auto cpt_flags = unicode_cpt_flags_array(); |
893 | 0 | return cpt < cpt_flags.size() ? cpt_flags[cpt] : undef; |
894 | 0 | } |
895 | | |
896 | 0 | unicode_cpt_flags unicode_cpt_flags_from_utf8(const std::string & utf8) { |
897 | 0 | static const unicode_cpt_flags undef(unicode_cpt_flags::UNDEFINED); |
898 | 0 | if (utf8.empty()) { |
899 | 0 | return undef; // undefined |
900 | 0 | } |
901 | 0 | size_t offset = 0; |
902 | 0 | return unicode_cpt_flags_from_cpt(unicode_cpt_from_utf8(utf8, offset)); |
903 | 0 | } |
904 | | |
905 | 0 | std::string unicode_byte_to_utf8(uint8_t byte) { |
906 | 0 | static std::unordered_map<uint8_t, std::string> map = unicode_byte_to_utf8_map(); |
907 | 0 | return map.at(byte); |
908 | 0 | } |
909 | | |
910 | 0 | uint8_t unicode_utf8_to_byte(const std::string & utf8) { |
911 | 0 | static std::unordered_map<std::string, uint8_t> map = unicode_utf8_to_byte_map(); |
912 | 0 | return map.at(utf8); |
913 | 0 | } |
914 | | |
915 | 0 | uint32_t unicode_tolower(uint32_t cpt) { |
916 | | // binary search |
917 | 0 | auto it = std::lower_bound(unicode_map_lowercase.begin(), unicode_map_lowercase.end(), cpt, |
918 | 0 | [](const std::pair<uint32_t, uint32_t> & pair, uint32_t value) { |
919 | 0 | return pair.first < value; |
920 | 0 | }); |
921 | 0 | if (it != unicode_map_lowercase.end() && it->first == cpt) { |
922 | 0 | return it->second; |
923 | 0 | } |
924 | 0 | return cpt; // Return the original code point if no lowercase mapping is found |
925 | 0 | } |
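// Usage sketch: unicode_map_lowercase is sorted by the first element of each pair,
// so the lower_bound call above is a plain binary search, e.g.
//   unicode_tolower('A') == 'a'
//   unicode_tolower('7') == '7'   // no mapping -> returned unchanged
// The llama3 and Kimi-K2 splitters rely on this for their case-insensitive
// contraction matching ('S, 'Re, ...).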
926 | | |
927 | 0 | bool unicode_cpt_is_han(uint32_t cpt) { |
928 | | // Han character ranges (Chinese/CJK characters) |
929 | | // CJK Unified Ideographs (most common) |
930 | 0 | if (cpt >= 0x4E00 && cpt <= 0x9FFF) return true; |
931 | | |
932 | | // CJK Extension A |
933 | 0 | if (cpt >= 0x3400 && cpt <= 0x4DBF) return true; |
934 | | |
935 | | // CJK Extension B |
936 | 0 | if (cpt >= 0x20000 && cpt <= 0x2A6DF) return true; |
937 | | |
938 | | // CJK Extension C |
939 | 0 | if (cpt >= 0x2A700 && cpt <= 0x2B73F) return true; |
940 | | |
941 | | // CJK Extension D |
942 | 0 | if (cpt >= 0x2B740 && cpt <= 0x2B81F) return true; |
943 | | |
944 | | // CJK Extension E |
945 | 0 | if (cpt >= 0x2B820 && cpt <= 0x2CEAF) return true; |
946 | | |
947 | | // CJK Extension F |
948 | 0 | if (cpt >= 0x2CEB0 && cpt <= 0x2EBEF) return true; |
949 | | |
950 | | // CJK Compatibility Ideographs |
951 | 0 | if (cpt >= 0xF900 && cpt <= 0xFAFF) return true; |
952 | | |
953 | | // CJK Compatibility Ideographs Supplement |
954 | 0 | if (cpt >= 0x2F800 && cpt <= 0x2FA1F) return true; |
955 | | |
956 | 0 | return false; |
957 | 0 | } |
958 | | |
959 | 0 | std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) { |
960 | | // unicode categories |
961 | 0 | static const std::map<std::string, int> k_ucat_enum = { |
962 | 0 | { "\\p{N}", unicode_cpt_flags::NUMBER }, |
963 | 0 | { "\\p{L}", unicode_cpt_flags::LETTER }, |
964 | 0 | { "\\p{P}", unicode_cpt_flags::PUNCTUATION }, |
965 | 0 | { "\\p{M}", unicode_cpt_flags::ACCENT_MARK }, |
966 | 0 | { "\\p{S}", unicode_cpt_flags::SYMBOL }, |
967 | 0 | }; |
968 | |
969 | 0 | static const std::map<int, int> k_ucat_cpt = { |
970 | 0 | { unicode_cpt_flags::NUMBER, 0xD1 }, |
971 | 0 | { unicode_cpt_flags::LETTER, 0xD2 }, |
972 | 0 | { unicode_cpt_flags::PUNCTUATION, 0xD3 }, |
973 | 0 | { unicode_cpt_flags::ACCENT_MARK, 0xD4 }, |
974 | 0 | { unicode_cpt_flags::SYMBOL, 0xD5 }, |
975 | 0 | }; |
976 | |
977 | 0 | static const std::map<int, std::string> k_ucat_map = { |
978 | 0 | { unicode_cpt_flags::NUMBER, "\x30-\x39" }, // 0-9 |
979 | 0 | { unicode_cpt_flags::LETTER, "\x41-\x5A\x61-\x7A" }, // A-Za-z |
980 | 0 | { unicode_cpt_flags::PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\} |
981 | 0 | { unicode_cpt_flags::ACCENT_MARK, "" }, // no sub-128 codepoints |
982 | 0 | { unicode_cpt_flags::SYMBOL, "\\\x24\\\x2B\x3C-\x3E\x5E\x60\\\x7C" }, // $+<=>^`| |
983 | 0 | }; |
984 | | |
985 | | // compute collapsed codepoints only if needed by at least one regex |
986 | 0 | bool need_collapse = false; |
987 | 0 | for (const auto & regex_expr : regex_exprs) { |
988 | | // search for unicode categories |
989 | 0 | for (const auto & ucat : k_ucat_enum) { |
990 | 0 | if (std::string::npos != regex_expr.find(ucat.first)) { |
991 | 0 | need_collapse = true; |
992 | 0 | break; |
993 | 0 | } |
994 | 0 | } |
995 | 0 | } |
996 | |
997 | 0 | const auto cpts = unicode_cpts_from_utf8(text); |
998 | | |
999 | | // generate a "collapsed" representation of the text, where each codepoint is replaced by a single byte |
1000 | | // ref: https://github.com/ggml-org/llama.cpp/pull/6920#issuecomment-2081479935 |
1001 | 0 | std::string text_collapsed; |
1002 | 0 | if (need_collapse) { |
1003 | | // collapse all unicode categories |
1004 | 0 | text_collapsed.resize(cpts.size()); |
1005 | |
1006 | 0 | for (size_t i = 0; i < cpts.size(); ++i) { |
1007 | | // keep single-byte codepoints as is |
1008 | 0 | if (cpts[i] < 128) { |
1009 | 0 | text_collapsed[i] = cpts[i]; |
1010 | 0 | continue; |
1011 | 0 | } |
1012 | | |
1013 | 0 | const auto flags = unicode_cpt_flags_from_cpt(cpts[i]); |
1014 | |
1015 | 0 | if (flags.is_whitespace) { |
1016 | | //NOTE: C++ std::regex \s does not match 0x85; Rust and Python regexes do. |
1017 | | //text_collapsed[i] = (char) 0x85; // <Next Line> as whitespace fallback |
1018 | 0 | text_collapsed[i] = (char) 0x0B; // <vertical tab> as whitespace fallback |
1019 | 0 | } else if (k_ucat_cpt.find(flags.category_flag()) != k_ucat_cpt.end()) { |
1020 | 0 | text_collapsed[i] = k_ucat_cpt.at(flags.category_flag()); |
1021 | 0 | } else { |
1022 | 0 | text_collapsed[i] = (char) 0xD0; // fallback |
1023 | 0 | } |
1024 | 0 | } |
1025 | 0 | } |
1026 | |
1027 | 0 | std::vector<size_t> bpe_offsets = { cpts.size() }; |
1028 | |
1029 | 0 | for (const auto & regex_expr : regex_exprs) { |
1030 | | // first, see if we have an efficient custom regex implementation |
1031 | 0 | auto tmp = unicode_regex_split_custom(text, regex_expr, bpe_offsets); |
1032 | |
1033 | 0 | if (!tmp.empty()) { |
1034 | 0 | bpe_offsets = std::move(tmp); |
1035 | 0 | continue; |
1036 | 0 | } |
1037 | | |
1038 | | // fallback to general-purpose std::regex / std::wregex |
1039 | 0 | try { |
1040 | | // if a unicode category is used in the regex, we use the collapsed text and replace the unicode category |
1041 | | // with the corresponding collapsed representation |
1042 | 0 | bool use_collapsed = false; |
1043 | 0 | for (const auto & ucat : k_ucat_enum) { |
1044 | 0 | if (std::string::npos != regex_expr.find(ucat.first)) { |
1045 | 0 | use_collapsed = true; |
1046 | 0 | break; |
1047 | 0 | } |
1048 | 0 | } |
1049 | |
1050 | 0 | if (use_collapsed) { |
1051 | | // sanity-check that the original regex does not contain any non-ASCII characters |
1052 | 0 | const auto cpts_regex = unicode_cpts_from_utf8(regex_expr); |
1053 | 0 | for (size_t i = 0; i < cpts_regex.size(); ++i) { |
1054 | 0 | if (cpts_regex[i] >= 128) { |
1055 | 0 | throw std::runtime_error("Regex includes both unicode categories and non-ASCII characters - not supported"); |
1056 | 0 | } |
1057 | 0 | } |
1058 | | |
1059 | | // generate a collapsed representation of the regex |
1060 | 0 | std::string regex_expr_collapsed; |
1061 | | |
1062 | | // track if we are inside [], because nested [] are not allowed |
1063 | 0 | bool inside = false; |
1064 | 0 | for (size_t i = 0; i < regex_expr.size(); ++i) { |
1065 | 0 | if (regex_expr[i] == '[' && (i == 0 || regex_expr[i - 1] != '\\')) { |
1066 | 0 | regex_expr_collapsed += '['; |
1067 | 0 | inside = true; |
1068 | 0 | continue; |
1069 | 0 | } |
1070 | | |
1071 | 0 | if (inside && regex_expr[i] == ']' && regex_expr[i - 1] != '\\') { |
1072 | 0 | regex_expr_collapsed += ']'; |
1073 | 0 | inside = false; |
1074 | 0 | continue; |
1075 | 0 | } |
1076 | | |
1077 | 0 | if (regex_expr[i + 0] == '\\' && i + 4 < regex_expr.size() && |
1078 | 0 | regex_expr[i + 1] == 'p' && |
1079 | 0 | regex_expr[i + 2] == '{' && |
1080 | 0 | regex_expr[i + 4] == '}') { |
1081 | 0 | const std::string pat = regex_expr.substr(i, 5); |
1082 | 0 | if (k_ucat_enum.find(pat) != k_ucat_enum.end()) { |
1083 | 0 | if (!inside) { |
1084 | 0 | regex_expr_collapsed += '['; |
1085 | 0 | } |
1086 | 0 | regex_expr_collapsed += k_ucat_cpt.at(k_ucat_enum.at(pat)); |
1087 | 0 | regex_expr_collapsed += k_ucat_map.at(k_ucat_enum.at(pat)); |
1088 | 0 | if (!inside) { |
1089 | 0 | regex_expr_collapsed += ']'; |
1090 | 0 | } |
1091 | 0 | i += 4; |
1092 | 0 | continue; |
1093 | 0 | } |
1094 | 0 | } |
1095 | | |
1096 | 0 | regex_expr_collapsed += regex_expr[i]; |
1097 | 0 | } |
1098 | | |
1099 | | //printf("text_collapsed: %s\n", text_collapsed.c_str()); |
1100 | | //printf("regex_expr_collapsed: %s\n", regex_expr_collapsed.c_str()); |
1101 | 0 | bpe_offsets = unicode_regex_split_stl(text_collapsed, regex_expr_collapsed, bpe_offsets); |
1102 | 0 | } else { |
1103 | | // no unicode category used, we can use std::wregex directly |
1104 | 0 | const std::wstring wregex_expr = unicode_wstring_from_utf8(regex_expr); |
1105 | | |
1106 | | // std::wregex \s does not match non-ASCII whitespace, so 0x0B is used as a fallback |
1107 | 0 | std::wstring wtext(cpts.begin(), cpts.end()); |
1108 | 0 | for (size_t i = 0; i < wtext.size(); ++i) { |
1109 | 0 | if (wtext[i] > 0x7F && unicode_cpt_flags_from_cpt(wtext[i]).is_whitespace) { |
1110 | 0 | wtext[i] = 0x0B; |
1111 | 0 | } |
1112 | 0 | } |
1113 | | |
1114 | | //printf("text: %s\n", text.c_str()); |
1115 | | //printf("regex_expr: %s\n", regex_expr.c_str()); |
1116 | 0 | bpe_offsets = unicode_regex_split_stl(wtext, wregex_expr, bpe_offsets); |
1117 | 0 | } |
1118 | 0 | } catch (std::regex_error & e) { |
1119 | 0 | fprintf(stderr, "Failed to process regex: '%s'\n", regex_expr.c_str()); |
1120 | 0 | fprintf(stderr, "Regex error: %s\n", e.what()); |
1121 | 0 | throw std::runtime_error("Failed to process regex"); |
1122 | 0 | } |
1123 | 0 | } |
1124 | | |
1125 | 0 | std::vector<std::string> bpe_words; |
1126 | 0 | bpe_words.reserve(bpe_offsets.size()); // reserve memory for the approximate size |
1127 | |
1128 | 0 | size_t start = 0; |
1129 | 0 | for (size_t & offset : bpe_offsets) { |
1130 | 0 | bpe_words.emplace_back(); |
1131 | 0 | for (size_t i = start; i < start + offset; ++i) { |
1132 | 0 | bpe_words.back() += unicode_cpt_to_utf8(cpts[i]); |
1133 | 0 | } |
1134 | 0 | start += offset; |
1135 | 0 | } |
1136 | |
1137 | 0 | return unicode_byte_encoding_process(bpe_words); |
1138 | 0 | } |
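// End-to-end usage sketch (illustrative). It assumes the GPT-2 pre-tokenizer
// pattern, which the custom splitter above recognizes, so no std::regex fallback
// is needed:
//
//   const std::vector<std::string> exprs = {
//       "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",
//   };
//   const std::vector<std::string> words = unicode_regex_split("Hello world", exprs);
//   // expected: { "Hello", "Ġworld" } -- the pieces are returned byte-encoded by
//   // unicode_byte_encoding_process(), so the leading space becomes U+0120 ("Ġ").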