Coverage Report

Created: 2025-12-07 07:03

/src/cpython/Parser/lexer/lexer.c
Line | Count | Source
1
#include "Python.h"
2
#include "pycore_token.h"
3
#include "pycore_unicodeobject.h"
4
#include "errcode.h"
5
6
#include "state.h"
7
#include "../tokenizer/helpers.h"
8
9
/* Alternate tab spacing */
10
1.29k
#define ALTTABSIZE 1
11
12
1.75M
#define is_potential_identifier_start(c) (\
13
1.75M
              (c >= 'a' && c <= 'z')\
14
1.75M
               || (c >= 'A' && c <= 'Z')\
15
1.75M
               || c == '_'\
16
1.75M
               || (c >= 128))
17
18
2.73M
#define is_potential_identifier_char(c) (\
19
2.73M
              (c >= 'a' && c <= 'z')\
20
2.73M
               || (c >= 'A' && c <= 'Z')\
21
2.73M
               || (c >= '0' && c <= '9')\
22
2.73M
               || c == '_'\
23
2.73M
               || (c >= 128))
24
25
#ifdef Py_DEBUG
26
static inline tokenizer_mode* TOK_GET_MODE(struct tok_state* tok) {
27
    assert(tok->tok_mode_stack_index >= 0);
28
    assert(tok->tok_mode_stack_index < MAXFSTRINGLEVEL);
29
    return &(tok->tok_mode_stack[tok->tok_mode_stack_index]);
30
}
31
static inline tokenizer_mode* TOK_NEXT_MODE(struct tok_state* tok) {
32
    assert(tok->tok_mode_stack_index >= 0);
33
    assert(tok->tok_mode_stack_index + 1 < MAXFSTRINGLEVEL);
34
    return &(tok->tok_mode_stack[++tok->tok_mode_stack_index]);
35
}
36
#else
37
1.87M
#define TOK_GET_MODE(tok) (&(tok->tok_mode_stack[tok->tok_mode_stack_index]))
38
16.5k
#define TOK_NEXT_MODE(tok) (&(tok->tok_mode_stack[++tok->tok_mode_stack_index]))
39
#endif
40
41
#define FTSTRING_MIDDLE(tok_mode) (tok_mode->string_kind == TSTRING ? TSTRING_MIDDLE : FSTRING_MIDDLE)
42
#define FTSTRING_END(tok_mode) (tok_mode->string_kind == TSTRING ? TSTRING_END : FSTRING_END)
43
37
#define TOK_GET_STRING_PREFIX(tok) (TOK_GET_MODE(tok)->string_kind == TSTRING ? 't' : 'f')
44
1.75M
#define MAKE_TOKEN(token_type) _PyLexer_token_setup(tok, token, token_type, p_start, p_end)
45
0
#define MAKE_TYPE_COMMENT_TOKEN(token_type, col_offset, end_col_offset) (\
46
0
                _PyLexer_type_comment_token_setup(tok, token, token_type, col_offset, end_col_offset, p_start, p_end))
47
48
/* Spaces in this constant are treated as "zero or more spaces or tabs" when
49
   tokenizing. */
50
static const char* type_comment_prefix = "# type: ";
51
52
static inline int
53
contains_null_bytes(const char* str, size_t size)
54
264k
{
55
264k
    return memchr(str, 0, size) != NULL;
56
264k
}
57
58
/* Get next char, updating state; error code goes into tok->done */
59
static int
60
tok_nextc(struct tok_state *tok)
61
11.5M
{
62
11.5M
    int rc;
63
11.8M
    for (;;) {
64
11.8M
        if (tok->cur != tok->inp) {
65
11.5M
            if ((unsigned int) tok->col_offset >= (unsigned int) INT_MAX) {
66
0
                tok->done = E_COLUMNOVERFLOW;
67
0
                return EOF;
68
0
            }
69
11.5M
            tok->col_offset++;
70
11.5M
            return Py_CHARMASK(*tok->cur++); /* Fast path */
71
11.5M
        }
72
312k
        if (tok->done != E_OK) {
73
32.2k
            return EOF;
74
32.2k
        }
75
280k
        rc = tok->underflow(tok);
76
#if defined(Py_DEBUG)
77
        if (tok->debug) {
78
            fprintf(stderr, "line[%d] = ", tok->lineno);
79
            _PyTokenizer_print_escape(stderr, tok->cur, tok->inp - tok->cur);
80
            fprintf(stderr, "  tok->done = %d\n", tok->done);
81
        }
82
#endif
83
280k
        if (!rc) {
84
16.2k
            tok->cur = tok->inp;
85
16.2k
            return EOF;
86
16.2k
        }
87
264k
        tok->line_start = tok->cur;
88
89
264k
        if (contains_null_bytes(tok->line_start, tok->inp - tok->line_start)) {
90
0
            _PyTokenizer_syntaxerror(tok, "source code cannot contain null bytes");
91
0
            tok->cur = tok->inp;
92
0
            return EOF;
93
0
        }
94
264k
    }
95
11.5M
    Py_UNREACHABLE();
96
11.5M
}
97
98
/* Back-up one character */
99
static void
100
tok_backup(struct tok_state *tok, int c)
101
3.71M
{
102
3.71M
    if (c != EOF) {
103
3.68M
        if (--tok->cur < tok->buf) {
104
0
            Py_FatalError("tokenizer beginning of buffer");
105
0
        }
106
3.68M
        if ((int)(unsigned char)*tok->cur != Py_CHARMASK(c)) {
107
0
            Py_FatalError("tok_backup: wrong character");
108
0
        }
109
3.68M
        tok->col_offset--;
110
3.68M
    }
111
3.71M
}
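
/* [Editor's note] tok_nextc() and tok_backup() above form the lexer's
 * one-character pushback protocol: tok_backup() only accepts the most
 * recently read character (it fatals on a mismatch) and ignores EOF.
 * A minimal sketch, not part of lexer.c, of the peek idiom built on them:
 *
 *     static int
 *     peek_char(struct tok_state *tok)
 *     {
 *         int c = tok_nextc(tok);   // advance and fetch one character
 *         tok_backup(tok, c);       // rewind one character; EOF is a no-op
 *         return c;
 *     }
 *
 * tok_get_normal_mode() below uses exactly this pattern ("Peek ahead at
 * the next character"). */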
112
113
static int
114
23.2k
set_ftstring_expr(struct tok_state* tok, struct token *token, char c) {
115
23.2k
    assert(token != NULL);
116
23.2k
    assert(c == '}' || c == ':' || c == '!');
117
23.2k
    tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
118
119
23.2k
    if (!(tok_mode->in_debug || tok_mode->string_kind == TSTRING) || token->metadata) {
120
12.6k
        return 0;
121
12.6k
    }
122
10.6k
    PyObject *res = NULL;
123
124
    // Look for a # character outside of string literals
125
10.6k
    int hash_detected = 0;
126
10.6k
    int in_string = 0;
127
10.6k
    char quote_char = 0;
128
129
1.04M
    for (Py_ssize_t i = 0; i < tok_mode->last_expr_size - tok_mode->last_expr_end; i++) {
130
1.03M
        char ch = tok_mode->last_expr_buffer[i];
131
132
        // Skip escaped characters
133
1.03M
        if (ch == '\\') {
134
18.3k
            i++;
135
18.3k
            continue;
136
18.3k
        }
137
138
        // Handle quotes
139
1.01M
        if (ch == '"' || ch == '\'') {
140
            // The following if/else block works becase there is an off number
141
            // of quotes in STRING tokens and the lexer only ever reaches this
142
            // function with valid STRING tokens.
143
            // For example: """hello"""
144
            // First quote: in_string = 1
145
            // Second quote: in_string = 0
146
            // Third quote: in_string = 1
147
179k
            if (!in_string) {
148
67.2k
                in_string = 1;
149
67.2k
                quote_char = ch;
150
67.2k
            }
151
112k
            else if (ch == quote_char) {
152
66.4k
                in_string = 0;
153
66.4k
            }
154
179k
            continue;
155
179k
        }
156
157
        // Check for # outside strings
158
838k
        if (ch == '#' && !in_string) {
159
891
            hash_detected = 1;
160
891
            break;
161
891
        }
162
838k
    }
163
    // If we found a # character in the expression, we need to handle comments
164
10.6k
    if (hash_detected) {
165
        // Allocate buffer for processed result
166
891
        char *result = (char *)PyMem_Malloc((tok_mode->last_expr_size - tok_mode->last_expr_end + 1) * sizeof(char));
167
891
        if (!result) {
168
0
            return -1;
169
0
        }
170
171
891
        Py_ssize_t i = 0;  // Input position
172
891
        Py_ssize_t j = 0;  // Output position
173
891
        in_string = 0;     // Whether we're in a string
174
891
        quote_char = 0;    // Current string quote char
175
176
        // Process each character
177
65.9k
        while (i < tok_mode->last_expr_size - tok_mode->last_expr_end) {
178
65.0k
            char ch = tok_mode->last_expr_buffer[i];
179
180
            // Handle string quotes
181
65.0k
            if (ch == '"' || ch == '\'') {
182
                // See comment above to understand this part
183
10.0k
                if (!in_string) {
184
3.97k
                    in_string = 1;
185
3.97k
                    quote_char = ch;
186
6.12k
                } else if (ch == quote_char) {
187
3.96k
                    in_string = 0;
188
3.96k
                }
189
10.0k
                result[j++] = ch;
190
10.0k
            }
191
            // Skip comments
192
54.9k
            else if (ch == '#' && !in_string) {
193
47.4k
                while (i < tok_mode->last_expr_size - tok_mode->last_expr_end &&
194
46.7k
                       tok_mode->last_expr_buffer[i] != '\n') {
195
46.3k
                    i++;
196
46.3k
                }
197
1.09k
                if (i < tok_mode->last_expr_size - tok_mode->last_expr_end) {
198
313
                    result[j++] = '\n';
199
313
                }
200
1.09k
            }
201
            // Copy other chars
202
53.8k
            else {
203
53.8k
                result[j++] = ch;
204
53.8k
            }
205
65.0k
            i++;
206
65.0k
        }
207
208
891
        result[j] = '\0';  // Null-terminate the result string
209
891
        res = PyUnicode_DecodeUTF8(result, j, NULL);
210
891
        PyMem_Free(result);
211
9.74k
    } else {
212
9.74k
        res = PyUnicode_DecodeUTF8(
213
9.74k
            tok_mode->last_expr_buffer,
214
9.74k
            tok_mode->last_expr_size - tok_mode->last_expr_end,
215
9.74k
            NULL
216
9.74k
        );
217
9.74k
    }
218
219
10.6k
    if (!res) {
220
0
        return -1;
221
0
    }
222
10.6k
    token->metadata = res;
223
10.6k
    return 0;
224
10.6k
}
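
/* [Editor's sketch, not part of lexer.c] The hash-detection and
 * comment-stripping passes above share one technique: scan the expression
 * buffer while tracking quote state and skipping backslash escapes. A
 * self-contained version of that scan, for illustration only:
 *
 *     static int
 *     has_hash_outside_strings(const char *buf, Py_ssize_t len)
 *     {
 *         int in_string = 0;
 *         char quote_char = 0;
 *         for (Py_ssize_t i = 0; i < len; i++) {
 *             char ch = buf[i];
 *             if (ch == '\\') {          // skip the escaped character
 *                 i++;
 *                 continue;
 *             }
 *             if (ch == '"' || ch == '\'') {
 *                 if (!in_string) {      // entering a string literal
 *                     in_string = 1;
 *                     quote_char = ch;
 *                 }
 *                 else if (ch == quote_char) {
 *                     in_string = 0;     // leaving the string literal
 *                 }
 *                 continue;
 *             }
 *             if (ch == '#' && !in_string) {
 *                 return 1;              // '#' in code position: a comment
 *             }
 *         }
 *         return 0;
 *     }
 */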
225
226
int
227
_PyLexer_update_ftstring_expr(struct tok_state *tok, char cur)
228
61.7k
{
229
61.7k
    assert(tok->cur != NULL);
230
231
61.7k
    Py_ssize_t size = strlen(tok->cur);
232
61.7k
    tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
233
234
61.7k
    switch (cur) {
235
0
       case 0:
236
0
            if (!tok_mode->last_expr_buffer || tok_mode->last_expr_end >= 0) {
237
0
                return 1;
238
0
            }
239
0
            char *new_buffer = PyMem_Realloc(
240
0
                tok_mode->last_expr_buffer,
241
0
                tok_mode->last_expr_size + size
242
0
            );
243
0
            if (new_buffer == NULL) {
244
0
                PyMem_Free(tok_mode->last_expr_buffer);
245
0
                goto error;
246
0
            }
247
0
            tok_mode->last_expr_buffer = new_buffer;
248
0
            strncpy(tok_mode->last_expr_buffer + tok_mode->last_expr_size, tok->cur, size);
249
0
            tok_mode->last_expr_size += size;
250
0
            break;
251
38.4k
        case '{':
252
38.4k
            if (tok_mode->last_expr_buffer != NULL) {
253
27.0k
                PyMem_Free(tok_mode->last_expr_buffer);
254
27.0k
            }
255
38.4k
            tok_mode->last_expr_buffer = PyMem_Malloc(size);
256
38.4k
            if (tok_mode->last_expr_buffer == NULL) {
257
0
                goto error;
258
0
            }
259
38.4k
            tok_mode->last_expr_size = size;
260
38.4k
            tok_mode->last_expr_end = -1;
261
38.4k
            strncpy(tok_mode->last_expr_buffer, tok->cur, size);
262
38.4k
            break;
263
18.2k
        case '}':
264
19.9k
        case '!':
265
19.9k
            tok_mode->last_expr_end = strlen(tok->start);
266
19.9k
            break;
267
3.38k
        case ':':
268
3.38k
            if (tok_mode->last_expr_end == -1) {
269
2.99k
               tok_mode->last_expr_end = strlen(tok->start);
270
2.99k
            }
271
3.38k
            break;
272
0
        default:
273
0
            Py_UNREACHABLE();
274
61.7k
    }
275
61.7k
    return 1;
276
0
error:
277
0
    tok->done = E_NOMEM;
278
0
    return 0;
279
61.7k
}
280
281
static int
282
lookahead(struct tok_state *tok, const char *test)
283
8.67k
{
284
8.67k
    const char *s = test;
285
8.67k
    int res = 0;
286
23.4k
    while (1) {
287
23.4k
        int c = tok_nextc(tok);
288
23.4k
        if (*s == 0) {
289
8.59k
            res = !is_potential_identifier_char(c);
290
8.59k
        }
291
14.8k
        else if (c == *s) {
292
14.7k
            s++;
293
14.7k
            continue;
294
14.7k
        }
295
296
8.67k
        tok_backup(tok, c);
297
23.4k
        while (s != test) {
298
14.7k
            tok_backup(tok, *--s);
299
14.7k
        }
300
8.67k
        return res;
301
23.4k
    }
302
8.67k
}
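
/* [Editor's note] lookahead() is side-effect free: every character it
 * consumes is pushed back with tok_backup() before returning. A
 * hypothetical usage sketch matching verify_end_of_number() below: after
 * the lexer has consumed "1" and read 'a' from the input "1and x",
 *
 *     int r = lookahead(tok, "nd");   // 1 for "1and x", 0 for "1andy"
 *
 * and in either case the input position is left unchanged. */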
303
304
static int
305
97.3k
verify_end_of_number(struct tok_state *tok, int c, const char *kind) {
306
97.3k
    if (tok->tok_extra_tokens) {
307
        // When we are parsing extra tokens, we don't want to emit warnings
308
        // about invalid literals, because we want to be a bit more liberal.
309
0
        return 1;
310
0
    }
311
    /* Emit a deprecation warning only if the numeric literal is immediately
312
     * followed by one of the keywords that can occur after a numeric literal
313
     * in valid code: "and", "else", "for", "if", "in", "is" and "or".
314
     * This allows gradually deprecating existing valid code without adding a
315
     * warning before an error in most cases of an invalid numeric literal
316
     * (which would be confusing and break existing tests).
317
     * Raise a syntax error with a slightly better message than plain
318
     * "invalid syntax" if the numeric literal is immediately followed by
319
     * another keyword or identifier.
320
     */
321
97.3k
    int r = 0;
322
97.3k
    if (c == 'a') {
323
1.15k
        r = lookahead(tok, "nd");
324
1.15k
    }
325
96.2k
    else if (c == 'e') {
326
529
        r = lookahead(tok, "lse");
327
529
    }
328
95.7k
    else if (c == 'f') {
329
3.75k
        r = lookahead(tok, "or");
330
3.75k
    }
331
91.9k
    else if (c == 'i') {
332
1.50k
        int c2 = tok_nextc(tok);
333
1.50k
        if (c2 == 'f' || c2 == 'n' || c2 == 's') {
334
1.49k
            r = 1;
335
1.49k
        }
336
1.50k
        tok_backup(tok, c2);
337
1.50k
    }
338
90.4k
    else if (c == 'o') {
339
2.93k
        r = lookahead(tok, "r");
340
2.93k
    }
341
87.5k
    else if (c == 'n') {
342
304
        r = lookahead(tok, "ot");
343
304
    }
344
97.3k
    if (r) {
345
10.0k
        tok_backup(tok, c);
346
10.0k
        if (_PyTokenizer_parser_warn(tok, PyExc_SyntaxWarning,
347
10.0k
                "invalid %s literal", kind))
348
0
        {
349
0
            return 0;
350
0
        }
351
10.0k
        tok_nextc(tok);
352
10.0k
    }
353
87.3k
    else /* In future releases, only error will remain. */
354
87.3k
    if (c < 128 && is_potential_identifier_char(c)) {
355
183
        tok_backup(tok, c);
356
183
        _PyTokenizer_syntaxerror(tok, "invalid %s literal", kind);
357
183
        return 0;
358
183
    }
359
97.2k
    return 1;
360
97.3k
}
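
/* [Editor's note] Illustrative inputs for the checks above (assumed from
 * the logic, not an exhaustive list):
 *
 *     1if x else y   ->  SyntaxWarning: invalid decimal literal
 *     1or x          ->  SyntaxWarning: invalid decimal literal
 *     1abc           ->  SyntaxError:   invalid decimal literal
 *     0x1fz          ->  SyntaxError:   invalid hexadecimal literal
 */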
361
362
/* Verify that the identifier follows PEP 3131. */
363
static int
364
verify_identifier(struct tok_state *tok)
365
11.6k
{
366
11.6k
    if (tok->tok_extra_tokens) {
367
0
        return 1;
368
0
    }
369
11.6k
    PyObject *s;
370
11.6k
    if (tok->decoding_erred)
371
0
        return 0;
372
11.6k
    s = PyUnicode_DecodeUTF8(tok->start, tok->cur - tok->start, NULL);
373
11.6k
    if (s == NULL) {
374
2
        if (PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
375
2
            tok->done = E_DECODE;
376
2
        }
377
0
        else {
378
0
            tok->done = E_ERROR;
379
0
        }
380
2
        return 0;
381
2
    }
382
11.6k
    Py_ssize_t invalid = _PyUnicode_ScanIdentifier(s);
383
11.6k
    assert(invalid >= 0);
384
11.6k
    assert(PyUnicode_GET_LENGTH(s) > 0);
385
11.6k
    if (invalid < PyUnicode_GET_LENGTH(s)) {
386
625
        Py_UCS4 ch = PyUnicode_READ_CHAR(s, invalid);
387
625
        if (invalid + 1 < PyUnicode_GET_LENGTH(s)) {
388
            /* Determine the offset in UTF-8 encoded input */
389
413
            Py_SETREF(s, PyUnicode_Substring(s, 0, invalid + 1));
390
413
            if (s != NULL) {
391
413
                Py_SETREF(s, PyUnicode_AsUTF8String(s));
392
413
            }
393
413
            if (s == NULL) {
394
0
                tok->done = E_ERROR;
395
0
                return 0;
396
0
            }
397
413
            tok->cur = (char *)tok->start + PyBytes_GET_SIZE(s);
398
413
        }
399
625
        Py_DECREF(s);
400
625
        if (Py_UNICODE_ISPRINTABLE(ch)) {
401
325
            _PyTokenizer_syntaxerror(tok, "invalid character '%c' (U+%04X)", ch, ch);
402
325
        }
403
300
        else {
404
300
            _PyTokenizer_syntaxerror(tok, "invalid non-printable character U+%04X", ch);
405
300
        }
406
625
        return 0;
407
625
    }
408
11.0k
    Py_DECREF(s);
409
11.0k
    return 1;
410
11.6k
}
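
/* [Editor's note] Example of the PEP 3131 check above (illustrative):
 * "café" scans cleanly, while "a€b" fails _PyUnicode_ScanIdentifier() at
 * the '€' and is reported as: invalid character '€' (U+20AC). */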
411
412
static int
413
tok_decimal_tail(struct tok_state *tok)
414
76.5k
{
415
76.5k
    int c;
416
417
77.0k
    while (1) {
418
219k
        do {
419
219k
            c = tok_nextc(tok);
420
219k
        } while (Py_ISDIGIT(c));
421
77.0k
        if (c != '_') {
422
76.5k
            break;
423
76.5k
        }
424
514
        c = tok_nextc(tok);
425
514
        if (!Py_ISDIGIT(c)) {
426
12
            tok_backup(tok, c);
427
12
            _PyTokenizer_syntaxerror(tok, "invalid decimal literal");
428
12
            return 0;
429
12
        }
430
514
    }
431
76.5k
    return c;
432
76.5k
}
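
/* [Editor's note] tok_decimal_tail() accepts digit runs with single
 * underscores strictly between digits. Illustrative inputs:
 *
 *     1_000_000   ->  consumed as one NUMBER tail
 *     1__0        ->  SyntaxError: invalid decimal literal
 *     1_          ->  SyntaxError: invalid decimal literal
 */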
433
434
static inline int
435
1.12k
tok_continuation_line(struct tok_state *tok) {
436
1.12k
    int c = tok_nextc(tok);
437
1.12k
    if (c == '\r') {
438
71
        c = tok_nextc(tok);
439
71
    }
440
1.12k
    if (c != '\n') {
441
58
        tok->done = E_LINECONT;
442
58
        return -1;
443
58
    }
444
1.06k
    c = tok_nextc(tok);
445
1.06k
    if (c == EOF) {
446
55
        tok->done = E_EOF;
447
55
        tok->cur = tok->inp;
448
55
        return -1;
449
1.01k
    } else {
450
1.01k
        tok_backup(tok, c);
451
1.01k
    }
452
1.01k
    return c;
453
1.06k
}
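
/* [Editor's note] Sketch of what tok_continuation_line() accepts: the
 * backslash must be followed immediately by '\n' (or "\r\n"). A stray
 * character after the backslash, as in "x = 1 + \ 2", sets E_LINECONT;
 * a backslash-newline at end of input sets E_EOF. */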
454
455
static int
456
maybe_raise_syntax_error_for_string_prefixes(struct tok_state *tok,
457
                                             int saw_b, int saw_r, int saw_u,
458
21.6k
                                             int saw_f, int saw_t) {
459
    // Supported: rb, rf, rt (in any order)
460
    // Unsupported: ub, ur, uf, ut, bf, bt, ft (in any order)
461
462
21.6k
#define RETURN_SYNTAX_ERROR(PREFIX1, PREFIX2)                             \
463
21.6k
    do {                                                                  \
464
7
        (void)_PyTokenizer_syntaxerror_known_range(                       \
465
7
            tok, (int)(tok->start + 1 - tok->line_start),                 \
466
7
            (int)(tok->cur - tok->line_start),                            \
467
7
            "'" PREFIX1 "' and '" PREFIX2 "' prefixes are incompatible"); \
468
7
        return -1;                                                        \
469
7
    } while (0)
470
471
21.6k
    if (saw_u && saw_b) {
472
1
        RETURN_SYNTAX_ERROR("u", "b");
473
1
    }
474
21.6k
    if (saw_u && saw_r) {
475
1
        RETURN_SYNTAX_ERROR("u", "r");
476
1
    }
477
21.6k
    if (saw_u && saw_f) {
478
1
        RETURN_SYNTAX_ERROR("u", "f");
479
1
    }
480
21.6k
    if (saw_u && saw_t) {
481
1
        RETURN_SYNTAX_ERROR("u", "t");
482
1
    }
483
484
21.6k
    if (saw_b && saw_f) {
485
1
        RETURN_SYNTAX_ERROR("b", "f");
486
1
    }
487
21.6k
    if (saw_b && saw_t) {
488
1
        RETURN_SYNTAX_ERROR("b", "t");
489
1
    }
490
491
21.6k
    if (saw_f && saw_t) {
492
1
        RETURN_SYNTAX_ERROR("f", "t");
493
1
    }
494
495
21.6k
#undef RETURN_SYNTAX_ERROR
496
497
21.6k
    return 0;
498
21.6k
}
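
/* [Editor's note] Concrete examples of the prefix rules enforced above:
 *
 *     rb"..."  bR"..."  fR"..."  tr"..."   ->  accepted (any order/case)
 *     ub"..."  ur"..."  uf"..."  ut"..."   ->  SyntaxError ('u' combines
 *                                              with nothing)
 *     bf"..."  bt"..."  ft"..."            ->  SyntaxError (incompatible)
 */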
499
500
static int
501
tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
502
1.72M
{
503
1.72M
    int c;
504
1.72M
    int blankline, nonascii;
505
506
1.72M
    const char *p_start = NULL;
507
1.72M
    const char *p_end = NULL;
508
1.83M
  nextline:
509
1.83M
    tok->start = NULL;
510
1.83M
    tok->starting_col_offset = -1;
511
1.83M
    blankline = 0;
512
513
514
    /* Get indentation level */
515
1.83M
    if (tok->atbol) {
516
257k
        int col = 0;
517
257k
        int altcol = 0;
518
257k
        tok->atbol = 0;
519
257k
        int cont_line_col = 0;
520
1.18M
        for (;;) {
521
1.18M
            c = tok_nextc(tok);
522
1.18M
            if (c == ' ') {
523
926k
                col++, altcol++;
524
926k
            }
525
260k
            else if (c == '\t') {
526
645
                col = (col / tok->tabsize + 1) * tok->tabsize;
527
645
                altcol = (altcol / ALTTABSIZE + 1) * ALTTABSIZE;
528
645
            }
529
259k
            else if (c == '\014')  {/* Control-L (formfeed) */
530
1.06k
                col = altcol = 0; /* For Emacs users */
531
1.06k
            }
532
258k
            else if (c == '\\') {
533
                // Indentation cannot be split over multiple physical lines
534
                // using backslashes. This means that if we found a backslash
535
                // preceded by whitespace, **the first one we find** determines
536
                // the level of indentation of whatever comes next.
537
663
                cont_line_col = cont_line_col ? cont_line_col : col;
538
663
                if ((c = tok_continuation_line(tok)) == -1) {
539
46
                    return MAKE_TOKEN(ERRORTOKEN);
540
46
                }
541
663
            }
542
257k
            else if (c == EOF && PyErr_Occurred()) {
543
0
                return MAKE_TOKEN(ERRORTOKEN);
544
0
            }
545
257k
            else {
546
257k
                break;
547
257k
            }
548
1.18M
        }
549
257k
        tok_backup(tok, c);
550
257k
        if (c == '#' || c == '\n' || c == '\r') {
551
            /* Lines with only whitespace and/or comments
552
               shouldn't affect the indentation and are
553
               not passed to the parser as NEWLINE tokens,
554
               except *totally* empty lines in interactive
555
               mode, which signal the end of a command group. */
556
65.8k
            if (col == 0 && c == '\n' && tok->prompt != NULL) {
557
0
                blankline = 0; /* Let it through */
558
0
            }
559
65.8k
            else if (tok->prompt != NULL && tok->lineno == 1) {
560
                /* In interactive mode, if the first line contains
561
                   only spaces and/or a comment, let it through. */
562
0
                blankline = 0;
563
0
                col = altcol = 0;
564
0
            }
565
65.8k
            else {
566
65.8k
                blankline = 1; /* Ignore completely */
567
65.8k
            }
568
            /* We can't jump back right here since we still
569
               may need to skip to the end of a comment */
570
65.8k
        }
571
257k
        if (!blankline && tok->level == 0) {
572
147k
            col = cont_line_col ? cont_line_col : col;
573
147k
            altcol = cont_line_col ? cont_line_col : altcol;
574
147k
            if (col == tok->indstack[tok->indent]) {
575
                /* No change */
576
97.3k
                if (altcol != tok->altindstack[tok->indent]) {
577
1
                    return MAKE_TOKEN(_PyTokenizer_indenterror(tok));
578
1
                }
579
97.3k
            }
580
50.6k
            else if (col > tok->indstack[tok->indent]) {
581
                /* Indent -- always one */
582
28.2k
                if (tok->indent+1 >= MAXINDENT) {
583
0
                    tok->done = E_TOODEEP;
584
0
                    tok->cur = tok->inp;
585
0
                    return MAKE_TOKEN(ERRORTOKEN);
586
0
                }
587
28.2k
                if (altcol <= tok->altindstack[tok->indent]) {
588
2
                    return MAKE_TOKEN(_PyTokenizer_indenterror(tok));
589
2
                }
590
28.2k
                tok->pendin++;
591
28.2k
                tok->indstack[++tok->indent] = col;
592
28.2k
                tok->altindstack[tok->indent] = altcol;
593
28.2k
            }
594
22.3k
            else /* col < tok->indstack[tok->indent] */ {
595
                /* Dedent -- any number, must be consistent */
596
49.8k
                while (tok->indent > 0 &&
597
44.7k
                    col < tok->indstack[tok->indent]) {
598
27.5k
                    tok->pendin--;
599
27.5k
                    tok->indent--;
600
27.5k
                }
601
22.3k
                if (col != tok->indstack[tok->indent]) {
602
7
                    tok->done = E_DEDENT;
603
7
                    tok->cur = tok->inp;
604
7
                    return MAKE_TOKEN(ERRORTOKEN);
605
7
                }
606
22.3k
                if (altcol != tok->altindstack[tok->indent]) {
607
1
                    return MAKE_TOKEN(_PyTokenizer_indenterror(tok));
608
1
                }
609
22.3k
            }
610
147k
        }
611
257k
    }
612
613
1.83M
    tok->start = tok->cur;
614
1.83M
    tok->starting_col_offset = tok->col_offset;
615
616
    /* Return pending indents/dedents */
617
1.83M
    if (tok->pendin != 0) {
618
55.8k
        if (tok->pendin < 0) {
619
27.5k
            if (tok->tok_extra_tokens) {
620
0
                p_start = tok->cur;
621
0
                p_end = tok->cur;
622
0
            }
623
27.5k
            tok->pendin++;
624
27.5k
            return MAKE_TOKEN(DEDENT);
625
27.5k
        }
626
28.2k
        else {
627
28.2k
            if (tok->tok_extra_tokens) {
628
0
                p_start = tok->buf;
629
0
                p_end = tok->cur;
630
0
            }
631
28.2k
            tok->pendin--;
632
28.2k
            return MAKE_TOKEN(INDENT);
633
28.2k
        }
634
55.8k
    }
635
636
    /* Peek ahead at the next character */
637
1.77M
    c = tok_nextc(tok);
638
1.77M
    tok_backup(tok, c);
639
640
1.77M
 again:
641
1.77M
    tok->start = NULL;
642
    /* Skip spaces */
643
2.17M
    do {
644
2.17M
        c = tok_nextc(tok);
645
2.17M
    } while (c == ' ' || c == '\t' || c == '\014');
646
647
    /* Set start of current token */
648
1.77M
    tok->start = tok->cur == NULL ? NULL : tok->cur - 1;
649
1.77M
    tok->starting_col_offset = tok->col_offset - 1;
650
651
    /* Skip comment, unless it's a type comment */
652
1.77M
    if (c == '#') {
653
654
41.2k
        const char* p = NULL;
655
41.2k
        const char *prefix, *type_start;
656
41.2k
        int current_starting_col_offset;
657
658
1.36M
        while (c != EOF && c != '\n' && c != '\r') {
659
1.31M
            c = tok_nextc(tok);
660
1.31M
        }
661
662
41.2k
        if (tok->tok_extra_tokens) {
663
0
            p = tok->start;
664
0
        }
665
666
41.2k
        if (tok->type_comments) {
667
0
            p = tok->start;
668
0
            current_starting_col_offset = tok->starting_col_offset;
669
0
            prefix = type_comment_prefix;
670
0
            while (*prefix && p < tok->cur) {
671
0
                if (*prefix == ' ') {
672
0
                    while (*p == ' ' || *p == '\t') {
673
0
                        p++;
674
0
                        current_starting_col_offset++;
675
0
                    }
676
0
                } else if (*prefix == *p) {
677
0
                    p++;
678
0
                    current_starting_col_offset++;
679
0
                } else {
680
0
                    break;
681
0
                }
682
683
0
                prefix++;
684
0
            }
685
686
            /* This is a type comment if we matched all of type_comment_prefix. */
687
0
            if (!*prefix) {
688
0
                int is_type_ignore = 1;
689
                // +6 in order to skip the word 'ignore'
690
0
                const char *ignore_end = p + 6;
691
0
                const int ignore_end_col_offset = current_starting_col_offset + 6;
692
0
                tok_backup(tok, c);  /* don't eat the newline or EOF */
693
694
0
                type_start = p;
695
696
                /* A TYPE_IGNORE is "type: ignore" followed by the end of the token
697
                 * or anything ASCII and non-alphanumeric. */
698
0
                is_type_ignore = (
699
0
                    tok->cur >= ignore_end && memcmp(p, "ignore", 6) == 0
700
0
                    && !(tok->cur > ignore_end
701
0
                         && ((unsigned char)ignore_end[0] >= 128 || Py_ISALNUM(ignore_end[0]))));
702
703
0
                if (is_type_ignore) {
704
0
                    p_start = ignore_end;
705
0
                    p_end = tok->cur;
706
707
                    /* If this type ignore is the only thing on the line, consume the newline also. */
708
0
                    if (blankline) {
709
0
                        tok_nextc(tok);
710
0
                        tok->atbol = 1;
711
0
                    }
712
0
                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_IGNORE, ignore_end_col_offset, tok->col_offset);
713
0
                } else {
714
0
                    p_start = type_start;
715
0
                    p_end = tok->cur;
716
0
                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_COMMENT, current_starting_col_offset, tok->col_offset);
717
0
                }
718
0
            }
719
0
        }
720
41.2k
        if (tok->tok_extra_tokens) {
721
0
            tok_backup(tok, c);  /* don't eat the newline or EOF */
722
0
            p_start = p;
723
0
            p_end = tok->cur;
724
0
            tok->comment_newline = blankline;
725
0
            return MAKE_TOKEN(COMMENT);
726
0
        }
727
41.2k
    }
728
729
1.77M
    if (tok->done == E_INTERACT_STOP) {
730
0
        return MAKE_TOKEN(ENDMARKER);
731
0
    }
732
733
    /* Check for EOF and errors now */
734
1.77M
    if (c == EOF) {
735
16.1k
        if (tok->level) {
736
4.16k
            return MAKE_TOKEN(ERRORTOKEN);
737
4.16k
        }
738
11.9k
        return MAKE_TOKEN(tok->done == E_EOF ? ENDMARKER : ERRORTOKEN);
739
16.1k
    }
740
741
    /* Identifier (most frequent token!) */
742
1.75M
    nonascii = 0;
743
1.75M
    if (is_potential_identifier_start(c)) {
744
        /* Process the various legal combinations of b"", r"", u"", and f"". */
745
583k
        int saw_b = 0, saw_r = 0, saw_u = 0, saw_f = 0, saw_t = 0;
746
716k
        while (1) {
747
716k
            if (!saw_b && (c == 'b' || c == 'B')) {
748
22.4k
                saw_b = 1;
749
22.4k
            }
750
            /* Since this is a backwards-compatibility support literal, we don't
751
               want to support it in arbitrary order like byte literals. */
752
694k
            else if (!saw_u && (c == 'u'|| c == 'U')) {
753
7.83k
                saw_u = 1;
754
7.83k
            }
755
            /* ur"" and ru"" are not supported */
756
686k
            else if (!saw_r && (c == 'r' || c == 'R')) {
757
40.4k
                saw_r = 1;
758
40.4k
            }
759
646k
            else if (!saw_f && (c == 'f' || c == 'F')) {
760
48.3k
                saw_f = 1;
761
48.3k
            }
762
597k
            else if (!saw_t && (c == 't' || c == 'T')) {
763
36.0k
                saw_t = 1;
764
36.0k
            }
765
561k
            else {
766
561k
                break;
767
561k
            }
768
155k
            c = tok_nextc(tok);
769
155k
            if (c == '"' || c == '\'') {
770
                // Raise error on incompatible string prefixes:
771
21.6k
                int status = maybe_raise_syntax_error_for_string_prefixes(
772
21.6k
                    tok, saw_b, saw_r, saw_u, saw_f, saw_t);
773
21.6k
                if (status < 0) {
774
7
                    return MAKE_TOKEN(ERRORTOKEN);
775
7
                }
776
777
                // Handle valid f or t string creation:
778
21.6k
                if (saw_f || saw_t) {
779
16.5k
                    goto f_string_quote;
780
16.5k
                }
781
5.12k
                goto letter_quote;
782
21.6k
            }
783
155k
        }
784
2.64M
        while (is_potential_identifier_char(c)) {
785
2.07M
            if (c >= 128) {
786
116k
                nonascii = 1;
787
116k
            }
788
2.07M
            c = tok_nextc(tok);
789
2.07M
        }
790
561k
        tok_backup(tok, c);
791
561k
        if (nonascii && !verify_identifier(tok)) {
792
627
            return MAKE_TOKEN(ERRORTOKEN);
793
627
        }
794
795
560k
        p_start = tok->start;
796
560k
        p_end = tok->cur;
797
798
560k
        return MAKE_TOKEN(NAME);
799
561k
    }
800
801
1.17M
    if (c == '\r') {
802
414
        c = tok_nextc(tok);
803
414
    }
804
805
    /* Newline */
806
1.17M
    if (c == '\n') {
807
239k
        tok->atbol = 1;
808
239k
        if (blankline || tok->level > 0) {
809
109k
            if (tok->tok_extra_tokens) {
810
0
                if (tok->comment_newline) {
811
0
                    tok->comment_newline = 0;
812
0
                }
813
0
                p_start = tok->start;
814
0
                p_end = tok->cur;
815
0
                return MAKE_TOKEN(NL);
816
0
            }
817
109k
            goto nextline;
818
109k
        }
819
129k
        if (tok->comment_newline && tok->tok_extra_tokens) {
820
0
            tok->comment_newline = 0;
821
0
            p_start = tok->start;
822
0
            p_end = tok->cur;
823
0
            return MAKE_TOKEN(NL);
824
0
        }
825
129k
        p_start = tok->start;
826
129k
        p_end = tok->cur - 1; /* Leave '\n' out of the string */
827
129k
        tok->cont_line = 0;
828
129k
        return MAKE_TOKEN(NEWLINE);
829
129k
    }
830
831
    /* Period or number starting with period? */
832
937k
    if (c == '.') {
833
41.2k
        c = tok_nextc(tok);
834
41.2k
        if (Py_ISDIGIT(c)) {
835
3.27k
            goto fraction;
836
38.0k
        } else if (c == '.') {
837
1.31k
            c = tok_nextc(tok);
838
1.31k
            if (c == '.') {
839
608
                p_start = tok->start;
840
608
                p_end = tok->cur;
841
608
                return MAKE_TOKEN(ELLIPSIS);
842
608
            }
843
704
            else {
844
704
                tok_backup(tok, c);
845
704
            }
846
704
            tok_backup(tok, '.');
847
704
        }
848
36.6k
        else {
849
36.6k
            tok_backup(tok, c);
850
36.6k
        }
851
37.4k
        p_start = tok->start;
852
37.4k
        p_end = tok->cur;
853
37.4k
        return MAKE_TOKEN(DOT);
854
41.2k
    }
855
856
    /* Number */
857
896k
    if (Py_ISDIGIT(c)) {
858
94.2k
        if (c == '0') {
859
            /* Hex, octal or binary -- maybe. */
860
33.9k
            c = tok_nextc(tok);
861
33.9k
            if (c == 'x' || c == 'X') {
862
                /* Hex */
863
16.0k
                c = tok_nextc(tok);
864
16.2k
                do {
865
16.2k
                    if (c == '_') {
866
212
                        c = tok_nextc(tok);
867
212
                    }
868
16.2k
                    if (!Py_ISXDIGIT(c)) {
869
20
                        tok_backup(tok, c);
870
20
                        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid hexadecimal literal"));
871
20
                    }
872
79.9k
                    do {
873
79.9k
                        c = tok_nextc(tok);
874
79.9k
                    } while (Py_ISXDIGIT(c));
875
16.2k
                } while (c == '_');
876
16.0k
                if (!verify_end_of_number(tok, c, "hexadecimal")) {
877
2
                    return MAKE_TOKEN(ERRORTOKEN);
878
2
                }
879
16.0k
            }
880
17.8k
            else if (c == 'o' || c == 'O') {
881
                /* Octal */
882
608
                c = tok_nextc(tok);
883
956
                do {
884
956
                    if (c == '_') {
885
355
                        c = tok_nextc(tok);
886
355
                    }
887
956
                    if (c < '0' || c >= '8') {
888
23
                        if (Py_ISDIGIT(c)) {
889
1
                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
890
1
                                    "invalid digit '%c' in octal literal", c));
891
1
                        }
892
22
                        else {
893
22
                            tok_backup(tok, c);
894
22
                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid octal literal"));
895
22
                        }
896
23
                    }
897
2.38k
                    do {
898
2.38k
                        c = tok_nextc(tok);
899
2.38k
                    } while ('0' <= c && c < '8');
900
933
                } while (c == '_');
901
585
                if (Py_ISDIGIT(c)) {
902
1
                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
903
1
                            "invalid digit '%c' in octal literal", c));
904
1
                }
905
584
                if (!verify_end_of_number(tok, c, "octal")) {
906
4
                    return MAKE_TOKEN(ERRORTOKEN);
907
4
                }
908
584
            }
909
17.2k
            else if (c == 'b' || c == 'B') {
910
                /* Binary */
911
558
                c = tok_nextc(tok);
912
1.04k
                do {
913
1.04k
                    if (c == '_') {
914
497
                        c = tok_nextc(tok);
915
497
                    }
916
1.04k
                    if (c != '0' && c != '1') {
917
19
                        if (Py_ISDIGIT(c)) {
918
1
                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid digit '%c' in binary literal", c));
919
1
                        }
920
18
                        else {
921
18
                            tok_backup(tok, c);
922
18
                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid binary literal"));
923
18
                        }
924
19
                    }
925
4.06k
                    do {
926
4.06k
                        c = tok_nextc(tok);
927
4.06k
                    } while (c == '0' || c == '1');
928
1.02k
                } while (c == '_');
929
539
                if (Py_ISDIGIT(c)) {
930
2
                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid digit '%c' in binary literal", c));
931
2
                }
932
537
                if (!verify_end_of_number(tok, c, "binary")) {
933
1
                    return MAKE_TOKEN(ERRORTOKEN);
934
1
                }
935
537
            }
936
16.7k
            else {
937
16.7k
                int nonzero = 0;
938
                /* maybe old-style octal; c is first char of it */
939
                /* in any case, allow '0' as a literal */
940
17.9k
                while (1) {
941
17.9k
                    if (c == '_') {
942
91
                        c = tok_nextc(tok);
943
91
                        if (!Py_ISDIGIT(c)) {
944
3
                            tok_backup(tok, c);
945
3
                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid decimal literal"));
946
3
                        }
947
91
                    }
948
17.9k
                    if (c != '0') {
949
16.7k
                        break;
950
16.7k
                    }
951
1.18k
                    c = tok_nextc(tok);
952
1.18k
                }
953
16.7k
                char* zeros_end = tok->cur;
954
16.7k
                if (Py_ISDIGIT(c)) {
955
384
                    nonzero = 1;
956
384
                    c = tok_decimal_tail(tok);
957
384
                    if (c == 0) {
958
1
                        return MAKE_TOKEN(ERRORTOKEN);
959
1
                    }
960
384
                }
961
16.7k
                if (c == '.') {
962
921
                    c = tok_nextc(tok);
963
921
                    goto fraction;
964
921
                }
965
15.8k
                else if (c == 'e' || c == 'E') {
966
843
                    goto exponent;
967
843
                }
968
14.9k
                else if (c == 'j' || c == 'J') {
969
858
                    goto imaginary;
970
858
                }
971
14.1k
                else if (nonzero && !tok->tok_extra_tokens) {
972
                    /* Old-style octal: now disallowed. */
973
25
                    tok_backup(tok, c);
974
25
                    return MAKE_TOKEN(_PyTokenizer_syntaxerror_known_range(
975
25
                            tok, (int)(tok->start + 1 - tok->line_start),
976
25
                            (int)(zeros_end - tok->line_start),
977
25
                            "leading zeros in decimal integer "
978
25
                            "literals are not permitted; "
979
25
                            "use an 0o prefix for octal integers"));
980
25
                }
981
14.0k
                if (!verify_end_of_number(tok, c, "decimal")) {
982
26
                    return MAKE_TOKEN(ERRORTOKEN);
983
26
                }
984
14.0k
            }
985
33.9k
        }
986
60.2k
        else {
987
            /* Decimal */
988
60.2k
            c = tok_decimal_tail(tok);
989
60.2k
            if (c == 0) {
990
9
                return MAKE_TOKEN(ERRORTOKEN);
991
9
            }
992
60.2k
            {
993
                /* Accept floating-point numbers. */
994
60.2k
                if (c == '.') {
995
3.52k
                    c = tok_nextc(tok);
996
7.72k
        fraction:
997
                    /* Fraction */
998
7.72k
                    if (Py_ISDIGIT(c)) {
999
5.95k
                        c = tok_decimal_tail(tok);
1000
5.95k
                        if (c == 0) {
1001
1
                            return MAKE_TOKEN(ERRORTOKEN);
1002
1
                        }
1003
5.95k
                    }
1004
7.72k
                }
1005
64.4k
                if (c == 'e' || c == 'E') {
1006
9.65k
                    int e;
1007
10.4k
                  exponent:
1008
10.4k
                    e = c;
1009
                    /* Exponent part */
1010
10.4k
                    c = tok_nextc(tok);
1011
10.4k
                    if (c == '+' || c == '-') {
1012
3.79k
                        c = tok_nextc(tok);
1013
3.79k
                        if (!Py_ISDIGIT(c)) {
1014
11
                            tok_backup(tok, c);
1015
11
                            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid decimal literal"));
1016
11
                        }
1017
6.69k
                    } else if (!Py_ISDIGIT(c)) {
1018
531
                        tok_backup(tok, c);
1019
531
                        if (!verify_end_of_number(tok, e, "decimal")) {
1020
32
                            return MAKE_TOKEN(ERRORTOKEN);
1021
32
                        }
1022
499
                        tok_backup(tok, e);
1023
499
                        p_start = tok->start;
1024
499
                        p_end = tok->cur;
1025
499
                        return MAKE_TOKEN(NUMBER);
1026
531
                    }
1027
9.95k
                    c = tok_decimal_tail(tok);
1028
9.95k
                    if (c == 0) {
1029
1
                        return MAKE_TOKEN(ERRORTOKEN);
1030
1
                    }
1031
9.95k
                }
1032
64.7k
                if (c == 'j' || c == 'J') {
1033
                    /* Imaginary part */
1034
4.17k
        imaginary:
1035
4.17k
                    c = tok_nextc(tok);
1036
4.17k
                    if (!verify_end_of_number(tok, c, "imaginary")) {
1037
10
                        return MAKE_TOKEN(ERRORTOKEN);
1038
10
                    }
1039
4.17k
                }
1040
61.4k
                else if (!verify_end_of_number(tok, c, "decimal")) {
1041
108
                    return MAKE_TOKEN(ERRORTOKEN);
1042
108
                }
1043
64.7k
            }
1044
64.7k
        }
1045
96.7k
        tok_backup(tok, c);
1046
96.7k
        p_start = tok->start;
1047
96.7k
        p_end = tok->cur;
1048
96.7k
        return MAKE_TOKEN(NUMBER);
1049
94.2k
    }
1050
1051
818k
  f_string_quote:
1052
818k
    if (((Py_TOLOWER(*tok->start) == 'f' || Py_TOLOWER(*tok->start) == 'r' || Py_TOLOWER(*tok->start) == 't')
1053
16.5k
        && (c == '\'' || c == '"'))) {
1054
1055
16.5k
        int quote = c;
1056
16.5k
        int quote_size = 1;             /* 1 or 3 */
1057
1058
        /* Nodes of type STRING, especially multi-line strings
1059
           must be handled differently in order to get both
1060
           the starting line number and the column offset right.
1061
           (cf. issue 16806) */
1062
16.5k
        tok->first_lineno = tok->lineno;
1063
16.5k
        tok->multi_line_start = tok->line_start;
1064
1065
        /* Find the quote size and start of string */
1066
16.5k
        int after_quote = tok_nextc(tok);
1067
16.5k
        if (after_quote == quote) {
1068
2.61k
            int after_after_quote = tok_nextc(tok);
1069
2.61k
            if (after_after_quote == quote) {
1070
881
                quote_size = 3;
1071
881
            }
1072
1.73k
            else {
1073
                // TODO: Check this
1074
1.73k
                tok_backup(tok, after_after_quote);
1075
1.73k
                tok_backup(tok, after_quote);
1076
1.73k
            }
1077
2.61k
        }
1078
16.5k
        if (after_quote != quote) {
1079
13.8k
            tok_backup(tok, after_quote);
1080
13.8k
        }
1081
1082
1083
16.5k
        p_start = tok->start;
1084
16.5k
        p_end = tok->cur;
1085
16.5k
        if (tok->tok_mode_stack_index + 1 >= MAXFSTRINGLEVEL) {
1086
3
            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "too many nested f-strings or t-strings"));
1087
3
        }
1088
16.5k
        tokenizer_mode *the_current_tok = TOK_NEXT_MODE(tok);
1089
16.5k
        the_current_tok->kind = TOK_FSTRING_MODE;
1090
16.5k
        the_current_tok->quote = quote;
1091
16.5k
        the_current_tok->quote_size = quote_size;
1092
16.5k
        the_current_tok->start = tok->start;
1093
16.5k
        the_current_tok->multi_line_start = tok->line_start;
1094
16.5k
        the_current_tok->first_line = tok->lineno;
1095
16.5k
        the_current_tok->start_offset = -1;
1096
16.5k
        the_current_tok->multi_line_start_offset = -1;
1097
16.5k
        the_current_tok->last_expr_buffer = NULL;
1098
16.5k
        the_current_tok->last_expr_size = 0;
1099
16.5k
        the_current_tok->last_expr_end = -1;
1100
16.5k
        the_current_tok->in_format_spec = 0;
1101
16.5k
        the_current_tok->in_debug = 0;
1102
1103
16.5k
        enum string_kind_t string_kind = FSTRING;
1104
16.5k
        switch (*tok->start) {
1105
582
            case 'T':
1106
4.24k
            case 't':
1107
4.24k
                the_current_tok->raw = Py_TOLOWER(*(tok->start + 1)) == 'r';
1108
4.24k
                string_kind = TSTRING;
1109
4.24k
                break;
1110
1.82k
            case 'F':
1111
11.7k
            case 'f':
1112
11.7k
                the_current_tok->raw = Py_TOLOWER(*(tok->start + 1)) == 'r';
1113
11.7k
                break;
1114
196
            case 'R':
1115
476
            case 'r':
1116
476
                the_current_tok->raw = 1;
1117
476
                if (Py_TOLOWER(*(tok->start + 1)) == 't') {
1118
200
                    string_kind = TSTRING;
1119
200
                }
1120
476
                break;
1121
0
            default:
1122
0
                Py_UNREACHABLE();
1123
16.5k
        }
1124
1125
16.5k
        the_current_tok->string_kind = string_kind;
1126
16.5k
        the_current_tok->curly_bracket_depth = 0;
1127
16.5k
        the_current_tok->curly_bracket_expr_start_depth = -1;
1128
16.5k
        return string_kind == TSTRING ? MAKE_TOKEN(TSTRING_START) : MAKE_TOKEN(FSTRING_START);
1129
16.5k
    }
1130
1131
806k
  letter_quote:
1132
    /* String */
1133
806k
    if (c == '\'' || c == '"') {
1134
61.6k
        int quote = c;
1135
61.6k
        int quote_size = 1;             /* 1 or 3 */
1136
61.6k
        int end_quote_size = 0;
1137
61.6k
        int has_escaped_quote = 0;
1138
1139
        /* Nodes of type STRING, especially multi-line strings
1140
           must be handled differently in order to get both
1141
           the starting line number and the column offset right.
1142
           (cf. issue 16806) */
1143
61.6k
        tok->first_lineno = tok->lineno;
1144
61.6k
        tok->multi_line_start = tok->line_start;
1145
1146
        /* Find the quote size and start of string */
1147
61.6k
        c = tok_nextc(tok);
1148
61.6k
        if (c == quote) {
1149
9.92k
            c = tok_nextc(tok);
1150
9.92k
            if (c == quote) {
1151
3.11k
                quote_size = 3;
1152
3.11k
            }
1153
6.81k
            else {
1154
6.81k
                end_quote_size = 1;     /* empty string found */
1155
6.81k
            }
1156
9.92k
        }
1157
61.6k
        if (c != quote) {
1158
58.5k
            tok_backup(tok, c);
1159
58.5k
        }
1160
1161
        /* Get rest of string */
1162
1.29M
        while (end_quote_size != quote_size) {
1163
1.23M
            c = tok_nextc(tok);
1164
1.23M
            if (tok->done == E_ERROR) {
1165
0
                return MAKE_TOKEN(ERRORTOKEN);
1166
0
            }
1167
1.23M
            if (tok->done == E_DECODE) {
1168
0
                break;
1169
0
            }
1170
1.23M
            if (c == EOF || (quote_size == 1 && c == '\n')) {
1171
296
                assert(tok->multi_line_start != NULL);
1172
                // shift the tok_state's location into
1173
                // the start of the string, and report the error
1174
                // from the initial quote character
1175
296
                tok->cur = (char *)tok->start;
1176
296
                tok->cur++;
1177
296
                tok->line_start = tok->multi_line_start;
1178
296
                int start = tok->lineno;
1179
296
                tok->lineno = tok->first_lineno;
1180
1181
296
                if (INSIDE_FSTRING(tok)) {
1182
                    /* When we are in an f-string, before raising the
1183
                     * unterminated string literal error, check whether
1184
                     * does the initial quote matches with f-strings quotes
1185
                     * and if it is, then this must be a missing '}' token
1186
                     * so raise the proper error */
1187
35
                    tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
1188
35
                    if (the_current_tok->quote == quote &&
1189
29
                        the_current_tok->quote_size == quote_size) {
1190
23
                        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1191
23
                            "%c-string: expecting '}'", TOK_GET_STRING_PREFIX(tok)));
1192
23
                    }
1193
35
                }
1194
1195
273
                if (quote_size == 3) {
1196
18
                    _PyTokenizer_syntaxerror(tok, "unterminated triple-quoted string literal"
1197
18
                                     " (detected at line %d)", start);
1198
18
                    if (c != '\n') {
1199
18
                        tok->done = E_EOFS;
1200
18
                    }
1201
18
                    return MAKE_TOKEN(ERRORTOKEN);
1202
18
                }
1203
255
                else {
1204
255
                    if (has_escaped_quote) {
1205
10
                        _PyTokenizer_syntaxerror(
1206
10
                            tok,
1207
10
                            "unterminated string literal (detected at line %d); "
1208
10
                            "perhaps you escaped the end quote?",
1209
10
                            start
1210
10
                        );
1211
245
                    } else {
1212
245
                        _PyTokenizer_syntaxerror(
1213
245
                            tok, "unterminated string literal (detected at line %d)", start
1214
245
                        );
1215
245
                    }
1216
255
                    if (c != '\n') {
1217
14
                        tok->done = E_EOLS;
1218
14
                    }
1219
255
                    return MAKE_TOKEN(ERRORTOKEN);
1220
255
                }
1221
273
            }
1222
1.23M
            if (c == quote) {
1223
62.5k
                end_quote_size += 1;
1224
62.5k
            }
1225
1.16M
            else {
1226
1.16M
                end_quote_size = 0;
1227
1.16M
                if (c == '\\') {
1228
27.0k
                    c = tok_nextc(tok);  /* skip escaped char */
1229
27.0k
                    if (c == quote) {  /* but record whether the escaped char was a quote */
1230
1.12k
                        has_escaped_quote = 1;
1231
1.12k
                    }
1232
27.0k
                    if (c == '\r') {
1233
67
                        c = tok_nextc(tok);
1234
67
                    }
1235
27.0k
                }
1236
1.16M
            }
1237
1.23M
        }
1238
1239
61.3k
        p_start = tok->start;
1240
61.3k
        p_end = tok->cur;
1241
61.3k
        return MAKE_TOKEN(STRING);
1242
61.6k
    }
1243
1244
    /* Line continuation */
1245
745k
    if (c == '\\') {
1246
462
        if ((c = tok_continuation_line(tok)) == -1) {
1247
67
            return MAKE_TOKEN(ERRORTOKEN);
1248
67
        }
1249
395
        tok->cont_line = 1;
1250
395
        goto again; /* Read next line */
1251
462
    }
1252
1253
    /* Punctuation character */
1254
744k
    int is_punctuation = (c == ':' || c == '}' || c == '!' || c == '{');
1255
744k
    if (is_punctuation && INSIDE_FSTRING(tok) && INSIDE_FSTRING_EXPR(current_tok)) {
1256
        /* This code block gets executed before the curly_bracket_depth is incremented
1257
         * by the `{` case, so for ensuring that we are on the 0th level, we need
1258
         * to adjust it manually */
1259
54.5k
        int cursor = current_tok->curly_bracket_depth - (c != '{');
1260
54.5k
        int in_format_spec = current_tok->in_format_spec;
1261
54.5k
        int cursor_in_format_with_debug =
1262
54.5k
            cursor == 1 && (current_tok->in_debug || in_format_spec);
1263
54.5k
        int cursor_valid = cursor == 0 || cursor_in_format_with_debug;
1264
54.5k
        if (cursor_valid && !_PyLexer_update_ftstring_expr(tok, c)) {
1265
0
            return MAKE_TOKEN(ENDMARKER);
1266
0
        }
1267
54.5k
        if (cursor_valid && c != '{' && set_ftstring_expr(tok, token, c)) {
1268
0
            return MAKE_TOKEN(ERRORTOKEN);
1269
0
        }
1270
1271
54.5k
        if (c == ':' && cursor == current_tok->curly_bracket_expr_start_depth) {
1272
4.31k
            current_tok->kind = TOK_FSTRING_MODE;
1273
4.31k
            current_tok->in_format_spec = 1;
1274
4.31k
            p_start = tok->start;
1275
4.31k
            p_end = tok->cur;
1276
4.31k
            return MAKE_TOKEN(_PyToken_OneChar(c));
1277
4.31k
        }
1278
54.5k
    }
1279
1280
    /* Check for two-character token */
1281
740k
    {
1282
740k
        int c2 = tok_nextc(tok);
1283
740k
        int current_token = _PyToken_TwoChars(c, c2);
1284
740k
        if (current_token != OP) {
1285
26.1k
            int c3 = tok_nextc(tok);
1286
26.1k
            int current_token3 = _PyToken_ThreeChars(c, c2, c3);
1287
26.1k
            if (current_token3 != OP) {
1288
1.08k
                current_token = current_token3;
1289
1.08k
            }
1290
25.0k
            else {
1291
25.0k
                tok_backup(tok, c3);
1292
25.0k
            }
1293
26.1k
            p_start = tok->start;
1294
26.1k
            p_end = tok->cur;
1295
26.1k
            return MAKE_TOKEN(current_token);
1296
26.1k
        }
1297
714k
        tok_backup(tok, c2);
1298
714k
    }
1299
1300
    /* Keep track of parentheses nesting level */
1301
0
    switch (c) {
1302
96.4k
    case '(':
1303
128k
    case '[':
1304
171k
    case '{':
1305
171k
        if (tok->level >= MAXLEVEL) {
1306
3
            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "too many nested parentheses"));
1307
3
        }
1308
171k
        tok->parenstack[tok->level] = c;
1309
171k
        tok->parenlinenostack[tok->level] = tok->lineno;
1310
171k
        tok->parencolstack[tok->level] = (int)(tok->start - tok->line_start);
1311
171k
        tok->level++;
1312
171k
        if (INSIDE_FSTRING(tok)) {
1313
28.7k
            current_tok->curly_bracket_depth++;
1314
28.7k
        }
1315
171k
        break;
1316
69.5k
    case ')':
1317
82.0k
    case ']':
1318
107k
    case '}':
1319
107k
        if (INSIDE_FSTRING(tok) && !current_tok->curly_bracket_depth && c == '}') {
1320
51
            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1321
51
                "%c-string: single '}' is not allowed", TOK_GET_STRING_PREFIX(tok)));
1322
51
        }
1323
107k
        if (!tok->tok_extra_tokens && !tok->level) {
1324
180
            return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "unmatched '%c'", c));
1325
180
        }
1326
107k
        if (tok->level > 0) {
1327
107k
            tok->level--;
1328
107k
            int opening = tok->parenstack[tok->level];
1329
107k
            if (!tok->tok_extra_tokens && !((opening == '(' && c == ')') ||
1330
37.9k
                                            (opening == '[' && c == ']') ||
1331
25.3k
                                            (opening == '{' && c == '}'))) {
1332
                /* If the opening bracket belongs to an f-string's expression
1333
                part (e.g. f"{)}") and the closing bracket is an arbitrary
1334
                nested expression, then instead of matching a different
1335
                syntactical construct with it; we'll throw an unmatched
1336
                parentheses error. */
1337
29
                if (INSIDE_FSTRING(tok) && opening == '{') {
1338
6
                    assert(current_tok->curly_bracket_depth >= 0);
1339
6
                    int previous_bracket = current_tok->curly_bracket_depth - 1;
1340
6
                    if (previous_bracket == current_tok->curly_bracket_expr_start_depth) {
1341
4
                        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1342
4
                            "%c-string: unmatched '%c'", TOK_GET_STRING_PREFIX(tok), c));
1343
4
                    }
1344
6
                }
1345
25
                if (tok->parenlinenostack[tok->level] != tok->lineno) {
1346
2
                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1347
2
                            "closing parenthesis '%c' does not match "
1348
2
                            "opening parenthesis '%c' on line %d",
1349
2
                            c, opening, tok->parenlinenostack[tok->level]));
1350
2
                }
1351
23
                else {
1352
23
                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1353
23
                            "closing parenthesis '%c' does not match "
1354
23
                            "opening parenthesis '%c'",
1355
23
                            c, opening));
1356
23
                }
1357
25
            }
1358
107k
        }
1359
1360
107k
        if (INSIDE_FSTRING(tok)) {
1361
21.3k
            current_tok->curly_bracket_depth--;
1362
21.3k
            if (current_tok->curly_bracket_depth < 0) {
1363
1
                return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "%c-string: unmatched '%c'",
1364
1
                    TOK_GET_STRING_PREFIX(tok), c));
1365
1
            }
1366
21.3k
            if (c == '}' && current_tok->curly_bracket_depth == current_tok->curly_bracket_expr_start_depth) {
1367
20.2k
                current_tok->curly_bracket_expr_start_depth--;
1368
20.2k
                current_tok->kind = TOK_FSTRING_MODE;
1369
20.2k
                current_tok->in_format_spec = 0;
1370
20.2k
                current_tok->in_debug = 0;
1371
20.2k
            }
1372
21.3k
        }
1373
107k
        break;
1374
435k
    default:
1375
435k
        break;
1376
714k
    }
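    /* Bracket bookkeeping summary: '(', '[' and '{' push the opener plus its
       line/column onto tok->parenstack, while ')', ']' and '}' pop and verify
       the match, with extra diagnostics when the mismatch happens inside an
       f-string expression. */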
1377
1378
714k
    if (!Py_UNICODE_ISPRINTABLE(c)) {
1379
393
        return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok, "invalid non-printable character U+%04X", c));
1380
393
    }
1381
1382
713k
    if (c == '=' && INSIDE_FSTRING_EXPR_AT_TOP(current_tok)) {
1383
5.07k
        current_tok->in_debug = 1;
1384
5.07k
    }
1385
1386
    /* Punctuation character */
1387
713k
    p_start = tok->start;
1388
713k
    p_end = tok->cur;
1389
713k
    return MAKE_TOKEN(_PyToken_OneChar(c));
1390
714k
}
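/* A minimal, self-contained sketch (not CPython code) of the peek-and-back-up
   lookahead used by tok_get_normal_mode() above.  Every demo_-prefixed name is
   hypothetical; the real lexer uses tok_nextc()/tok_backup() and the generated
   _PyToken_TwoChars()/_PyToken_ThreeChars() tables rather than the hard-coded
   '*' cascade below.  (EOF comes from <stdio.h>, already pulled in via
   Python.h at the top of this file.) */

static const char *demo_src;             /* input cursor */
static int demo_pushback = -2;           /* one-slot pushback; -2 == empty */

static int demo_next(void)
{
    if (demo_pushback != -2) {
        int c = demo_pushback;
        demo_pushback = -2;
        return c;
    }
    return *demo_src ? *demo_src++ : EOF;
}

static void demo_back(int c)
{
    demo_pushback = c;                   /* re-deliver c on the next call */
}

/* Scan the longest operator starting with '*': "*", "**" or "**=". */
static int demo_scan_star(char out[4])
{
    int n = 1;
    out[0] = (char)demo_next();          /* the '*' itself */
    int c2 = demo_next();
    if (c2 == '*') {
        out[n++] = (char)c2;
        int c3 = demo_next();
        if (c3 == '=')
            out[n++] = (char)c3;         /* "**=" */
        else
            demo_back(c3);               /* only "**": undo the third read */
    }
    else {
        demo_back(c2);                   /* only "*": undo the second read */
    }
    out[n] = '\0';
    return n;
}

/* For input "**=" this yields the three-character token in one piece, which
   is exactly the shape of the _PyToken_ThreeChars() path above. */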
1391
1392
static int
1393
tok_get_fstring_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
1394
50.5k
{
1395
50.5k
    const char *p_start = NULL;
1396
50.5k
    const char *p_end = NULL;
1397
50.5k
    int end_quote_size = 0;
1398
50.5k
    int unicode_escape = 0;
1399
1400
50.5k
    tok->start = tok->cur;
1401
50.5k
    tok->first_lineno = tok->lineno;
1402
50.5k
    tok->starting_col_offset = tok->col_offset;
1403
1404
    // If we start with a bracket, we defer to the normal mode as there is nothing for us to tokenize
1405
    // before it.
1406
50.5k
    int start_char = tok_nextc(tok);
1407
50.5k
    if (start_char == '{') {
1408
14.2k
        int peek1 = tok_nextc(tok);
1409
14.2k
        tok_backup(tok, peek1);
1410
14.2k
        tok_backup(tok, start_char);
1411
14.2k
        if (peek1 != '{') {
1412
12.6k
            current_tok->curly_bracket_expr_start_depth++;
1413
12.6k
            if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
1414
3
                return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1415
3
                    "%c-string: expressions nested too deeply", TOK_GET_STRING_PREFIX(tok)));
1416
3
            }
1417
12.6k
            TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
1418
12.6k
            return tok_get_normal_mode(tok, current_tok, token);
1419
12.6k
        }
1420
14.2k
    }
1421
36.2k
    else {
1422
36.2k
        tok_backup(tok, start_char);
1423
36.2k
    }
1424
1425
    // Check if we are at the end of the string
1426
54.4k
    for (int i = 0; i < current_tok->quote_size; i++) {
1427
42.4k
        int quote = tok_nextc(tok);
1428
42.4k
        if (quote != current_tok->quote) {
1429
25.9k
            tok_backup(tok, quote);
1430
25.9k
            goto f_string_middle;
1431
25.9k
        }
1432
42.4k
    }
1433
1434
11.9k
    if (current_tok->last_expr_buffer != NULL) {
1435
7.04k
        PyMem_Free(current_tok->last_expr_buffer);
1436
7.04k
        current_tok->last_expr_buffer = NULL;
1437
7.04k
        current_tok->last_expr_size = 0;
1438
7.04k
        current_tok->last_expr_end = -1;
1439
7.04k
    }
1440
1441
11.9k
    p_start = tok->start;
1442
11.9k
    p_end = tok->cur;
1443
11.9k
    tok->tok_mode_stack_index--;
1444
11.9k
    return MAKE_TOKEN(FTSTRING_END(current_tok));
1445
1446
25.9k
f_string_middle:
1447
1448
    // TODO: This is a bit of a hack, but it works for now. We need to find a better way to handle
1449
    // this.
1450
25.9k
    tok->multi_line_start = tok->line_start;
1451
161k
    while (end_quote_size != current_tok->quote_size) {
1452
156k
        int c = tok_nextc(tok);
1453
156k
        if (tok->done == E_ERROR || tok->done == E_DECODE) {
1454
0
            return MAKE_TOKEN(ERRORTOKEN);
1455
0
        }
1456
156k
        int in_format_spec = (
1457
156k
                current_tok->in_format_spec
1458
10.4k
                &&
1459
10.4k
                INSIDE_FSTRING_EXPR(current_tok)
1460
156k
        );
1461
1462
156k
        if (c == EOF || (current_tok->quote_size == 1 && c == '\n')) {
1463
436
            if (tok->decoding_erred) {
1464
0
                return MAKE_TOKEN(ERRORTOKEN);
1465
0
            }
1466
1467
            // If we are in a format spec and we found a newline,
1468
            // it means that the format spec ends here and we should
1469
            // return to the regular mode.
1470
436
            if (in_format_spec && c == '\n') {
1471
52
                if (current_tok->quote_size == 1) {
1472
52
                    return MAKE_TOKEN(
1473
52
                        _PyTokenizer_syntaxerror(
1474
52
                            tok,
1475
52
                            "%c-string: newlines are not allowed in format specifiers for single quoted %c-strings",
1476
52
                            TOK_GET_STRING_PREFIX(tok), TOK_GET_STRING_PREFIX(tok)
1477
52
                        )
1478
52
                    );
1479
52
                }
1480
0
                tok_backup(tok, c);
1481
0
                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
1482
0
                current_tok->in_format_spec = 0;
1483
0
                p_start = tok->start;
1484
0
                p_end = tok->cur;
1485
0
                return MAKE_TOKEN(FTSTRING_MIDDLE(current_tok));
1486
52
            }
1487
1488
436
            assert(tok->multi_line_start != NULL);
1489
            // shift the tok_state's location to
1490
            // the start of the string, and report the error
1491
            // from the initial quote character
1492
384
            tok->cur = (char *)current_tok->start;
1493
384
            tok->cur++;
1494
384
            tok->line_start = current_tok->multi_line_start;
1495
384
            int start = tok->lineno;
1496
1497
384
            tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
1498
384
            tok->lineno = the_current_tok->first_line;
1499
1500
384
            if (current_tok->quote_size == 3) {
1501
37
                _PyTokenizer_syntaxerror(tok,
1502
37
                                    "unterminated triple-quoted %c-string literal"
1503
37
                                    " (detected at line %d)",
1504
37
                                    TOK_GET_STRING_PREFIX(tok), start);
1505
37
                if (c != '\n') {
1506
37
                    tok->done = E_EOFS;
1507
37
                }
1508
37
                return MAKE_TOKEN(ERRORTOKEN);
1509
37
            }
1510
347
            else {
1511
347
                return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1512
347
                                    "unterminated %c-string literal (detected at"
1513
347
                                    " line %d)", TOK_GET_STRING_PREFIX(tok), start));
1514
347
            }
1515
384
        }
1516
1517
155k
        if (c == current_tok->quote) {
1518
8.36k
            end_quote_size += 1;
1519
8.36k
            continue;
1520
147k
        } else {
1521
147k
            end_quote_size = 0;
1522
147k
        }
1523
1524
147k
        if (c == '{') {
1525
15.2k
            if (!_PyLexer_update_ftstring_expr(tok, c)) {
1526
0
                return MAKE_TOKEN(ENDMARKER);
1527
0
            }
1528
15.2k
            int peek = tok_nextc(tok);
1529
15.2k
            if (peek != '{' || in_format_spec) {
1530
13.0k
                tok_backup(tok, peek);
1531
13.0k
                tok_backup(tok, c);
1532
13.0k
                current_tok->curly_bracket_expr_start_depth++;
1533
13.0k
                if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
1534
5
                    return MAKE_TOKEN(_PyTokenizer_syntaxerror(tok,
1535
5
                        "%c-string: expressions nested too deeply", TOK_GET_STRING_PREFIX(tok)));
1536
5
                }
1537
13.0k
                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
1538
13.0k
                current_tok->in_format_spec = 0;
1539
13.0k
                p_start = tok->start;
1540
13.0k
                p_end = tok->cur;
1541
13.0k
            } else {
1542
2.12k
                p_start = tok->start;
1543
2.12k
                p_end = tok->cur - 1;
1544
2.12k
            }
1545
15.1k
            return MAKE_TOKEN(FTSTRING_MIDDLE(current_tok));
1546
132k
        } else if (c == '}') {
1547
4.98k
            if (unicode_escape) {
1548
340
                p_start = tok->start;
1549
340
                p_end = tok->cur;
1550
340
                return MAKE_TOKEN(FTSTRING_MIDDLE(current_tok));
1551
340
            }
1552
4.64k
            int peek = tok_nextc(tok);
1553
1554
            // The tokenizer can only be in the format spec if we have already completed the expression
1555
            // scanning (indicated by the end of the expression being set) and we are not at the top level
1556
            // of the bracket stack (-1 is the top level). Since format specifiers can't legally use double
1557
            // brackets, we can bypass it here.
1558
4.64k
            int cursor = current_tok->curly_bracket_depth;
1559
4.64k
            if (peek == '}' && !in_format_spec && cursor == 0) {
1560
1.63k
                p_start = tok->start;
1561
1.63k
                p_end = tok->cur - 1;
1562
3.01k
            } else {
1563
3.01k
                tok_backup(tok, peek);
1564
3.01k
                tok_backup(tok, c);
1565
3.01k
                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
1566
3.01k
                current_tok->in_format_spec = 0;
1567
3.01k
                p_start = tok->start;
1568
3.01k
                p_end = tok->cur;
1569
3.01k
            }
1570
4.64k
            return MAKE_TOKEN(FTSTRING_MIDDLE(current_tok));
1571
127k
        } else if (c == '\\') {
1572
5.36k
            int peek = tok_nextc(tok);
1573
5.36k
            if (peek == '\r') {
1574
67
                peek = tok_nextc(tok);
1575
67
            }
1576
            // Special case when the backslash is right before a curly
1577
            // brace. We have to restore and return the control back
1578
            // to the loop for the next iteration.
1579
5.36k
            if (peek == '{' || peek == '}') {
1580
1.27k
                if (!current_tok->raw) {
1581
1.08k
                    if (_PyTokenizer_warn_invalid_escape_sequence(tok, peek)) {
1582
1
                        return MAKE_TOKEN(ERRORTOKEN);
1583
1
                    }
1584
1.08k
                }
1585
1.27k
                tok_backup(tok, peek);
1586
1.27k
                continue;
1587
1.27k
            }
1588
1589
4.08k
            if (!current_tok->raw) {
1590
3.96k
                if (peek == 'N') {
1591
                    /* Handle named unicode escapes (\N{BULLET}) */
1592
467
                    peek = tok_nextc(tok);
1593
467
                    if (peek == '{') {
1594
380
                        unicode_escape = 1;
1595
380
                    } else {
1596
87
                        tok_backup(tok, peek);
1597
87
                    }
1598
467
                }
1599
3.96k
            } /* else {
1600
                skip the escaped character
1601
            }*/
1602
4.08k
        }
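        /* Note: once "\N{" sets unicode_escape, the '}' branch earlier in
           this loop consumes the closing brace as part of the named escape
           instead of treating it as the end of an expression. */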
1603
147k
    }
1604
1605
    // Back up over the f-string quotes to emit a final FSTRING_MIDDLE and
1606
    // add the quotes to the FSTRING_END in the next tokenizer iteration.
1607
11.4k
    for (int i = 0; i < current_tok->quote_size; i++) {
1608
6.13k
        tok_backup(tok, current_tok->quote);
1609
6.13k
    }
1610
5.31k
    p_start = tok->start;
1611
5.31k
    p_end = tok->cur;
1612
5.31k
    return MAKE_TOKEN(FTSTRING_MIDDLE(current_tok));
1613
25.9k
}
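/* A hedged, self-contained sketch (hypothetical, not CPython code) of the
   end_quote_size counting loop above: a run of quote_size consecutive quote
   characters terminates the literal, and any other character resets the run. */
static int demo_find_string_end(const char *s, char quote, int quote_size)
{
    int run = 0;
    for (int i = 0; s[i] != '\0'; i++) {
        if (s[i] == quote) {
            if (++run == quote_size)
                return i + 1;            /* index just past the closing quotes */
        }
        else {
            run = 0;                     /* the run was interrupted */
        }
    }
    return -1;                           /* unterminated literal */
}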
1614
1615
static int
1616
tok_get(struct tok_state *tok, struct token *token)
1617
1.75M
{
1618
1.75M
    tokenizer_mode *current_tok = TOK_GET_MODE(tok);
1619
1.75M
    if (current_tok->kind == TOK_REGULAR_MODE) {
1620
1.70M
        return tok_get_normal_mode(tok, current_tok, token);
1621
1.70M
    } else {
1622
50.5k
        return tok_get_fstring_mode(tok, current_tok, token);
1623
50.5k
    }
1624
1.75M
}
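/* tok_get() is the mode dispatcher: it inspects the top of
   tok->tok_mode_stack and routes to either the regular scanner or the
   f/t-string scanner.  Entering an interpolation pushes a mode
   (TOK_NEXT_MODE); emitting FSTRING_END/TSTRING_END pops it
   (tok->tok_mode_stack_index-- above), so nested f-strings unwind one
   level at a time. */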
1625
1626
int
1627
_PyTokenizer_Get(struct tok_state *tok, struct token *token)
1628
1.75M
{
1629
1.75M
    int result = tok_get(tok, token);
1630
1.75M
    if (tok->decoding_erred) {
1631
0
        result = ERRORTOKEN;
1632
0
        tok->done = E_DECODE;
1633
0
    }
1634
1.75M
    return result;
1635
1.75M
}
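
/* A hedged sketch of how a consumer might drive this entry point: keep
   calling _PyTokenizer_Get() until ENDMARKER or ERRORTOKEN comes back.
   Constructing the tok_state (e.g. with one of the _PyTokenizer_From*
   helpers declared in the tokenizer headers) is elided here; the loop
   shape is the point. */
static int demo_drain(struct tok_state *tok)
{
    struct token t;
    int type;
    do {
        type = _PyTokenizer_Get(tok, &t);
        /* a real consumer hands (type, t.start, t.end) to the parser */
    } while (type != ENDMARKER && type != ERRORTOKEN);
    return type;
}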