Coverage Report

Created: 2026-03-08 06:40

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Parser/tokenizer/helpers.c
Line
Count
Source
1
#include "Python.h"
2
#include "errcode.h"
3
#include "pycore_token.h"
4
5
#include "../lexer/state.h"
6
7
8
/* ############## ERRORS ############## */
9
10
static int
11
_syntaxerror_range(struct tok_state *tok, const char *format,
12
                   int col_offset, int end_col_offset,
13
                   va_list vargs)
14
2.76k
{
15
    // In release builds, we don't want to overwrite a previous error, but in debug builds we
16
    // want to fail if we are not doing it so we can fix it.
17
2.76k
    assert(tok->done != E_ERROR);
18
2.76k
    if (tok->done == E_ERROR) {
19
0
        return ERRORTOKEN;
20
0
    }
21
2.76k
    PyObject *errmsg, *errtext, *args;
22
2.76k
    errmsg = PyUnicode_FromFormatV(format, vargs);
23
2.76k
    if (!errmsg) {
24
0
        goto error;
25
0
    }
26
27
2.76k
    errtext = PyUnicode_DecodeUTF8(tok->line_start, tok->cur - tok->line_start,
28
2.76k
                                   "replace");
29
2.76k
    if (!errtext) {
30
0
        goto error;
31
0
    }
32
33
2.76k
    if (col_offset == -1) {
34
2.21k
        col_offset = (int)PyUnicode_GET_LENGTH(errtext);
35
2.21k
    }
36
2.76k
    if (end_col_offset == -1) {
37
2.21k
        end_col_offset = col_offset;
38
2.21k
    }
39
40
2.76k
    Py_ssize_t line_len = strcspn(tok->line_start, "\n");
41
2.76k
    if (line_len != tok->cur - tok->line_start) {
42
2.12k
        Py_DECREF(errtext);
43
2.12k
        errtext = PyUnicode_DecodeUTF8(tok->line_start, line_len,
44
2.12k
                                       "replace");
45
2.12k
    }
46
2.76k
    if (!errtext) {
47
0
        goto error;
48
0
    }
49
50
2.76k
    args = Py_BuildValue("(O(OiiNii))", errmsg,
51
2.76k
                         tok->filename ? tok->filename : Py_None,
52
2.76k
                         tok->lineno, col_offset, errtext,
53
2.76k
                         tok->lineno, end_col_offset);
54
2.76k
    if (args) {
55
2.76k
        PyErr_SetObject(PyExc_SyntaxError, args);
56
2.76k
        Py_DECREF(args);
57
2.76k
    }
58
59
2.76k
error:
60
2.76k
    Py_XDECREF(errmsg);
61
2.76k
    tok->done = E_ERROR;
62
2.76k
    return ERRORTOKEN;
63
2.76k
}
64
65
int
66
_PyTokenizer_syntaxerror(struct tok_state *tok, const char *format, ...)
67
2.21k
{
68
    // These errors are cleaned on startup. Todo: Fix it.
69
2.21k
    va_list vargs;
70
2.21k
    va_start(vargs, format);
71
2.21k
    int ret = _syntaxerror_range(tok, format, -1, -1, vargs);
72
2.21k
    va_end(vargs);
73
2.21k
    return ret;
74
2.21k
}
75
76
int
77
_PyTokenizer_syntaxerror_known_range(struct tok_state *tok,
78
                        int col_offset, int end_col_offset,
79
                        const char *format, ...)
80
554
{
81
554
    va_list vargs;
82
554
    va_start(vargs, format);
83
554
    int ret = _syntaxerror_range(tok, format, col_offset, end_col_offset, vargs);
84
554
    va_end(vargs);
85
554
    return ret;
86
554
}
87
88
int
89
_PyTokenizer_indenterror(struct tok_state *tok)
90
4
{
91
4
    tok->done = E_TABSPACE;
92
4
    tok->cur = tok->inp;
93
4
    return ERRORTOKEN;
94
4
}
95
96
char *
97
_PyTokenizer_error_ret(struct tok_state *tok) /* XXX */
98
2.07k
{
99
2.07k
    tok->decoding_erred = 1;
100
2.07k
    if ((tok->fp != NULL || tok->readline != NULL) && tok->buf != NULL) {/* see _PyTokenizer_Free */
101
0
        PyMem_Free(tok->buf);
102
0
    }
103
2.07k
    tok->buf = tok->cur = tok->inp = NULL;
104
2.07k
    tok->start = NULL;
105
2.07k
    tok->end = NULL;
106
2.07k
    tok->done = E_DECODE;
107
2.07k
    return NULL;                /* as if it were EOF */
108
2.07k
}
109
110
int
111
_PyTokenizer_warn_invalid_escape_sequence(struct tok_state *tok, int first_invalid_escape_char)
112
957
{
113
957
    if (!tok->report_warnings) {
114
0
        return 0;
115
0
    }
116
117
957
    PyObject *msg = PyUnicode_FromFormat(
118
957
        "\"\\%c\" is an invalid escape sequence. "
119
957
        "Such sequences will not work in the future. "
120
957
        "Did you mean \"\\\\%c\"? A raw string is also an option.",
121
957
        (char) first_invalid_escape_char,
122
957
        (char) first_invalid_escape_char
123
957
    );
124
125
957
    if (msg == NULL) {
126
0
        return -1;
127
0
    }
128
129
957
    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg, tok->filename,
130
957
                                 tok->lineno, tok->module, NULL) < 0) {
131
0
        Py_DECREF(msg);
132
133
0
        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
134
            /* Replace the SyntaxWarning exception with a SyntaxError
135
               to get a more accurate error report */
136
0
            PyErr_Clear();
137
138
0
            return _PyTokenizer_syntaxerror(tok,
139
0
                "\"\\%c\" is an invalid escape sequence. "
140
0
                "Did you mean \"\\\\%c\"? A raw string is also an option.",
141
0
                (char) first_invalid_escape_char,
142
0
                (char) first_invalid_escape_char);
143
0
        }
144
145
0
        return -1;
146
0
    }
147
148
957
    Py_DECREF(msg);
149
957
    return 0;
150
957
}
151
152
int
153
_PyTokenizer_parser_warn(struct tok_state *tok, PyObject *category, const char *format, ...)
154
7.95k
{
155
7.95k
    if (!tok->report_warnings) {
156
0
        return 0;
157
0
    }
158
159
7.95k
    PyObject *errmsg;
160
7.95k
    va_list vargs;
161
7.95k
    va_start(vargs, format);
162
7.95k
    errmsg = PyUnicode_FromFormatV(format, vargs);
163
7.95k
    va_end(vargs);
164
7.95k
    if (!errmsg) {
165
0
        goto error;
166
0
    }
167
168
7.95k
    if (PyErr_WarnExplicitObject(category, errmsg, tok->filename,
169
7.95k
                                 tok->lineno, tok->module, NULL) < 0) {
170
0
        if (PyErr_ExceptionMatches(category)) {
171
            /* Replace the DeprecationWarning exception with a SyntaxError
172
               to get a more accurate error report */
173
0
            PyErr_Clear();
174
0
            _PyTokenizer_syntaxerror(tok, "%U", errmsg);
175
0
        }
176
0
        goto error;
177
0
    }
178
7.95k
    Py_DECREF(errmsg);
179
7.95k
    return 0;
180
181
0
error:
182
0
    Py_XDECREF(errmsg);
183
0
    tok->done = E_ERROR;
184
0
    return -1;
185
7.95k
}
186
187
188
/* ############## STRING MANIPULATION ############## */
189
190
char *
191
_PyTokenizer_new_string(const char *s, Py_ssize_t len, struct tok_state *tok)
192
96.0k
{
193
96.0k
    char* result = (char *)PyMem_Malloc(len + 1);
194
96.0k
    if (!result) {
195
0
        tok->done = E_NOMEM;
196
0
        return NULL;
197
0
    }
198
96.0k
    memcpy(result, s, len);
199
96.0k
    result[len] = '\0';
200
96.0k
    return result;
201
96.0k
}
202
203
PyObject *
204
3.32k
_PyTokenizer_translate_into_utf8(const char* str, const char* enc) {
205
3.32k
    PyObject *utf8;
206
3.32k
    PyObject* buf = PyUnicode_Decode(str, strlen(str), enc, NULL);
207
3.32k
    if (buf == NULL)
208
1.52k
        return NULL;
209
1.79k
    utf8 = PyUnicode_AsUTF8String(buf);
210
1.79k
    Py_DECREF(buf);
211
1.79k
    return utf8;
212
3.32k
}
213
214
char *
215
_PyTokenizer_translate_newlines(const char *s, int exec_input, int preserve_crlf,
216
112k
                   struct tok_state *tok) {
217
112k
    int skip_next_lf = 0;
218
112k
    size_t needed_length = strlen(s) + 2, final_length;
219
112k
    char *buf, *current;
220
112k
    char c = '\0';
221
112k
    buf = PyMem_Malloc(needed_length);
222
112k
    if (buf == NULL) {
223
0
        tok->done = E_NOMEM;
224
0
        return NULL;
225
0
    }
226
7.84M
    for (current = buf; *s; s++, current++) {
227
7.73M
        c = *s;
228
7.73M
        if (skip_next_lf) {
229
8.09k
            skip_next_lf = 0;
230
8.09k
            if (c == '\n') {
231
317
                c = *++s;
232
317
                if (!c)
233
12
                    break;
234
317
            }
235
8.09k
        }
236
7.73M
        if (!preserve_crlf && c == '\r') {
237
8.14k
            skip_next_lf = 1;
238
8.14k
            c = '\n';
239
8.14k
        }
240
7.73M
        *current = c;
241
7.73M
    }
242
    /* If this is exec input, add a newline to the end of the string if
243
       there isn't one already. */
244
112k
    if (exec_input && c != '\n' && c != '\0') {
245
110k
        *current = '\n';
246
110k
        current++;
247
110k
    }
248
112k
    *current = '\0';
249
112k
    final_length = current - buf + 1;
250
112k
    if (final_length < needed_length && final_length) {
251
        /* should never fail */
252
2.75k
        char* result = PyMem_Realloc(buf, final_length);
253
2.75k
        if (result == NULL) {
254
0
            PyMem_Free(buf);
255
0
        }
256
2.75k
        buf = result;
257
2.75k
    }
258
112k
    return buf;
259
112k
}
260
261
/* ############## ENCODING STUFF ############## */
262
263
264
/* See whether the file starts with a BOM. If it does,
265
   invoke the set_readline function with the new encoding.
266
   Return 1 on success, 0 on failure.  */
267
int
268
_PyTokenizer_check_bom(int get_char(struct tok_state *),
269
          void unget_char(int, struct tok_state *),
270
          int set_readline(struct tok_state *, const char *),
271
          struct tok_state *tok)
272
18.3k
{
273
18.3k
    int ch1, ch2, ch3;
274
18.3k
    ch1 = get_char(tok);
275
18.3k
    tok->decoding_state = STATE_SEEK_CODING;
276
18.3k
    if (ch1 == EOF) {
277
0
        return 1;
278
18.3k
    } else if (ch1 == 0xEF) {
279
56
        ch2 = get_char(tok);
280
56
        if (ch2 != 0xBB) {
281
49
            unget_char(ch2, tok);
282
49
            unget_char(ch1, tok);
283
49
            return 1;
284
49
        }
285
7
        ch3 = get_char(tok);
286
7
        if (ch3 != 0xBF) {
287
3
            unget_char(ch3, tok);
288
3
            unget_char(ch2, tok);
289
3
            unget_char(ch1, tok);
290
3
            return 1;
291
3
        }
292
18.3k
    } else {
293
18.3k
        unget_char(ch1, tok);
294
18.3k
        return 1;
295
18.3k
    }
296
4
    if (tok->encoding != NULL)
297
0
        PyMem_Free(tok->encoding);
298
4
    tok->encoding = _PyTokenizer_new_string("utf-8", 5, tok);
299
4
    if (!tok->encoding)
300
0
        return 0;
301
    /* No need to set_readline: input is already utf-8 */
302
4
    return 1;
303
4
}
304
305
static const char *
306
get_normal_name(const char *s)  /* for utf-8 and latin-1 */
307
3.32k
{
308
3.32k
    char buf[13];
309
3.32k
    int i;
310
21.7k
    for (i = 0; i < 12; i++) {
311
21.5k
        int c = s[i];
312
21.5k
        if (c == '\0')
313
3.14k
            break;
314
18.4k
        else if (c == '_')
315
482
            buf[i] = '-';
316
17.9k
        else
317
17.9k
            buf[i] = Py_TOLOWER(c);
318
21.5k
    }
319
3.32k
    buf[i] = '\0';
320
3.32k
    if (strcmp(buf, "utf-8") == 0 ||
321
3.32k
        strncmp(buf, "utf-8-", 6) == 0)
322
2
        return "utf-8";
323
3.32k
    else if (strcmp(buf, "latin-1") == 0 ||
324
3.32k
             strcmp(buf, "iso-8859-1") == 0 ||
325
3.32k
             strcmp(buf, "iso-latin-1") == 0 ||
326
3.32k
             strncmp(buf, "latin-1-", 8) == 0 ||
327
3.32k
             strncmp(buf, "iso-8859-1-", 11) == 0 ||
328
3.31k
             strncmp(buf, "iso-latin-1-", 12) == 0)
329
11
        return "iso-8859-1";
330
3.31k
    else
331
3.31k
        return s;
332
3.32k
}
333
334
/* Return the coding spec in S, or NULL if none is found.  */
335
static int
336
get_coding_spec(const char *s, char **spec, Py_ssize_t size, struct tok_state *tok)
337
18.9k
{
338
18.9k
    Py_ssize_t i;
339
18.9k
    *spec = NULL;
340
    /* Coding spec must be in a comment, and that comment must be
341
     * the only statement on the source code line. */
342
20.2k
    for (i = 0; i < size - 6; i++) {
343
16.3k
        if (s[i] == '#')
344
3.53k
            break;
345
12.8k
        if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
346
11.5k
            return 1;
347
12.8k
    }
348
190k
    for (; i < size - 6; i++) { /* XXX inefficient search */
349
186k
        const char* t = s + i;
350
186k
        if (memcmp(t, "coding", 6) == 0) {
351
3.85k
            const char* begin = NULL;
352
3.85k
            t += 6;
353
3.85k
            if (t[0] != ':' && t[0] != '=')
354
249
                continue;
355
4.16k
            do {
356
4.16k
                t++;
357
4.16k
            } while (t[0] == ' ' || t[0] == '\t');
358
359
3.60k
            begin = t;
360
25.8k
            while (Py_ISALNUM(t[0]) ||
361
5.89k
                   t[0] == '-' || t[0] == '_' || t[0] == '.')
362
22.2k
                t++;
363
364
3.60k
            if (begin < t) {
365
3.32k
                char* r = _PyTokenizer_new_string(begin, t - begin, tok);
366
3.32k
                const char* q;
367
3.32k
                if (!r)
368
0
                    return 0;
369
3.32k
                q = get_normal_name(r);
370
3.32k
                if (r != q) {
371
13
                    PyMem_Free(r);
372
13
                    r = _PyTokenizer_new_string(q, strlen(q), tok);
373
13
                    if (!r)
374
0
                        return 0;
375
13
                }
376
3.32k
                *spec = r;
377
3.32k
                break;
378
3.32k
            }
379
3.60k
        }
380
186k
    }
381
7.36k
    return 1;
382
7.36k
}
383
384
/* Check whether the line contains a coding spec. If it does,
385
   invoke the set_readline function for the new encoding.
386
   This function receives the tok_state and the new encoding.
387
   Return 1 on success, 0 on failure.  */
388
int
389
_PyTokenizer_check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
390
                  int set_readline(struct tok_state *, const char *))
391
18.9k
{
392
18.9k
    char *cs;
393
18.9k
    if (tok->cont_line) {
394
        /* It's a continuation line, so it can't be a coding spec. */
395
0
        tok->decoding_state = STATE_NORMAL;
396
0
        return 1;
397
0
    }
398
18.9k
    if (!get_coding_spec(line, &cs, size, tok)) {
399
0
        return 0;
400
0
    }
401
18.9k
    if (!cs) {
402
15.5k
        Py_ssize_t i;
403
17.0k
        for (i = 0; i < size; i++) {
404
16.7k
            if (line[i] == '#' || line[i] == '\n' || line[i] == '\r')
405
446
                break;
406
16.2k
            if (line[i] != ' ' && line[i] != '\t' && line[i] != '\014') {
407
                /* Stop checking coding spec after a line containing
408
                 * anything except a comment. */
409
14.8k
                tok->decoding_state = STATE_NORMAL;
410
14.8k
                break;
411
14.8k
            }
412
16.2k
        }
413
15.5k
        return 1;
414
15.5k
    }
415
3.32k
    tok->decoding_state = STATE_NORMAL;
416
3.32k
    if (tok->encoding == NULL) {
417
3.32k
        assert(tok->decoding_readline == NULL);
418
3.32k
        if (strcmp(cs, "utf-8") != 0 && !set_readline(tok, cs)) {
419
0
            _PyTokenizer_error_ret(tok);
420
0
            PyErr_Format(PyExc_SyntaxError, "encoding problem: %s", cs);
421
0
            PyMem_Free(cs);
422
0
            return 0;
423
0
        }
424
3.32k
        tok->encoding = cs;
425
3.32k
    } else {                /* then, compare cs with BOM */
426
2
        if (strcmp(tok->encoding, cs) != 0) {
427
2
            tok->line_start = line;
428
2
            tok->cur = (char *)line;
429
2
            assert(size <= INT_MAX);
430
2
            _PyTokenizer_syntaxerror_known_range(tok, 0, (int)size,
431
2
                        "encoding problem: %s with BOM", cs);
432
2
            PyMem_Free(cs);
433
2
            _PyTokenizer_error_ret(tok);
434
2
            return 0;
435
2
        }
436
0
        PyMem_Free(cs);
437
0
    }
438
3.32k
    return 1;
439
3.32k
}
440
441
/* Check whether the characters at s start a valid
442
   UTF-8 sequence. Return the number of characters forming
443
   the sequence if yes, 0 if not.  The special cases match
444
   those in stringlib/codecs.h:utf8_decode.
445
*/
446
static int
447
valid_utf8(const unsigned char* s)
448
4.43M
{
449
4.43M
    int expected = 0;
450
4.43M
    int length;
451
4.43M
    if (*s < 0x80) {
452
        /* single-byte code */
453
4.43M
        return 1;
454
4.43M
    }
455
8.55k
    else if (*s < 0xE0) {
456
        /* \xC2\x80-\xDF\xBF -- 0080-07FF */
457
3.27k
        if (*s < 0xC2) {
458
            /* invalid sequence
459
               \x80-\xBF -- continuation byte
460
               \xC0-\xC1 -- fake 0000-007F */
461
106
            return 0;
462
106
        }
463
3.16k
        expected = 1;
464
3.16k
    }
465
5.28k
    else if (*s < 0xF0) {
466
        /* \xE0\xA0\x80-\xEF\xBF\xBF -- 0800-FFFF */
467
2.36k
        if (*s == 0xE0 && *(s + 1) < 0xA0) {
468
            /* invalid sequence
469
               \xE0\x80\x80-\xE0\x9F\xBF -- fake 0000-0800 */
470
14
            return 0;
471
14
        }
472
2.35k
        else if (*s == 0xED && *(s + 1) >= 0xA0) {
473
            /* Decoding UTF-8 sequences in range \xED\xA0\x80-\xED\xBF\xBF
474
               will result in surrogates in range D800-DFFF. Surrogates are
475
               not valid UTF-8 so they are rejected.
476
               See https://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
477
               (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt */
478
21
            return 0;
479
21
        }
480
2.33k
        expected = 2;
481
2.33k
    }
482
2.91k
    else if (*s < 0xF5) {
483
        /* \xF0\x90\x80\x80-\xF4\x8F\xBF\xBF -- 10000-10FFFF */
484
2.87k
        if (*(s + 1) < 0x90 ? *s == 0xF0 : *s == 0xF4) {
485
            /* invalid sequence -- one of:
486
               \xF0\x80\x80\x80-\xF0\x8F\xBF\xBF -- fake 0000-FFFF
487
               \xF4\x90\x80\x80- -- 110000- overflow */
488
38
            return 0;
489
38
        }
490
2.84k
        expected = 3;
491
2.84k
    }
492
38
    else {
493
        /* invalid start byte */
494
38
        return 0;
495
38
    }
496
8.33k
    length = expected + 1;
497
24.1k
    for (int i = 1; i <= expected; i++) {
498
16.1k
        if (s[i] < 0x80 || s[i] >= 0xC0) {
499
299
            return 0;
500
299
        }
501
16.1k
    }
502
8.03k
    return length;
503
8.33k
}
504
505
int
506
_PyTokenizer_ensure_utf8(const char *line, struct tok_state *tok, int lineno)
507
15.3k
{
508
15.3k
    const char *badchar = NULL;
509
15.3k
    const char *c;
510
15.3k
    int length;
511
15.3k
    int col_offset = 0;
512
15.3k
    const char *line_start = line;
513
4.45M
    for (c = line; *c; c += length) {
514
4.43M
        if (!(length = valid_utf8((const unsigned char *)c))) {
515
516
            badchar = c;
516
516
            break;
517
516
        }
518
4.43M
        col_offset++;
519
4.43M
        if (*c == '\n') {
520
148k
            lineno++;
521
148k
            col_offset = 0;
522
148k
            line_start = c + 1;
523
148k
        }
524
4.43M
    }
525
15.3k
    if (badchar) {
526
516
        tok->lineno = lineno;
527
516
        tok->line_start = line_start;
528
516
        tok->cur = (char *)badchar;
529
516
        _PyTokenizer_syntaxerror_known_range(tok,
530
516
                col_offset + 1, col_offset + 1,
531
516
                "Non-UTF-8 code starting with '\\x%.2x'"
532
516
                "%s%V on line %i, "
533
516
                "but no encoding declared; "
534
516
                "see https://peps.python.org/pep-0263/ for details",
535
516
                (unsigned char)*badchar,
536
516
                tok->filename ? " in file " : "", tok->filename, "",
537
516
                lineno);
538
516
        return 0;
539
516
    }
540
14.8k
    return 1;
541
15.3k
}
542
543
544
/* ############## DEBUGGING STUFF ############## */
545
546
#ifdef Py_DEBUG
547
void
548
_PyTokenizer_print_escape(FILE *f, const char *s, Py_ssize_t size)
549
{
550
    if (s == NULL) {
551
        fputs("NULL", f);
552
        return;
553
    }
554
    putc('"', f);
555
    while (size-- > 0) {
556
        unsigned char c = *s++;
557
        switch (c) {
558
            case '\n': fputs("\\n", f); break;
559
            case '\r': fputs("\\r", f); break;
560
            case '\t': fputs("\\t", f); break;
561
            case '\f': fputs("\\f", f); break;
562
            case '\'': fputs("\\'", f); break;
563
            case '"': fputs("\\\"", f); break;
564
            default:
565
                if (0x20 <= c && c <= 0x7f)
566
                    putc(c, f);
567
                else
568
                    fprintf(f, "\\x%02x", c);
569
        }
570
    }
571
    putc('"', f);
572
}
573
574
void
575
_PyTokenizer_tok_dump(int type, char *start, char *end)
576
{
577
    fprintf(stderr, "%s", _PyParser_TokenNames[type]);
578
    if (type == NAME || type == NUMBER || type == STRING || type == OP)
579
        fprintf(stderr, "(%.*s)", (int)(end - start), start);
580
}
581
#endif