Coverage Report

Created: 2025-11-30 06:38

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/cpython/Parser/tokenizer/helpers.c
Line
Count
Source
1
#include "Python.h"
2
#include "errcode.h"
3
#include "pycore_token.h"
4
5
#include "../lexer/state.h"
6
7
8
/* ############## ERRORS ############## */
9
10
static int
11
_syntaxerror_range(struct tok_state *tok, const char *format,
12
                   int col_offset, int end_col_offset,
13
                   va_list vargs)
14
2.90k
{
15
    // In release builds, we don't want to overwrite a previous error, but in debug builds we
16
    // want to fail if we are not doing it so we can fix it.
17
2.90k
    assert(tok->done != E_ERROR);
18
2.90k
    if (tok->done == E_ERROR) {
19
0
        return ERRORTOKEN;
20
0
    }
21
2.90k
    PyObject *errmsg, *errtext, *args;
22
2.90k
    errmsg = PyUnicode_FromFormatV(format, vargs);
23
2.90k
    if (!errmsg) {
24
0
        goto error;
25
0
    }
26
27
2.90k
    errtext = PyUnicode_DecodeUTF8(tok->line_start, tok->cur - tok->line_start,
28
2.90k
                                   "replace");
29
2.90k
    if (!errtext) {
30
0
        goto error;
31
0
    }
32
33
2.90k
    if (col_offset == -1) {
34
2.31k
        col_offset = (int)PyUnicode_GET_LENGTH(errtext);
35
2.31k
    }
36
2.90k
    if (end_col_offset == -1) {
37
2.31k
        end_col_offset = col_offset;
38
2.31k
    }
39
40
2.90k
    Py_ssize_t line_len = strcspn(tok->line_start, "\n");
41
2.90k
    if (line_len != tok->cur - tok->line_start) {
42
2.14k
        Py_DECREF(errtext);
43
2.14k
        errtext = PyUnicode_DecodeUTF8(tok->line_start, line_len,
44
2.14k
                                       "replace");
45
2.14k
    }
46
2.90k
    if (!errtext) {
47
0
        goto error;
48
0
    }
49
50
2.90k
    args = Py_BuildValue("(O(OiiNii))", errmsg,
51
2.90k
                         tok->filename ? tok->filename : Py_None,
52
2.90k
                         tok->lineno, col_offset, errtext,
53
2.90k
                         tok->lineno, end_col_offset);
54
2.90k
    if (args) {
55
2.90k
        PyErr_SetObject(PyExc_SyntaxError, args);
56
2.90k
        Py_DECREF(args);
57
2.90k
    }
58
59
2.90k
error:
60
2.90k
    Py_XDECREF(errmsg);
61
2.90k
    tok->done = E_ERROR;
62
2.90k
    return ERRORTOKEN;
63
2.90k
}
64
65
int
66
_PyTokenizer_syntaxerror(struct tok_state *tok, const char *format, ...)
67
2.31k
{
68
    // This errors are cleaned on startup. Todo: Fix it.
69
2.31k
    va_list vargs;
70
2.31k
    va_start(vargs, format);
71
2.31k
    int ret = _syntaxerror_range(tok, format, -1, -1, vargs);
72
2.31k
    va_end(vargs);
73
2.31k
    return ret;
74
2.31k
}
75
76
int
77
_PyTokenizer_syntaxerror_known_range(struct tok_state *tok,
78
                        int col_offset, int end_col_offset,
79
                        const char *format, ...)
80
590
{
81
590
    va_list vargs;
82
590
    va_start(vargs, format);
83
590
    int ret = _syntaxerror_range(tok, format, col_offset, end_col_offset, vargs);
84
590
    va_end(vargs);
85
590
    return ret;
86
590
}
87
88
int
89
_PyTokenizer_indenterror(struct tok_state *tok)
90
4
{
91
4
    tok->done = E_TABSPACE;
92
4
    tok->cur = tok->inp;
93
4
    return ERRORTOKEN;
94
4
}
95
96
char *
97
_PyTokenizer_error_ret(struct tok_state *tok) /* XXX */
98
2.17k
{
99
2.17k
    tok->decoding_erred = 1;
100
2.17k
    if ((tok->fp != NULL || tok->readline != NULL) && tok->buf != NULL) {/* see _PyTokenizer_Free */
101
0
        PyMem_Free(tok->buf);
102
0
    }
103
2.17k
    tok->buf = tok->cur = tok->inp = NULL;
104
2.17k
    tok->start = NULL;
105
2.17k
    tok->end = NULL;
106
2.17k
    tok->done = E_DECODE;
107
2.17k
    return NULL;                /* as if it were EOF */
108
2.17k
}
109
110
int
111
_PyTokenizer_warn_invalid_escape_sequence(struct tok_state *tok, int first_invalid_escape_char)
112
1.13k
{
113
1.13k
    if (!tok->report_warnings) {
114
0
        return 0;
115
0
    }
116
117
1.13k
    PyObject *msg = PyUnicode_FromFormat(
118
1.13k
        "\"\\%c\" is an invalid escape sequence. "
119
1.13k
        "Such sequences will not work in the future. "
120
1.13k
        "Did you mean \"\\\\%c\"? A raw string is also an option.",
121
1.13k
        (char) first_invalid_escape_char,
122
1.13k
        (char) first_invalid_escape_char
123
1.13k
    );
124
125
1.13k
    if (msg == NULL) {
126
0
        return -1;
127
0
    }
128
129
1.13k
    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg, tok->filename,
130
1.13k
                                 tok->lineno, tok->module, NULL) < 0) {
131
1
        Py_DECREF(msg);
132
133
1
        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
134
            /* Replace the SyntaxWarning exception with a SyntaxError
135
               to get a more accurate error report */
136
0
            PyErr_Clear();
137
138
0
            return _PyTokenizer_syntaxerror(tok,
139
0
                "\"\\%c\" is an invalid escape sequence. "
140
0
                "Did you mean \"\\\\%c\"? A raw string is also an option.",
141
0
                (char) first_invalid_escape_char,
142
0
                (char) first_invalid_escape_char);
143
0
        }
144
145
1
        return -1;
146
1
    }
147
148
1.13k
    Py_DECREF(msg);
149
1.13k
    return 0;
150
1.13k
}
151
152
int
153
_PyTokenizer_parser_warn(struct tok_state *tok, PyObject *category, const char *format, ...)
154
10.6k
{
155
10.6k
    if (!tok->report_warnings) {
156
0
        return 0;
157
0
    }
158
159
10.6k
    PyObject *errmsg;
160
10.6k
    va_list vargs;
161
10.6k
    va_start(vargs, format);
162
10.6k
    errmsg = PyUnicode_FromFormatV(format, vargs);
163
10.6k
    va_end(vargs);
164
10.6k
    if (!errmsg) {
165
0
        goto error;
166
0
    }
167
168
10.6k
    if (PyErr_WarnExplicitObject(category, errmsg, tok->filename,
169
10.6k
                                 tok->lineno, tok->module, NULL) < 0) {
170
0
        if (PyErr_ExceptionMatches(category)) {
171
            /* Replace the DeprecationWarning exception with a SyntaxError
172
               to get a more accurate error report */
173
0
            PyErr_Clear();
174
0
            _PyTokenizer_syntaxerror(tok, "%U", errmsg);
175
0
        }
176
0
        goto error;
177
0
    }
178
10.6k
    Py_DECREF(errmsg);
179
10.6k
    return 0;
180
181
0
error:
182
0
    Py_XDECREF(errmsg);
183
0
    tok->done = E_ERROR;
184
0
    return -1;
185
10.6k
}
186
187
188
/* ############## STRING MANIPULATION ############## */
189
190
char *
191
_PyTokenizer_new_string(const char *s, Py_ssize_t len, struct tok_state *tok)
192
3.94k
{
193
3.94k
    char* result = (char *)PyMem_Malloc(len + 1);
194
3.94k
    if (!result) {
195
0
        tok->done = E_NOMEM;
196
0
        return NULL;
197
0
    }
198
3.94k
    memcpy(result, s, len);
199
3.94k
    result[len] = '\0';
200
3.94k
    return result;
201
3.94k
}
202
203
PyObject *
204
3.81k
_PyTokenizer_translate_into_utf8(const char* str, const char* enc) {
205
3.81k
    PyObject *utf8;
206
3.81k
    PyObject* buf = PyUnicode_Decode(str, strlen(str), enc, NULL);
207
3.81k
    if (buf == NULL)
208
1.57k
        return NULL;
209
2.24k
    utf8 = PyUnicode_AsUTF8String(buf);
210
2.24k
    Py_DECREF(buf);
211
2.24k
    return utf8;
212
3.81k
}
213
214
char *
215
_PyTokenizer_translate_newlines(const char *s, int exec_input, int preserve_crlf,
216
20.9k
                   struct tok_state *tok) {
217
20.9k
    int skip_next_lf = 0;
218
20.9k
    size_t needed_length = strlen(s) + 2, final_length;
219
20.9k
    char *buf, *current;
220
20.9k
    char c = '\0';
221
20.9k
    buf = PyMem_Malloc(needed_length);
222
20.9k
    if (buf == NULL) {
223
0
        tok->done = E_NOMEM;
224
0
        return NULL;
225
0
    }
226
7.83M
    for (current = buf; *s; s++, current++) {
227
7.81M
        c = *s;
228
7.81M
        if (skip_next_lf) {
229
14.2k
            skip_next_lf = 0;
230
14.2k
            if (c == '\n') {
231
271
                c = *++s;
232
271
                if (!c)
233
3
                    break;
234
271
            }
235
14.2k
        }
236
7.81M
        if (!preserve_crlf && c == '\r') {
237
14.3k
            skip_next_lf = 1;
238
14.3k
            c = '\n';
239
14.3k
        }
240
7.81M
        *current = c;
241
7.81M
    }
242
    /* If this is exec input, add a newline to the end of the string if
243
       there isn't one already. */
244
20.9k
    if (exec_input && c != '\n' && c != '\0') {
245
20.2k
        *current = '\n';
246
20.2k
        current++;
247
20.2k
    }
248
20.9k
    *current = '\0';
249
20.9k
    final_length = current - buf + 1;
250
20.9k
    if (final_length < needed_length && final_length) {
251
        /* should never fail */
252
756
        char* result = PyMem_Realloc(buf, final_length);
253
756
        if (result == NULL) {
254
0
            PyMem_Free(buf);
255
0
        }
256
756
        buf = result;
257
756
    }
258
20.9k
    return buf;
259
20.9k
}
260
261
/* ############## ENCODING STUFF ############## */
262
263
264
/* See whether the file starts with a BOM. If it does,
265
   invoke the set_readline function with the new encoding.
266
   Return 1 on success, 0 on failure.  */
267
int
268
_PyTokenizer_check_bom(int get_char(struct tok_state *),
269
          void unget_char(int, struct tok_state *),
270
          int set_readline(struct tok_state *, const char *),
271
          struct tok_state *tok)
272
20.8k
{
273
20.8k
    int ch1, ch2, ch3;
274
20.8k
    ch1 = get_char(tok);
275
20.8k
    tok->decoding_state = STATE_SEEK_CODING;
276
20.8k
    if (ch1 == EOF) {
277
0
        return 1;
278
20.8k
    } else if (ch1 == 0xEF) {
279
61
        ch2 = get_char(tok);
280
61
        if (ch2 != 0xBB) {
281
54
            unget_char(ch2, tok);
282
54
            unget_char(ch1, tok);
283
54
            return 1;
284
54
        }
285
7
        ch3 = get_char(tok);
286
7
        if (ch3 != 0xBF) {
287
1
            unget_char(ch3, tok);
288
1
            unget_char(ch2, tok);
289
1
            unget_char(ch1, tok);
290
1
            return 1;
291
1
        }
292
20.7k
    } else {
293
20.7k
        unget_char(ch1, tok);
294
20.7k
        return 1;
295
20.7k
    }
296
6
    if (tok->encoding != NULL)
297
0
        PyMem_Free(tok->encoding);
298
6
    tok->encoding = _PyTokenizer_new_string("utf-8", 5, tok);
299
6
    if (!tok->encoding)
300
0
        return 0;
301
    /* No need to set_readline: input is already utf-8 */
302
6
    return 1;
303
6
}
304
305
static const char *
306
get_normal_name(const char *s)  /* for utf-8 and latin-1 */
307
3.82k
{
308
3.82k
    char buf[13];
309
3.82k
    int i;
310
23.9k
    for (i = 0; i < 12; i++) {
311
23.8k
        int c = s[i];
312
23.8k
        if (c == '\0')
313
3.68k
            break;
314
20.1k
        else if (c == '_')
315
451
            buf[i] = '-';
316
19.6k
        else
317
19.6k
            buf[i] = Py_TOLOWER(c);
318
23.8k
    }
319
3.82k
    buf[i] = '\0';
320
3.82k
    if (strcmp(buf, "utf-8") == 0 ||
321
3.82k
        strncmp(buf, "utf-8-", 6) == 0)
322
4
        return "utf-8";
323
3.81k
    else if (strcmp(buf, "latin-1") == 0 ||
324
3.81k
             strcmp(buf, "iso-8859-1") == 0 ||
325
3.81k
             strcmp(buf, "iso-latin-1") == 0 ||
326
3.81k
             strncmp(buf, "latin-1-", 8) == 0 ||
327
3.81k
             strncmp(buf, "iso-8859-1-", 11) == 0 ||
328
3.81k
             strncmp(buf, "iso-latin-1-", 12) == 0)
329
11
        return "iso-8859-1";
330
3.80k
    else
331
3.80k
        return s;
332
3.82k
}
333
334
/* Return the coding spec in S, or NULL if none is found.  */
335
static int
336
get_coding_spec(const char *s, char **spec, Py_ssize_t size, struct tok_state *tok)
337
21.4k
{
338
21.4k
    Py_ssize_t i;
339
21.4k
    *spec = NULL;
340
    /* Coding spec must be in a comment, and that comment must be
341
     * the only statement on the source code line. */
342
22.7k
    for (i = 0; i < size - 6; i++) {
343
18.1k
        if (s[i] == '#')
344
4.04k
            break;
345
14.1k
        if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
346
12.7k
            return 1;
347
14.1k
    }
348
146k
    for (; i < size - 6; i++) { /* XXX inefficient search */
349
141k
        const char* t = s + i;
350
141k
        if (memcmp(t, "coding", 6) == 0) {
351
4.49k
            const char* begin = NULL;
352
4.49k
            t += 6;
353
4.49k
            if (t[0] != ':' && t[0] != '=')
354
207
                continue;
355
5.01k
            do {
356
5.01k
                t++;
357
5.01k
            } while (t[0] == ' ' || t[0] == '\t');
358
359
4.29k
            begin = t;
360
29.1k
            while (Py_ISALNUM(t[0]) ||
361
6.77k
                   t[0] == '-' || t[0] == '_' || t[0] == '.')
362
24.8k
                t++;
363
364
4.29k
            if (begin < t) {
365
3.82k
                char* r = _PyTokenizer_new_string(begin, t - begin, tok);
366
3.82k
                const char* q;
367
3.82k
                if (!r)
368
0
                    return 0;
369
3.82k
                q = get_normal_name(r);
370
3.82k
                if (r != q) {
371
15
                    PyMem_Free(r);
372
15
                    r = _PyTokenizer_new_string(q, strlen(q), tok);
373
15
                    if (!r)
374
0
                        return 0;
375
15
                }
376
3.82k
                *spec = r;
377
3.82k
                break;
378
3.82k
            }
379
4.29k
        }
380
141k
    }
381
8.64k
    return 1;
382
8.64k
}
383
384
/* Check whether the line contains a coding spec. If it does,
385
   invoke the set_readline function for the new encoding.
386
   This function receives the tok_state and the new encoding.
387
   Return 1 on success, 0 on failure.  */
388
int
389
_PyTokenizer_check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
390
                  int set_readline(struct tok_state *, const char *))
391
21.4k
{
392
21.4k
    char *cs;
393
21.4k
    if (tok->cont_line) {
394
        /* It's a continuation line, so it can't be a coding spec. */
395
0
        tok->decoding_state = STATE_NORMAL;
396
0
        return 1;
397
0
    }
398
21.4k
    if (!get_coding_spec(line, &cs, size, tok)) {
399
0
        return 0;
400
0
    }
401
21.4k
    if (!cs) {
402
17.6k
        Py_ssize_t i;
403
19.1k
        for (i = 0; i < size; i++) {
404
18.7k
            if (line[i] == '#' || line[i] == '\n' || line[i] == '\r')
405
516
                break;
406
18.2k
            if (line[i] != ' ' && line[i] != '\t' && line[i] != '\014') {
407
                /* Stop checking coding spec after a line containing
408
                 * anything except a comment. */
409
16.7k
                tok->decoding_state = STATE_NORMAL;
410
16.7k
                break;
411
16.7k
            }
412
18.2k
        }
413
17.6k
        return 1;
414
17.6k
    }
415
3.82k
    tok->decoding_state = STATE_NORMAL;
416
3.82k
    if (tok->encoding == NULL) {
417
3.82k
        assert(tok->decoding_readline == NULL);
418
3.82k
        if (strcmp(cs, "utf-8") != 0 && !set_readline(tok, cs)) {
419
0
            _PyTokenizer_error_ret(tok);
420
0
            PyErr_Format(PyExc_SyntaxError, "encoding problem: %s", cs);
421
0
            PyMem_Free(cs);
422
0
            return 0;
423
0
        }
424
3.82k
        tok->encoding = cs;
425
3.82k
    } else {                /* then, compare cs with BOM */
426
3
        if (strcmp(tok->encoding, cs) != 0) {
427
2
            tok->line_start = line;
428
2
            tok->cur = (char *)line;
429
2
            assert(size <= INT_MAX);
430
2
            _PyTokenizer_syntaxerror_known_range(tok, 0, (int)size,
431
2
                        "encoding problem: %s with BOM", cs);
432
2
            PyMem_Free(cs);
433
2
            _PyTokenizer_error_ret(tok);
434
2
            return 0;
435
2
        }
436
1
        PyMem_Free(cs);
437
1
    }
438
3.82k
    return 1;
439
3.82k
}
440
441
/* Check whether the characters at s start a valid
442
   UTF-8 sequence. Return the number of characters forming
443
   the sequence if yes, 0 if not.  The special cases match
444
   those in stringlib/codecs.h:utf8_decode.
445
*/
446
static int
447
valid_utf8(const unsigned char* s)
448
7.23M
{
449
7.23M
    int expected = 0;
450
7.23M
    int length;
451
7.23M
    if (*s < 0x80) {
452
        /* single-byte code */
453
7.22M
        return 1;
454
7.22M
    }
455
8.49k
    else if (*s < 0xE0) {
456
        /* \xC2\x80-\xDF\xBF -- 0080-07FF */
457
3.43k
        if (*s < 0xC2) {
458
            /* invalid sequence
459
               \x80-\xBF -- continuation byte
460
               \xC0-\xC1 -- fake 0000-007F */
461
114
            return 0;
462
114
        }
463
3.31k
        expected = 1;
464
3.31k
    }
465
5.06k
    else if (*s < 0xF0) {
466
        /* \xE0\xA0\x80-\xEF\xBF\xBF -- 0800-FFFF */
467
2.32k
        if (*s == 0xE0 && *(s + 1) < 0xA0) {
468
            /* invalid sequence
469
               \xE0\x80\x80-\xE0\x9F\xBF -- fake 0000-0800 */
470
21
            return 0;
471
21
        }
472
2.30k
        else if (*s == 0xED && *(s + 1) >= 0xA0) {
473
            /* Decoding UTF-8 sequences in range \xED\xA0\x80-\xED\xBF\xBF
474
               will result in surrogates in range D800-DFFF. Surrogates are
475
               not valid UTF-8 so they are rejected.
476
               See https://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
477
               (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt */
478
20
            return 0;
479
20
        }
480
2.28k
        expected = 2;
481
2.28k
    }
482
2.73k
    else if (*s < 0xF5) {
483
        /* \xF0\x90\x80\x80-\xF4\x8F\xBF\xBF -- 10000-10FFFF */
484
2.69k
        if (*(s + 1) < 0x90 ? *s == 0xF0 : *s == 0xF4) {
485
            /* invalid sequence -- one of:
486
               \xF0\x80\x80\x80-\xF0\x8F\xBF\xBF -- fake 0000-FFFF
487
               \xF4\x90\x80\x80- -- 110000- overflow */
488
57
            return 0;
489
57
        }
490
2.63k
        expected = 3;
491
2.63k
    }
492
45
    else {
493
        /* invalid start byte */
494
45
        return 0;
495
45
    }
496
8.23k
    length = expected + 1;
497
23.4k
    for (; expected; expected--)
498
15.4k
        if (s[expected] < 0x80 || s[expected] >= 0xC0)
499
303
            return 0;
500
7.93k
    return length;
501
8.23k
}
502
503
int
504
_PyTokenizer_ensure_utf8(const char *line, struct tok_state *tok, int lineno)
505
17.0k
{
506
17.0k
    const char *badchar = NULL;
507
17.0k
    const char *c;
508
17.0k
    int length;
509
17.0k
    int col_offset = 0;
510
17.0k
    const char *line_start = line;
511
7.24M
    for (c = line; *c; c += length) {
512
7.23M
        if (!(length = valid_utf8((const unsigned char *)c))) {
513
560
            badchar = c;
514
560
            break;
515
560
        }
516
7.23M
        col_offset++;
517
7.23M
        if (*c == '\n') {
518
244k
            lineno++;
519
244k
            col_offset = 0;
520
244k
            line_start = c + 1;
521
244k
        }
522
7.23M
    }
523
17.0k
    if (badchar) {
524
560
        tok->lineno = lineno;
525
560
        tok->line_start = line_start;
526
560
        tok->cur = (char *)badchar;
527
560
        _PyTokenizer_syntaxerror_known_range(tok,
528
560
                col_offset + 1, col_offset + 1,
529
560
                "Non-UTF-8 code starting with '\\x%.2x'"
530
560
                "%s%V on line %i, "
531
560
                "but no encoding declared; "
532
560
                "see https://peps.python.org/pep-0263/ for details",
533
560
                (unsigned char)*badchar,
534
560
                tok->filename ? " in file " : "", tok->filename, "",
535
560
                lineno);
536
560
        return 0;
537
560
    }
538
16.4k
    return 1;
539
17.0k
}
540
541
542
/* ############## DEBUGGING STUFF ############## */
543
544
#ifdef Py_DEBUG
545
void
546
_PyTokenizer_print_escape(FILE *f, const char *s, Py_ssize_t size)
547
{
548
    if (s == NULL) {
549
        fputs("NULL", f);
550
        return;
551
    }
552
    putc('"', f);
553
    while (size-- > 0) {
554
        unsigned char c = *s++;
555
        switch (c) {
556
            case '\n': fputs("\\n", f); break;
557
            case '\r': fputs("\\r", f); break;
558
            case '\t': fputs("\\t", f); break;
559
            case '\f': fputs("\\f", f); break;
560
            case '\'': fputs("\\'", f); break;
561
            case '"': fputs("\\\"", f); break;
562
            default:
563
                if (0x20 <= c && c <= 0x7f)
564
                    putc(c, f);
565
                else
566
                    fprintf(f, "\\x%02x", c);
567
        }
568
    }
569
    putc('"', f);
570
}
571
572
void
573
_PyTokenizer_tok_dump(int type, char *start, char *end)
574
{
575
    fprintf(stderr, "%s", _PyParser_TokenNames[type]);
576
    if (type == NAME || type == NUMBER || type == STRING || type == OP)
577
        fprintf(stderr, "(%.*s)", (int)(end - start), start);
578
}
579
#endif