/src/cpython/Python/clinic/Python-tokenize.c.h
Line | Count | Source (jump to first uncovered line) |
1 | | /*[clinic input] |
2 | | preserve |
3 | | [clinic start generated code]*/ |
4 | | |
5 | | #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) |
6 | | # include "pycore_gc.h" // PyGC_Head |
7 | | # include "pycore_runtime.h" // _Py_ID() |
8 | | #endif |
9 | | #include "pycore_modsupport.h" // _PyArg_UnpackKeywords() |
10 | | |
11 | | static PyObject * |
12 | | tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline, |
13 | | int extra_tokens, const char *encoding); |
14 | | |
/*
 * Argument Clinic generated constructor stub for the tokenizer-iterator type.
 *
 * Parses the Python-level signature
 *     tokenizeriter(readline, /, *, extra_tokens, encoding=None)
 * and forwards the converted values to tokenizeriter_new_impl():
 *   readline     -- positional-only object, passed through unconverted
 *   extra_tokens -- required keyword-only; coerced to 0/1 via PyObject_IsTrue
 *   encoding     -- optional keyword-only str; converted to a UTF-8 C string
 *                   (embedded NUL bytes rejected); stays NULL when omitted
 *
 * NOTE(review): this file is generated by Argument Clinic (see the checksum
 * trailer); do not hand-edit the logic — regenerate instead.
 */
static PyObject *
tokenizeriter_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{
    PyObject *return_value = NULL;
    #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)

    /* Core-build fast path: a statically allocated "tuple" of interned
     * keyword names (&_Py_ID(...)) handed to the parser so keyword lookup
     * avoids building a tuple at call time.  The leading PyGC_Head pads the
     * object so the GC header math works even though this object is never
     * tracked; ob_hash = -1 marks the tuple hash as not-yet-computed. */
    #define NUM_KEYWORDS 2
    static struct {
        PyGC_Head _this_is_not_used;
        PyObject_VAR_HEAD
        Py_hash_t ob_hash;
        PyObject *ob_item[NUM_KEYWORDS];
    } _kwtuple = {
        .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS)
        .ob_hash = -1,
        .ob_item = { &_Py_ID(extra_tokens), &_Py_ID(encoding), },
    };
    #undef NUM_KEYWORDS
    #define KWTUPLE (&_kwtuple.ob_base.ob_base)

    #else  // !Py_BUILD_CORE
    #  define KWTUPLE NULL
    #endif  // !Py_BUILD_CORE

    /* "" marks the positional-only readline slot; the parser state below is
     * static and shared across calls (initialized once, read-only afterwards
     * apart from parser-internal caching). */
    static const char * const _keywords[] = {"", "extra_tokens", "encoding", NULL};
    static _PyArg_Parser _parser = {
        .keywords = _keywords,
        .fname = "tokenizeriter",
        .kwtuple = KWTUPLE,
    };
    #undef KWTUPLE
    PyObject *argsbuf[3];
    PyObject * const *fastargs;
    Py_ssize_t nargs = PyTuple_GET_SIZE(args);
    /* Number of optional arguments actually supplied: total (positional +
     * keyword) minus the 2 required parameters (readline, extra_tokens). */
    Py_ssize_t noptargs = nargs + (kwargs ? PyDict_GET_SIZE(kwargs) : 0) - 2;
    PyObject *readline;
    int extra_tokens;
    const char *encoding = NULL;   /* default when 'encoding' is omitted */

    /* Exactly 1 positional arg allowed (readline); at least 1 keyword
     * required (extra_tokens); no *args.  On success fastargs holds the
     * arguments in declaration order, borrowed references. */
    fastargs = _PyArg_UnpackKeywords(_PyTuple_CAST(args)->ob_item, nargs, kwargs, NULL, &_parser,
            /*minpos*/ 1, /*maxpos*/ 1, /*minkw*/ 1, /*varpos*/ 0, argsbuf);
    if (!fastargs) {
        goto exit;
    }
    readline = fastargs[0];
    /* PyObject_IsTrue may invoke arbitrary __bool__/__len__ and can fail,
     * hence the < 0 error check rather than treating it as a plain bool. */
    extra_tokens = PyObject_IsTrue(fastargs[1]);
    if (extra_tokens < 0) {
        goto exit;
    }
    if (!noptargs) {
        /* 'encoding' not passed: keep the NULL default. */
        goto skip_optional_kwonly;
    }
    if (!PyUnicode_Check(fastargs[2])) {
        _PyArg_BadArgument("tokenizeriter", "argument 'encoding'", "str", fastargs[2]);
        goto exit;
    }
    Py_ssize_t encoding_length;
    /* Borrowed UTF-8 buffer cached inside the str object — no free needed;
     * it remains valid as long as the str object is alive. */
    encoding = PyUnicode_AsUTF8AndSize(fastargs[2], &encoding_length);
    if (encoding == NULL) {
        goto exit;
    }
    /* A C string cannot represent embedded NULs: if strlen() stops short of
     * the true codepoint-encoded length, the str contained U+0000. */
    if (strlen(encoding) != (size_t)encoding_length) {
        PyErr_SetString(PyExc_ValueError, "embedded null character");
        goto exit;
    }
skip_optional_kwonly:
    return_value = tokenizeriter_new_impl(type, readline, extra_tokens, encoding);

exit:
    return return_value;
}
86 | | /*[clinic end generated code: output=4c448f34d9c835c0 input=a9049054013a1b77]*/ |