/src/Python-3.8.3/Python/pystate.c
Line | Count | Source |
1 | | |
2 | | /* Thread and interpreter state structures and their interfaces */ |
3 | | |
4 | | #include "Python.h" |
5 | | #include "pycore_ceval.h" |
6 | | #include "pycore_initconfig.h" |
7 | | #include "pycore_pymem.h" |
8 | | #include "pycore_pystate.h" |
9 | | #include "pycore_pylifecycle.h" |
10 | | |
11 | | /* -------------------------------------------------------------------------- |
12 | | CAUTION |
13 | | |
14 | | Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A |
15 | | number of these functions are advertised as safe to call when the GIL isn't |
16 | | held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's |
17 | | debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL |
18 | | to avoid the expense of doing their own locking). |
19 | | -------------------------------------------------------------------------- */ |
20 | | |
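/* Illustrative sketch (editor's addition, not part of pystate.c): the raw-allocator
   pattern the CAUTION above prescribes.  PyMem_RawMalloc()/PyMem_RawFree() stay
   safe without the GIL held, unlike the object-domain allocators that debug builds
   redirect to the non-thread-safe debugging obmalloc.  The function name is
   hypothetical. */
static void
raw_alloc_sketch(void)
{
    void *p = PyMem_RawMalloc(64);      /* OK with or without the GIL held */
    if (p == NULL) {
        return;                         /* note: no Python exception is set */
    }
    /* ... use p ... */
    PyMem_RawFree(p);                   /* matching raw free; never PyMem_Free() here */
}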
21 | | #ifdef HAVE_DLOPEN |
22 | | #ifdef HAVE_DLFCN_H |
23 | | #include <dlfcn.h> |
24 | | #endif |
25 | | #if !HAVE_DECL_RTLD_LAZY |
26 | | #define RTLD_LAZY 1 |
27 | | #endif |
28 | | #endif |
29 | | |
30 | | #ifdef __cplusplus |
31 | | extern "C" { |
32 | | #endif |
33 | | |
34 | | #define _PyRuntimeGILState_GetThreadState(gilstate) \ |
35 | 15.9k | ((PyThreadState*)_Py_atomic_load_relaxed(&(gilstate)->tstate_current)) |
36 | | #define _PyRuntimeGILState_SetThreadState(gilstate, value) \ |
37 | 15.9k | _Py_atomic_store_relaxed(&(gilstate)->tstate_current, \ |
38 | 15.9k | (uintptr_t)(value)) |
39 | | |
40 | | /* Forward declarations */ |
41 | | static PyThreadState *_PyGILState_GetThisThreadState(struct _gilstate_runtime_state *gilstate); |
42 | | static void _PyThreadState_Delete(_PyRuntimeState *runtime, PyThreadState *tstate); |
43 | | |
44 | | |
45 | | static PyStatus |
46 | | _PyRuntimeState_Init_impl(_PyRuntimeState *runtime) |
47 | 14 | { |
48 | | /* We preserve the hook across init, because there is |
49 | | currently no public API to set it between runtime |
50 | | initialization and interpreter initialization. */ |
51 | 14 | void *open_code_hook = runtime->open_code_hook; |
52 | 14 | void *open_code_userdata = runtime->open_code_userdata; |
53 | 14 | _Py_AuditHookEntry *audit_hook_head = runtime->audit_hook_head; |
54 | | |
55 | 14 | memset(runtime, 0, sizeof(*runtime)); |
56 | | |
57 | 14 | runtime->open_code_hook = open_code_hook; |
58 | 14 | runtime->open_code_userdata = open_code_userdata; |
59 | 14 | runtime->audit_hook_head = audit_hook_head; |
60 | | |
61 | 14 | _PyGC_Initialize(&runtime->gc); |
62 | 14 | _PyEval_Initialize(&runtime->ceval); |
63 | | |
64 | 14 | PyPreConfig_InitPythonConfig(&runtime->preconfig); |
65 | | |
66 | 14 | runtime->gilstate.check_enabled = 1; |
67 | | |
68 | | /* A TSS key must be initialized with Py_tss_NEEDS_INIT |
69 | | in accordance with the specification. */ |
70 | 14 | Py_tss_t initial = Py_tss_NEEDS_INIT; |
71 | 14 | runtime->gilstate.autoTSSkey = initial; |
72 | | |
73 | 14 | runtime->interpreters.mutex = PyThread_allocate_lock(); |
74 | 14 | if (runtime->interpreters.mutex == NULL) { |
75 | 0 | return _PyStatus_ERR("Can't initialize threads for interpreter"); |
76 | 0 | } |
77 | 14 | runtime->interpreters.next_id = -1; |
78 | | |
79 | 14 | runtime->xidregistry.mutex = PyThread_allocate_lock(); |
80 | 14 | if (runtime->xidregistry.mutex == NULL) { |
81 | 0 | return _PyStatus_ERR("Can't initialize threads for cross-interpreter data registry"); |
82 | 0 | } |
83 | | |
84 | | // Set it to the ID of the main thread of the main interpreter. |
85 | 14 | runtime->main_thread = PyThread_get_thread_ident(); |
86 | | |
87 | 14 | return _PyStatus_OK(); |
88 | 14 | } |
89 | | |
90 | | PyStatus |
91 | | _PyRuntimeState_Init(_PyRuntimeState *runtime) |
92 | 14 | { |
93 | | /* Force default allocator, since _PyRuntimeState_Fini() must |
94 | | use the same allocator as this function. */ |
95 | 14 | PyMemAllocatorEx old_alloc; |
96 | 14 | _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
97 | | |
98 | 14 | PyStatus status = _PyRuntimeState_Init_impl(runtime); |
99 | | |
100 | 14 | PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
101 | 14 | return status; |
102 | 14 | } |
103 | | |
104 | | void |
105 | | _PyRuntimeState_Fini(_PyRuntimeState *runtime) |
106 | 0 | { |
107 | | /* Force the allocator used by _PyRuntimeState_Init(). */ |
108 | 0 | PyMemAllocatorEx old_alloc; |
109 | 0 | _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
110 | |
111 | 0 | if (runtime->interpreters.mutex != NULL) { |
112 | 0 | PyThread_free_lock(runtime->interpreters.mutex); |
113 | 0 | runtime->interpreters.mutex = NULL; |
114 | 0 | } |
115 | |
116 | 0 | if (runtime->xidregistry.mutex != NULL) { |
117 | 0 | PyThread_free_lock(runtime->xidregistry.mutex); |
118 | 0 | runtime->xidregistry.mutex = NULL; |
119 | 0 | } |
120 | |
121 | 0 | PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
122 | 0 | } |
123 | | |
124 | | /* This function is called from PyOS_AfterFork_Child to ensure that |
125 | | * newly created child processes do not share locks with the parent. |
126 | | */ |
127 | | |
128 | | void |
129 | | _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime) |
130 | 0 | { |
131 | | // This was initially set in _PyRuntimeState_Init(). |
132 | 0 | runtime->main_thread = PyThread_get_thread_ident(); |
133 | | |
134 | | /* Force default allocator, since _PyRuntimeState_Fini() must |
135 | | use the same allocator as this function. */ |
136 | 0 | PyMemAllocatorEx old_alloc; |
137 | 0 | _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
138 | |
139 | 0 | runtime->interpreters.mutex = PyThread_allocate_lock(); |
140 | 0 | runtime->interpreters.main->id_mutex = PyThread_allocate_lock(); |
141 | 0 | runtime->xidregistry.mutex = PyThread_allocate_lock(); |
142 | |
143 | 0 | PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
144 | |
145 | 0 | if (runtime->interpreters.mutex == NULL) { |
146 | 0 | Py_FatalError("Can't initialize lock for runtime interpreters"); |
147 | 0 | } |
148 | | |
149 | 0 | if (runtime->interpreters.main->id_mutex == NULL) { |
150 | 0 | Py_FatalError("Can't initialize ID lock for main interpreter"); |
151 | 0 | } |
152 | | |
153 | 0 | if (runtime->xidregistry.mutex == NULL) { |
154 | 0 | Py_FatalError("Can't initialize lock for cross-interpreter data registry"); |
155 | 0 | } |
156 | 0 | } |
157 | | |
158 | | #define HEAD_LOCK(runtime) \ |
159 | 28 | PyThread_acquire_lock((runtime)->interpreters.mutex, WAIT_LOCK) |
160 | | #define HEAD_UNLOCK(runtime) \ |
161 | 28 | PyThread_release_lock((runtime)->interpreters.mutex) |
162 | | |
163 | | /* Forward declaration */ |
164 | | static void _PyGILState_NoteThreadState( |
165 | | struct _gilstate_runtime_state *gilstate, PyThreadState* tstate); |
166 | | |
167 | | PyStatus |
168 | | _PyInterpreterState_Enable(_PyRuntimeState *runtime) |
169 | 14 | { |
170 | 14 | struct pyinterpreters *interpreters = &runtime->interpreters; |
171 | 14 | interpreters->next_id = 0; |
172 | | |
173 | | /* Py_Finalize() calls _PyRuntimeState_Fini() which clears the mutex. |
174 | | Create a new mutex if needed. */ |
175 | 14 | if (interpreters->mutex == NULL) { |
176 | | /* Force default allocator, since _PyRuntimeState_Fini() must |
177 | | use the same allocator as this function. */ |
178 | 0 | PyMemAllocatorEx old_alloc; |
179 | 0 | _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
180 | |
181 | 0 | interpreters->mutex = PyThread_allocate_lock(); |
182 | |
183 | 0 | PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc); |
184 | |
185 | 0 | if (interpreters->mutex == NULL) { |
186 | 0 | return _PyStatus_ERR("Can't initialize threads for interpreter"); |
187 | 0 | } |
188 | 0 | } |
189 | | |
190 | 14 | return _PyStatus_OK(); |
191 | 14 | } |
192 | | |
193 | | PyInterpreterState * |
194 | | PyInterpreterState_New(void) |
195 | 14 | { |
196 | 14 | if (PySys_Audit("cpython.PyInterpreterState_New", NULL) < 0) { |
197 | 0 | return NULL; |
198 | 0 | } |
199 | | |
200 | 14 | PyInterpreterState *interp = PyMem_RawMalloc(sizeof(PyInterpreterState)); |
201 | 14 | if (interp == NULL) { |
202 | 0 | return NULL; |
203 | 0 | } |
204 | | |
205 | 14 | memset(interp, 0, sizeof(*interp)); |
206 | 14 | interp->id_refcount = -1; |
207 | 14 | interp->check_interval = 100; |
208 | | |
209 | 14 | PyConfig_InitPythonConfig(&interp->config); |
210 | | |
211 | 14 | interp->eval_frame = _PyEval_EvalFrameDefault; |
212 | 14 | #ifdef HAVE_DLOPEN |
213 | 14 | #if HAVE_DECL_RTLD_NOW |
214 | 14 | interp->dlopenflags = RTLD_NOW; |
215 | | #else |
216 | | interp->dlopenflags = RTLD_LAZY; |
217 | | #endif |
218 | 14 | #endif |
219 | | |
220 | 14 | _PyRuntimeState *runtime = &_PyRuntime; |
221 | 14 | struct pyinterpreters *interpreters = &runtime->interpreters; |
222 | | |
223 | 14 | HEAD_LOCK(runtime); |
224 | 14 | if (interpreters->next_id < 0) { |
225 | | /* overflow or Py_Initialize() not called! */ |
226 | 0 | PyErr_SetString(PyExc_RuntimeError, |
227 | 0 | "failed to get an interpreter ID"); |
228 | 0 | PyMem_RawFree(interp); |
229 | 0 | interp = NULL; |
230 | 0 | } |
231 | 14 | else { |
232 | 14 | interp->id = interpreters->next_id; |
233 | 14 | interpreters->next_id += 1; |
234 | 14 | interp->next = interpreters->head; |
235 | 14 | if (interpreters->main == NULL) { |
236 | 14 | interpreters->main = interp; |
237 | 14 | } |
238 | 14 | interpreters->head = interp; |
239 | 14 | } |
240 | 14 | HEAD_UNLOCK(runtime); |
241 | | |
242 | 14 | if (interp == NULL) { |
243 | 0 | return NULL; |
244 | 0 | } |
245 | | |
246 | 14 | interp->tstate_next_unique_id = 0; |
247 | | |
248 | 14 | interp->audit_hooks = NULL; |
249 | | |
250 | 14 | return interp; |
251 | 14 | } |
252 | | |
253 | | |
254 | | static void |
255 | | _PyInterpreterState_Clear(_PyRuntimeState *runtime, PyInterpreterState *interp) |
256 | 0 | { |
257 | 0 | if (PySys_Audit("cpython.PyInterpreterState_Clear", NULL) < 0) { |
258 | 0 | PyErr_Clear(); |
259 | 0 | } |
260 | |
261 | 0 | HEAD_LOCK(runtime); |
262 | 0 | for (PyThreadState *p = interp->tstate_head; p != NULL; p = p->next) { |
263 | 0 | PyThreadState_Clear(p); |
264 | 0 | } |
265 | 0 | HEAD_UNLOCK(runtime); |
266 | |
267 | 0 | Py_CLEAR(interp->audit_hooks); |
268 | |
269 | 0 | PyConfig_Clear(&interp->config); |
270 | 0 | Py_CLEAR(interp->codec_search_path); |
271 | 0 | Py_CLEAR(interp->codec_search_cache); |
272 | 0 | Py_CLEAR(interp->codec_error_registry); |
273 | 0 | Py_CLEAR(interp->modules); |
274 | 0 | Py_CLEAR(interp->modules_by_index); |
275 | 0 | Py_CLEAR(interp->sysdict); |
276 | 0 | Py_CLEAR(interp->builtins); |
277 | 0 | Py_CLEAR(interp->builtins_copy); |
278 | 0 | Py_CLEAR(interp->importlib); |
279 | 0 | Py_CLEAR(interp->import_func); |
280 | 0 | Py_CLEAR(interp->dict); |
281 | 0 | #ifdef HAVE_FORK |
282 | 0 | Py_CLEAR(interp->before_forkers); |
283 | 0 | Py_CLEAR(interp->after_forkers_parent); |
284 | 0 | Py_CLEAR(interp->after_forkers_child); |
285 | 0 | #endif |
286 | 0 | if (runtime->finalizing == NULL) { |
287 | 0 | _PyWarnings_Fini(interp); |
288 | 0 | } |
289 | | // XXX Once we have one allocator per interpreter (i.e. |
290 | | // per-interpreter GC) we must ensure that all of the interpreter's |
291 | | // objects have been cleaned up at that point. |
292 | 0 | } |
293 | | |
294 | | void |
295 | | PyInterpreterState_Clear(PyInterpreterState *interp) |
296 | 0 | { |
297 | 0 | _PyInterpreterState_Clear(&_PyRuntime, interp); |
298 | 0 | } |
299 | | |
300 | | |
301 | | static void |
302 | | zapthreads(_PyRuntimeState *runtime, PyInterpreterState *interp) |
303 | 0 | { |
304 | 0 | PyThreadState *p; |
305 | | /* No need to lock the mutex here because this should only happen |
306 | | when the threads are all really dead (XXX famous last words). */ |
307 | 0 | while ((p = interp->tstate_head) != NULL) { |
308 | 0 | _PyThreadState_Delete(runtime, p); |
309 | 0 | } |
310 | 0 | } |
311 | | |
312 | | |
313 | | static void |
314 | | _PyInterpreterState_Delete(_PyRuntimeState *runtime, |
315 | | PyInterpreterState *interp) |
316 | 0 | { |
317 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
318 | 0 | zapthreads(runtime, interp); |
319 | 0 | HEAD_LOCK(runtime); |
320 | 0 | PyInterpreterState **p; |
321 | 0 | for (p = &interpreters->head; ; p = &(*p)->next) { |
322 | 0 | if (*p == NULL) { |
323 | 0 | Py_FatalError("PyInterpreterState_Delete: invalid interp"); |
324 | 0 | } |
325 | 0 | if (*p == interp) { |
326 | 0 | break; |
327 | 0 | } |
328 | 0 | } |
329 | 0 | if (interp->tstate_head != NULL) { |
330 | 0 | Py_FatalError("PyInterpreterState_Delete: remaining threads"); |
331 | 0 | } |
332 | 0 | *p = interp->next; |
333 | 0 | if (interpreters->main == interp) { |
334 | 0 | interpreters->main = NULL; |
335 | 0 | if (interpreters->head != NULL) { |
336 | 0 | Py_FatalError("PyInterpreterState_Delete: remaining subinterpreters"); |
337 | 0 | } |
338 | 0 | } |
339 | 0 | HEAD_UNLOCK(runtime); |
340 | 0 | if (interp->id_mutex != NULL) { |
341 | 0 | PyThread_free_lock(interp->id_mutex); |
342 | 0 | } |
343 | 0 | PyMem_RawFree(interp); |
344 | 0 | } |
345 | | |
346 | | |
347 | | void |
348 | | PyInterpreterState_Delete(PyInterpreterState *interp) |
349 | 0 | { |
350 | 0 | _PyInterpreterState_Delete(&_PyRuntime, interp); |
351 | 0 | } |
352 | | |
353 | | |
354 | | /* |
355 | | * Delete all interpreter states except the main interpreter. If there |
356 | | * is a current interpreter state, it *must* be the main interpreter. |
357 | | */ |
358 | | void |
359 | | _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime) |
360 | 0 | { |
361 | 0 | struct _gilstate_runtime_state *gilstate = &runtime->gilstate; |
362 | 0 | struct pyinterpreters *interpreters = &runtime->interpreters; |
363 | |
364 | 0 | PyThreadState *tstate = _PyThreadState_Swap(gilstate, NULL); |
365 | 0 | if (tstate != NULL && tstate->interp != interpreters->main) { |
366 | 0 | Py_FatalError("PyInterpreterState_DeleteExceptMain: not main interpreter"); |
367 | 0 | } |
368 | | |
369 | 0 | HEAD_LOCK(runtime); |
370 | 0 | PyInterpreterState *interp = interpreters->head; |
371 | 0 | interpreters->head = NULL; |
372 | 0 | while (interp != NULL) { |
373 | 0 | if (interp == interpreters->main) { |
374 | 0 | interpreters->main->next = NULL; |
375 | 0 | interpreters->head = interp; |
376 | 0 | interp = interp->next; |
377 | 0 | continue; |
378 | 0 | } |
379 | | |
380 | 0 | _PyInterpreterState_Clear(runtime, interp); // XXX must activate? |
381 | 0 | zapthreads(runtime, interp); |
382 | 0 | if (interp->id_mutex != NULL) { |
383 | 0 | PyThread_free_lock(interp->id_mutex); |
384 | 0 | } |
385 | 0 | PyInterpreterState *prev_interp = interp; |
386 | 0 | interp = interp->next; |
387 | 0 | PyMem_RawFree(prev_interp); |
388 | 0 | } |
389 | 0 | HEAD_UNLOCK(runtime); |
390 | |
391 | 0 | if (interpreters->head == NULL) { |
392 | 0 | Py_FatalError("PyInterpreterState_DeleteExceptMain: missing main"); |
393 | 0 | } |
394 | 0 | _PyThreadState_Swap(gilstate, tstate); |
395 | 0 | } |
396 | | |
397 | | |
398 | | PyInterpreterState * |
399 | | _PyInterpreterState_Get(void) |
400 | 388 | { |
401 | 388 | PyThreadState *tstate = _PyThreadState_GET(); |
402 | 388 | if (tstate == NULL) { |
403 | 0 | Py_FatalError("_PyInterpreterState_Get(): no current thread state"); |
404 | 0 | } |
405 | 388 | PyInterpreterState *interp = tstate->interp; |
406 | 388 | if (interp == NULL) { |
407 | 0 | Py_FatalError("_PyInterpreterState_Get(): no current interpreter"); |
408 | 0 | } |
409 | 388 | return interp; |
410 | 388 | } |
411 | | |
412 | | |
413 | | int64_t |
414 | | PyInterpreterState_GetID(PyInterpreterState *interp) |
415 | 0 | { |
416 | 0 | if (interp == NULL) { |
417 | 0 | PyErr_SetString(PyExc_RuntimeError, "no interpreter provided"); |
418 | 0 | return -1; |
419 | 0 | } |
420 | 0 | return interp->id; |
421 | 0 | } |
422 | | |
423 | | |
424 | | static PyInterpreterState * |
425 | | interp_look_up_id(_PyRuntimeState *runtime, PY_INT64_T requested_id) |
426 | 0 | { |
427 | 0 | PyInterpreterState *interp = runtime->interpreters.head; |
428 | 0 | while (interp != NULL) { |
429 | 0 | PY_INT64_T id = PyInterpreterState_GetID(interp); |
430 | 0 | if (id < 0) { |
431 | 0 | return NULL; |
432 | 0 | } |
433 | 0 | if (requested_id == id) { |
434 | 0 | return interp; |
435 | 0 | } |
436 | 0 | interp = PyInterpreterState_Next(interp); |
437 | 0 | } |
438 | 0 | return NULL; |
439 | 0 | } |
440 | | |
441 | | PyInterpreterState * |
442 | | _PyInterpreterState_LookUpID(PY_INT64_T requested_id) |
443 | 0 | { |
444 | 0 | PyInterpreterState *interp = NULL; |
445 | 0 | if (requested_id >= 0) { |
446 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
447 | 0 | HEAD_LOCK(runtime); |
448 | 0 | interp = interp_look_up_id(runtime, requested_id); |
449 | 0 | HEAD_UNLOCK(runtime); |
450 | 0 | } |
451 | 0 | if (interp == NULL && !PyErr_Occurred()) { |
452 | 0 | PyErr_Format(PyExc_RuntimeError, |
453 | 0 | "unrecognized interpreter ID %lld", requested_id); |
454 | 0 | } |
455 | 0 | return interp; |
456 | 0 | } |
457 | | |
458 | | |
459 | | int |
460 | | _PyInterpreterState_IDInitref(PyInterpreterState *interp) |
461 | 0 | { |
462 | 0 | if (interp->id_mutex != NULL) { |
463 | 0 | return 0; |
464 | 0 | } |
465 | 0 | interp->id_mutex = PyThread_allocate_lock(); |
466 | 0 | if (interp->id_mutex == NULL) { |
467 | 0 | PyErr_SetString(PyExc_RuntimeError, |
468 | 0 | "failed to create init interpreter ID mutex"); |
469 | 0 | return -1; |
470 | 0 | } |
471 | 0 | interp->id_refcount = 0; |
472 | 0 | return 0; |
473 | 0 | } |
474 | | |
475 | | |
476 | | void |
477 | | _PyInterpreterState_IDIncref(PyInterpreterState *interp) |
478 | 0 | { |
479 | 0 | if (interp->id_mutex == NULL) { |
480 | 0 | return; |
481 | 0 | } |
482 | 0 | PyThread_acquire_lock(interp->id_mutex, WAIT_LOCK); |
483 | 0 | interp->id_refcount += 1; |
484 | 0 | PyThread_release_lock(interp->id_mutex); |
485 | 0 | } |
486 | | |
487 | | |
488 | | void |
489 | | _PyInterpreterState_IDDecref(PyInterpreterState *interp) |
490 | 0 | { |
491 | 0 | if (interp->id_mutex == NULL) { |
492 | 0 | return; |
493 | 0 | } |
494 | 0 | struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate; |
495 | 0 | PyThread_acquire_lock(interp->id_mutex, WAIT_LOCK); |
496 | 0 | assert(interp->id_refcount != 0); |
497 | 0 | interp->id_refcount -= 1; |
498 | 0 | int64_t refcount = interp->id_refcount; |
499 | 0 | PyThread_release_lock(interp->id_mutex); |
500 | |
501 | 0 | if (refcount == 0 && interp->requires_idref) { |
502 | | // XXX Using the "head" thread isn't strictly correct. |
503 | 0 | PyThreadState *tstate = PyInterpreterState_ThreadHead(interp); |
504 | | // XXX Possible GILState issues? |
505 | 0 | PyThreadState *save_tstate = _PyThreadState_Swap(gilstate, tstate); |
506 | 0 | Py_EndInterpreter(tstate); |
507 | 0 | _PyThreadState_Swap(gilstate, save_tstate); |
508 | 0 | } |
509 | 0 | } |
510 | | |
511 | | int |
512 | | _PyInterpreterState_RequiresIDRef(PyInterpreterState *interp) |
513 | 0 | { |
514 | 0 | return interp->requires_idref; |
515 | 0 | } |
516 | | |
517 | | void |
518 | | _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required) |
519 | 0 | { |
520 | 0 | interp->requires_idref = required ? 1 : 0; |
521 | 0 | } |
522 | | |
523 | | PyObject * |
524 | | _PyInterpreterState_GetMainModule(PyInterpreterState *interp) |
525 | 0 | { |
526 | 0 | if (interp->modules == NULL) { |
527 | 0 | PyErr_SetString(PyExc_RuntimeError, "interpreter not initialized"); |
528 | 0 | return NULL; |
529 | 0 | } |
530 | 0 | return PyMapping_GetItemString(interp->modules, "__main__"); |
531 | 0 | } |
532 | | |
533 | | PyObject * |
534 | | PyInterpreterState_GetDict(PyInterpreterState *interp) |
535 | 0 | { |
536 | 0 | if (interp->dict == NULL) { |
537 | 0 | interp->dict = PyDict_New(); |
538 | 0 | if (interp->dict == NULL) { |
539 | 0 | PyErr_Clear(); |
540 | 0 | } |
541 | 0 | } |
542 | | /* Returning NULL means no per-interpreter dict is available. */ |
543 | 0 | return interp->dict; |
544 | 0 | } |
545 | | |
546 | | /* Default implementation for _PyThreadState_GetFrame */ |
547 | | static struct _frame * |
548 | | threadstate_getframe(PyThreadState *self) |
549 | 975 | { |
550 | 975 | return self->frame; |
551 | 975 | } |
552 | | |
553 | | static PyThreadState * |
554 | | new_threadstate(PyInterpreterState *interp, int init) |
555 | 14 | { |
556 | 14 | _PyRuntimeState *runtime = &_PyRuntime; |
557 | 14 | PyThreadState *tstate = (PyThreadState *)PyMem_RawMalloc(sizeof(PyThreadState)); |
558 | 14 | if (tstate == NULL) { |
559 | 0 | return NULL; |
560 | 0 | } |
561 | | |
562 | 14 | if (_PyThreadState_GetFrame == NULL) { |
563 | 14 | _PyThreadState_GetFrame = threadstate_getframe; |
564 | 14 | } |
565 | | |
566 | 14 | tstate->interp = interp; |
567 | | |
568 | 14 | tstate->frame = NULL; |
569 | 14 | tstate->recursion_depth = 0; |
570 | 14 | tstate->overflowed = 0; |
571 | 14 | tstate->recursion_critical = 0; |
572 | 14 | tstate->stackcheck_counter = 0; |
573 | 14 | tstate->tracing = 0; |
574 | 14 | tstate->use_tracing = 0; |
575 | 14 | tstate->gilstate_counter = 0; |
576 | 14 | tstate->async_exc = NULL; |
577 | 14 | tstate->thread_id = PyThread_get_thread_ident(); |
578 | | |
579 | 14 | tstate->dict = NULL; |
580 | | |
581 | 14 | tstate->curexc_type = NULL; |
582 | 14 | tstate->curexc_value = NULL; |
583 | 14 | tstate->curexc_traceback = NULL; |
584 | | |
585 | 14 | tstate->exc_state.exc_type = NULL; |
586 | 14 | tstate->exc_state.exc_value = NULL; |
587 | 14 | tstate->exc_state.exc_traceback = NULL; |
588 | 14 | tstate->exc_state.previous_item = NULL; |
589 | 14 | tstate->exc_info = &tstate->exc_state; |
590 | | |
591 | 14 | tstate->c_profilefunc = NULL; |
592 | 14 | tstate->c_tracefunc = NULL; |
593 | 14 | tstate->c_profileobj = NULL; |
594 | 14 | tstate->c_traceobj = NULL; |
595 | | |
596 | 14 | tstate->trash_delete_nesting = 0; |
597 | 14 | tstate->trash_delete_later = NULL; |
598 | 14 | tstate->on_delete = NULL; |
599 | 14 | tstate->on_delete_data = NULL; |
600 | | |
601 | 14 | tstate->coroutine_origin_tracking_depth = 0; |
602 | | |
603 | 14 | tstate->async_gen_firstiter = NULL; |
604 | 14 | tstate->async_gen_finalizer = NULL; |
605 | | |
606 | 14 | tstate->context = NULL; |
607 | 14 | tstate->context_ver = 1; |
608 | | |
609 | 14 | if (init) { |
610 | 14 | _PyThreadState_Init(runtime, tstate); |
611 | 14 | } |
612 | | |
613 | 14 | HEAD_LOCK(runtime); |
614 | 14 | tstate->id = ++interp->tstate_next_unique_id; |
615 | 14 | tstate->prev = NULL; |
616 | 14 | tstate->next = interp->tstate_head; |
617 | 14 | if (tstate->next) |
618 | 0 | tstate->next->prev = tstate; |
619 | 14 | interp->tstate_head = tstate; |
620 | 14 | HEAD_UNLOCK(runtime); |
621 | | |
622 | 14 | return tstate; |
623 | 14 | } |
624 | | |
625 | | PyThreadState * |
626 | | PyThreadState_New(PyInterpreterState *interp) |
627 | 14 | { |
628 | 14 | return new_threadstate(interp, 1); |
629 | 14 | } |
630 | | |
631 | | PyThreadState * |
632 | | _PyThreadState_Prealloc(PyInterpreterState *interp) |
633 | 0 | { |
634 | 0 | return new_threadstate(interp, 0); |
635 | 0 | } |
636 | | |
637 | | void |
638 | | _PyThreadState_Init(_PyRuntimeState *runtime, PyThreadState *tstate) |
639 | 14 | { |
640 | 14 | _PyGILState_NoteThreadState(&runtime->gilstate, tstate); |
641 | 14 | } |
642 | | |
643 | | PyObject* |
644 | | PyState_FindModule(struct PyModuleDef* module) |
645 | 0 | { |
646 | 0 | Py_ssize_t index = module->m_base.m_index; |
647 | 0 | PyInterpreterState *state = _PyInterpreterState_GET_UNSAFE(); |
648 | 0 | PyObject *res; |
649 | 0 | if (module->m_slots) { |
650 | 0 | return NULL; |
651 | 0 | } |
652 | 0 | if (index == 0) |
653 | 0 | return NULL; |
654 | 0 | if (state->modules_by_index == NULL) |
655 | 0 | return NULL; |
656 | 0 | if (index >= PyList_GET_SIZE(state->modules_by_index)) |
657 | 0 | return NULL; |
658 | 0 | res = PyList_GET_ITEM(state->modules_by_index, index); |
659 | 0 | return res==Py_None ? NULL : res; |
660 | 0 | } |
661 | | |
662 | | int |
663 | | _PyState_AddModule(PyObject* module, struct PyModuleDef* def) |
664 | 203 | { |
665 | 203 | PyInterpreterState *state; |
666 | 203 | if (!def) { |
667 | 0 | assert(PyErr_Occurred()); |
668 | 0 | return -1; |
669 | 0 | } |
670 | 203 | if (def->m_slots) { |
671 | 0 | PyErr_SetString(PyExc_SystemError, |
672 | 0 | "PyState_AddModule called on module with slots"); |
673 | 0 | return -1; |
674 | 0 | } |
675 | 203 | state = _PyInterpreterState_GET_UNSAFE(); |
676 | 203 | if (!state->modules_by_index) { |
677 | 14 | state->modules_by_index = PyList_New(0); |
678 | 14 | if (!state->modules_by_index) |
679 | 0 | return -1; |
680 | 14 | } |
681 | 407 | while (PyList_GET_SIZE(state->modules_by_index) <= def->m_base.m_index) |
682 | 204 | if (PyList_Append(state->modules_by_index, Py_None) < 0) |
683 | 0 | return -1; |
684 | 203 | Py_INCREF(module); |
685 | 203 | return PyList_SetItem(state->modules_by_index, |
686 | 203 | def->m_base.m_index, module); |
687 | 203 | } |
688 | | |
689 | | int |
690 | | PyState_AddModule(PyObject* module, struct PyModuleDef* def) |
691 | 0 | { |
692 | 0 | Py_ssize_t index; |
693 | 0 | PyInterpreterState *state = _PyInterpreterState_GET_UNSAFE(); |
694 | 0 | if (!def) { |
695 | 0 | Py_FatalError("PyState_AddModule: Module Definition is NULL"); |
696 | 0 | return -1; |
697 | 0 | } |
698 | 0 | index = def->m_base.m_index; |
699 | 0 | if (state->modules_by_index && |
700 | 0 | index < PyList_GET_SIZE(state->modules_by_index) && |
701 | 0 | module == PyList_GET_ITEM(state->modules_by_index, index)) { |
702 | 0 | Py_FatalError("PyState_AddModule: Module already added!"); |
703 | 0 | return -1; |
704 | 0 | } |
705 | 0 | return _PyState_AddModule(module, def); |
706 | 0 | } |
707 | | |
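/* Illustrative sketch (editor's addition, hypothetical "example" module): one way
   a single-phase extension pairs PyState_FindModule() with PyState_AddModule()
   so a repeated import in the same interpreter returns the existing module
   object.  The import machinery normally calls PyState_AddModule() itself, so
   the explicit call is redundant but harmless. */
static struct PyModuleDef examplemodule = {
    PyModuleDef_HEAD_INIT,
    "example",                      /* m_name (hypothetical) */
    NULL,                           /* m_doc */
    -1,                             /* m_size: single-phase init, state kept in globals */
    NULL,                           /* m_methods */
};

PyMODINIT_FUNC
PyInit_example(void)
{
    PyObject *m = PyState_FindModule(&examplemodule);
    if (m != NULL) {
        Py_INCREF(m);
        return m;                   /* already created in this interpreter */
    }
    m = PyModule_Create(&examplemodule);
    if (m == NULL) {
        return NULL;
    }
    if (PyState_AddModule(m, &examplemodule) < 0) {
        Py_DECREF(m);
        return NULL;
    }
    return m;
}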
708 | | int |
709 | | PyState_RemoveModule(struct PyModuleDef* def) |
710 | 0 | { |
711 | 0 | PyInterpreterState *state; |
712 | 0 | Py_ssize_t index = def->m_base.m_index; |
713 | 0 | if (def->m_slots) { |
714 | 0 | PyErr_SetString(PyExc_SystemError, |
715 | 0 | "PyState_RemoveModule called on module with slots"); |
716 | 0 | return -1; |
717 | 0 | } |
718 | 0 | state = _PyInterpreterState_GET_UNSAFE(); |
719 | 0 | if (index == 0) { |
720 | 0 | Py_FatalError("PyState_RemoveModule: Module index invalid."); |
721 | 0 | return -1; |
722 | 0 | } |
723 | 0 | if (state->modules_by_index == NULL) { |
724 | 0 | Py_FatalError("PyState_RemoveModule: Interpreters module-list not accessible."); |
725 | 0 | return -1; |
726 | 0 | } |
727 | 0 | if (index > PyList_GET_SIZE(state->modules_by_index)) { |
728 | 0 | Py_FatalError("PyState_RemoveModule: Module index out of bounds."); |
729 | 0 | return -1; |
730 | 0 | } |
731 | 0 | Py_INCREF(Py_None); |
732 | 0 | return PyList_SetItem(state->modules_by_index, index, Py_None); |
733 | 0 | } |
734 | | |
735 | | /* used by import.c:PyImport_Cleanup */ |
736 | | void |
737 | | _PyState_ClearModules(void) |
738 | 0 | { |
739 | 0 | PyInterpreterState *state = _PyInterpreterState_GET_UNSAFE(); |
740 | 0 | if (state->modules_by_index) { |
741 | 0 | Py_ssize_t i; |
742 | 0 | for (i = 0; i < PyList_GET_SIZE(state->modules_by_index); i++) { |
743 | 0 | PyObject *m = PyList_GET_ITEM(state->modules_by_index, i); |
744 | 0 | if (PyModule_Check(m)) { |
745 | | /* cleanup the saved copy of module dicts */ |
746 | 0 | PyModuleDef *md = PyModule_GetDef(m); |
747 | 0 | if (md) |
748 | 0 | Py_CLEAR(md->m_base.m_copy); |
749 | 0 | } |
750 | 0 | } |
751 | | /* Setting modules_by_index to NULL could be dangerous, so we |
752 | | clear the list instead. */ |
753 | 0 | if (PyList_SetSlice(state->modules_by_index, |
754 | 0 | 0, PyList_GET_SIZE(state->modules_by_index), |
755 | 0 | NULL)) |
756 | 0 | PyErr_WriteUnraisable(state->modules_by_index); |
757 | 0 | } |
758 | 0 | } |
759 | | |
760 | | void |
761 | | PyThreadState_Clear(PyThreadState *tstate) |
762 | 0 | { |
763 | 0 | int verbose = tstate->interp->config.verbose; |
764 | |
765 | 0 | if (verbose && tstate->frame != NULL) { |
766 | | /* bpo-20526: After the main thread calls |
767 | | _PyRuntimeState_SetFinalizing() in Py_FinalizeEx(), threads must |
768 | | exit when trying to take the GIL. If a thread exit in the middle of |
769 | | _PyEval_EvalFrameDefault(), tstate->frame is not reset to its |
770 | | previous value. It is more likely with daemon threads, but it can |
771 | | happen with regular threads if threading._shutdown() fails |
772 | | (ex: interrupted by CTRL+C). */ |
773 | 0 | fprintf(stderr, |
774 | 0 | "PyThreadState_Clear: warning: thread still has a frame\n"); |
775 | 0 | } |
776 | | |
777 | | /* Don't clear tstate->frame: it is a borrowed reference */ |
778 | |
779 | 0 | Py_CLEAR(tstate->dict); |
780 | 0 | Py_CLEAR(tstate->async_exc); |
781 | |
782 | 0 | Py_CLEAR(tstate->curexc_type); |
783 | 0 | Py_CLEAR(tstate->curexc_value); |
784 | 0 | Py_CLEAR(tstate->curexc_traceback); |
785 | |
786 | 0 | Py_CLEAR(tstate->exc_state.exc_type); |
787 | 0 | Py_CLEAR(tstate->exc_state.exc_value); |
788 | 0 | Py_CLEAR(tstate->exc_state.exc_traceback); |
789 | | |
790 | | /* The stack of exception states should contain just this thread. */ |
791 | 0 | if (verbose && tstate->exc_info != &tstate->exc_state) { |
792 | 0 | fprintf(stderr, |
793 | 0 | "PyThreadState_Clear: warning: thread still has a generator\n"); |
794 | 0 | } |
795 | |
796 | 0 | tstate->c_profilefunc = NULL; |
797 | 0 | tstate->c_tracefunc = NULL; |
798 | 0 | Py_CLEAR(tstate->c_profileobj); |
799 | 0 | Py_CLEAR(tstate->c_traceobj); |
800 | |
801 | 0 | Py_CLEAR(tstate->async_gen_firstiter); |
802 | 0 | Py_CLEAR(tstate->async_gen_finalizer); |
803 | |
804 | 0 | Py_CLEAR(tstate->context); |
805 | 0 | } |
806 | | |
807 | | |
808 | | /* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */ |
809 | | static void |
810 | | tstate_delete_common(_PyRuntimeState *runtime, PyThreadState *tstate) |
811 | 0 | { |
812 | 0 | if (tstate == NULL) { |
813 | 0 | Py_FatalError("PyThreadState_Delete: NULL tstate"); |
814 | 0 | } |
815 | 0 | PyInterpreterState *interp = tstate->interp; |
816 | 0 | if (interp == NULL) { |
817 | 0 | Py_FatalError("PyThreadState_Delete: NULL interp"); |
818 | 0 | } |
819 | 0 | HEAD_LOCK(runtime); |
820 | 0 | if (tstate->prev) |
821 | 0 | tstate->prev->next = tstate->next; |
822 | 0 | else |
823 | 0 | interp->tstate_head = tstate->next; |
824 | 0 | if (tstate->next) |
825 | 0 | tstate->next->prev = tstate->prev; |
826 | 0 | HEAD_UNLOCK(runtime); |
827 | 0 | if (tstate->on_delete != NULL) { |
828 | 0 | tstate->on_delete(tstate->on_delete_data); |
829 | 0 | } |
830 | 0 | PyMem_RawFree(tstate); |
831 | 0 | } |
832 | | |
833 | | |
834 | | static void |
835 | | _PyThreadState_Delete(_PyRuntimeState *runtime, PyThreadState *tstate) |
836 | 0 | { |
837 | 0 | struct _gilstate_runtime_state *gilstate = &runtime->gilstate; |
838 | 0 | if (tstate == _PyRuntimeGILState_GetThreadState(gilstate)) { |
839 | 0 | Py_FatalError("PyThreadState_Delete: tstate is still current"); |
840 | 0 | } |
841 | 0 | if (gilstate->autoInterpreterState && |
842 | 0 | PyThread_tss_get(&gilstate->autoTSSkey) == tstate) |
843 | 0 | { |
844 | 0 | PyThread_tss_set(&gilstate->autoTSSkey, NULL); |
845 | 0 | } |
846 | 0 | tstate_delete_common(runtime, tstate); |
847 | 0 | } |
848 | | |
849 | | |
850 | | void |
851 | | PyThreadState_Delete(PyThreadState *tstate) |
852 | 0 | { |
853 | 0 | _PyThreadState_Delete(&_PyRuntime, tstate); |
854 | 0 | } |
855 | | |
856 | | |
857 | | static void |
858 | | _PyThreadState_DeleteCurrent(_PyRuntimeState *runtime) |
859 | 0 | { |
860 | 0 | struct _gilstate_runtime_state *gilstate = &runtime->gilstate; |
861 | 0 | PyThreadState *tstate = _PyRuntimeGILState_GetThreadState(gilstate); |
862 | 0 | if (tstate == NULL) |
863 | 0 | Py_FatalError( |
864 | 0 | "PyThreadState_DeleteCurrent: no current tstate"); |
865 | 0 | tstate_delete_common(runtime, tstate); |
866 | 0 | if (gilstate->autoInterpreterState && |
867 | 0 | PyThread_tss_get(&gilstate->autoTSSkey) == tstate) |
868 | 0 | { |
869 | 0 | PyThread_tss_set(&gilstate->autoTSSkey, NULL); |
870 | 0 | } |
871 | 0 | _PyRuntimeGILState_SetThreadState(gilstate, NULL); |
872 | 0 | PyEval_ReleaseLock(); |
873 | 0 | } |
874 | | |
875 | | void |
876 | | PyThreadState_DeleteCurrent() |
877 | 0 | { |
878 | 0 | _PyThreadState_DeleteCurrent(&_PyRuntime); |
879 | 0 | } |
880 | | |
881 | | |
882 | | /* |
883 | | * Delete all thread states except the one passed as argument. |
884 | | * Note that, if there is a current thread state, it *must* be the one |
885 | | * passed as argument. Also, this won't touch any other interpreters |
886 | | * than the current one, since we don't know which thread state should |
887 | | * be kept in those other interpreters. |
888 | | */ |
889 | | void |
890 | | _PyThreadState_DeleteExcept(_PyRuntimeState *runtime, PyThreadState *tstate) |
891 | 0 | { |
892 | 0 | PyInterpreterState *interp = tstate->interp; |
893 | 0 | PyThreadState *p, *next, *garbage; |
894 | 0 | HEAD_LOCK(runtime); |
895 | | /* Remove all thread states, except tstate, from the linked list of |
896 | | thread states. This will allow calling PyThreadState_Clear() |
897 | | without holding the lock. */ |
898 | 0 | garbage = interp->tstate_head; |
899 | 0 | if (garbage == tstate) |
900 | 0 | garbage = tstate->next; |
901 | 0 | if (tstate->prev) |
902 | 0 | tstate->prev->next = tstate->next; |
903 | 0 | if (tstate->next) |
904 | 0 | tstate->next->prev = tstate->prev; |
905 | 0 | tstate->prev = tstate->next = NULL; |
906 | 0 | interp->tstate_head = tstate; |
907 | 0 | HEAD_UNLOCK(runtime); |
908 | | /* Clear and deallocate all stale thread states. Even if this |
909 | | executes Python code, we should be safe since it executes |
910 | | in the current thread, not one of the stale threads. */ |
911 | 0 | for (p = garbage; p; p = next) { |
912 | 0 | next = p->next; |
913 | 0 | PyThreadState_Clear(p); |
914 | 0 | PyMem_RawFree(p); |
915 | 0 | } |
916 | 0 | } |
917 | | |
918 | | |
919 | | PyThreadState * |
920 | | _PyThreadState_UncheckedGet(void) |
921 | 0 | { |
922 | 0 | return _PyThreadState_GET(); |
923 | 0 | } |
924 | | |
925 | | |
926 | | PyThreadState * |
927 | | PyThreadState_Get(void) |
928 | 1 | { |
929 | 1 | PyThreadState *tstate = _PyThreadState_GET(); |
930 | 1 | if (tstate == NULL) |
931 | 0 | Py_FatalError("PyThreadState_Get: no current thread"); |
932 | | |
933 | 1 | return tstate; |
934 | 1 | } |
935 | | |
936 | | |
937 | | PyThreadState * |
938 | | _PyThreadState_Swap(struct _gilstate_runtime_state *gilstate, PyThreadState *newts) |
939 | 15.9k | { |
940 | 15.9k | PyThreadState *oldts = _PyRuntimeGILState_GetThreadState(gilstate); |
941 | | |
942 | 15.9k | _PyRuntimeGILState_SetThreadState(gilstate, newts); |
943 | | /* It should not be possible for more than one thread state |
944 | | to be used for a thread. Check this the best we can in debug |
945 | | builds. |
946 | | */ |
947 | | #if defined(Py_DEBUG) |
948 | | if (newts) { |
949 | | /* This can be called from PyEval_RestoreThread(). Similar |
950 | | to it, we need to ensure errno doesn't change. |
951 | | */ |
952 | | int err = errno; |
953 | | PyThreadState *check = _PyGILState_GetThisThreadState(gilstate); |
954 | | if (check && check->interp == newts->interp && check != newts) |
955 | | Py_FatalError("Invalid thread state for this thread"); |
956 | | errno = err; |
957 | | } |
958 | | #endif |
959 | 15.9k | return oldts; |
960 | 15.9k | } |
961 | | |
962 | | PyThreadState * |
963 | | PyThreadState_Swap(PyThreadState *newts) |
964 | 14 | { |
965 | 14 | return _PyThreadState_Swap(&_PyRuntime.gilstate, newts); |
966 | 14 | } |
967 | | |
968 | | /* An extension mechanism to store arbitrary additional per-thread state. |
969 | | PyThreadState_GetDict() returns a dictionary that can be used to hold such |
970 | | state; the caller should pick a unique key and store its state there. If |
971 | | PyThreadState_GetDict() returns NULL, an exception has *not* been raised |
972 | | and the caller should assume no per-thread state is available. */ |
973 | | |
974 | | PyObject * |
975 | | PyThreadState_GetDict(void) |
976 | 4 | { |
977 | 4 | PyThreadState *tstate = _PyThreadState_GET(); |
978 | 4 | if (tstate == NULL) |
979 | 0 | return NULL; |
980 | | |
981 | 4 | if (tstate->dict == NULL) { |
982 | 1 | PyObject *d; |
983 | 1 | tstate->dict = d = PyDict_New(); |
984 | 1 | if (d == NULL) |
985 | 0 | PyErr_Clear(); |
986 | 1 | } |
987 | 4 | return tstate->dict; |
988 | 4 | } |
989 | | |
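/* Illustrative sketch (editor's addition): keeping per-thread state under a
   unique key, as the comment above suggests.  The key "my_extension.counter"
   and the function name are hypothetical. */
static int
bump_per_thread_counter(void)
{
    PyObject *dict = PyThreadState_GetDict();       /* borrowed; may be NULL */
    if (dict == NULL) {
        return -1;                                  /* no per-thread storage; no exception set */
    }
    PyObject *old = PyDict_GetItemString(dict, "my_extension.counter");  /* borrowed */
    long n = (old != NULL) ? PyLong_AsLong(old) : 0;
    if (n == -1 && PyErr_Occurred()) {
        return -1;
    }
    PyObject *val = PyLong_FromLong(n + 1);
    if (val == NULL) {
        return -1;
    }
    int res = PyDict_SetItemString(dict, "my_extension.counter", val);
    Py_DECREF(val);
    return res;
}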
990 | | |
991 | | /* Asynchronously raise an exception in a thread. |
992 | | Requested by Just van Rossum and Alex Martelli. |
993 | | To prevent naive misuse, you must write your own extension |
994 | | to call this, or use ctypes. Must be called with the GIL held. |
995 | | Returns the number of tstates modified (normally 1, but 0 if `id` didn't |
996 | | match any known thread id). Can be called with exc=NULL to clear an |
997 | | existing async exception. This raises no exceptions. */ |
998 | | |
999 | | int |
1000 | | PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc) |
1001 | 0 | { |
1002 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
1003 | 0 | PyInterpreterState *interp = _PyRuntimeState_GetThreadState(runtime)->interp; |
1004 | | |
1005 | | /* Although the GIL is held, a few C API functions can be called |
1006 | | * without the GIL held, and in particular some that create and |
1007 | | * destroy thread and interpreter states. Those can mutate the |
1008 | | * list of thread states we're traversing, so to prevent that we lock |
1009 | | * head_mutex for the duration. |
1010 | | */ |
1011 | 0 | HEAD_LOCK(runtime); |
1012 | 0 | for (PyThreadState *p = interp->tstate_head; p != NULL; p = p->next) { |
1013 | 0 | if (p->thread_id == id) { |
1014 | | /* Tricky: we need to decref the current value |
1015 | | * (if any) in p->async_exc, but that can in turn |
1016 | | * allow arbitrary Python code to run, including |
1017 | | * perhaps calls to this function. To prevent |
1018 | | * deadlock, we need to release head_mutex before |
1019 | | * the decref. |
1020 | | */ |
1021 | 0 | PyObject *old_exc = p->async_exc; |
1022 | 0 | Py_XINCREF(exc); |
1023 | 0 | p->async_exc = exc; |
1024 | 0 | HEAD_UNLOCK(runtime); |
1025 | 0 | Py_XDECREF(old_exc); |
1026 | 0 | _PyEval_SignalAsyncExc(&runtime->ceval); |
1027 | 0 | return 1; |
1028 | 0 | } |
1029 | 0 | } |
1030 | 0 | HEAD_UNLOCK(runtime); |
1031 | 0 | return 0; |
1032 | 0 | } |
1033 | | |
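/* Illustrative sketch (editor's addition): calling PyThreadState_SetAsyncExc()
   from extension code with the GIL held, as required.  `target_id` is assumed
   to be a thread ident obtained elsewhere (e.g. threading.get_ident()). */
static int
interrupt_thread(unsigned long target_id)
{
    PyGILState_STATE gstate = PyGILState_Ensure();
    /* Returns the number of thread states modified: 1 on success, 0 if
       target_id matched no known thread.  Passing NULL instead of an
       exception clears a pending async exception. */
    int n = PyThreadState_SetAsyncExc(target_id, PyExc_KeyboardInterrupt);
    PyGILState_Release(gstate);
    return n;
}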
1034 | | |
1035 | | /* Routines for advanced debuggers, requested by David Beazley. |
1036 | | Don't use unless you know what you are doing! */ |
1037 | | |
1038 | | PyInterpreterState * |
1039 | | PyInterpreterState_Head(void) |
1040 | 0 | { |
1041 | 0 | return _PyRuntime.interpreters.head; |
1042 | 0 | } |
1043 | | |
1044 | | PyInterpreterState * |
1045 | | PyInterpreterState_Main(void) |
1046 | 0 | { |
1047 | 0 | return _PyRuntime.interpreters.main; |
1048 | 0 | } |
1049 | | |
1050 | | PyInterpreterState * |
1051 | 0 | PyInterpreterState_Next(PyInterpreterState *interp) { |
1052 | 0 | return interp->next; |
1053 | 0 | } |
1054 | | |
1055 | | PyThreadState * |
1056 | 0 | PyInterpreterState_ThreadHead(PyInterpreterState *interp) { |
1057 | 0 | return interp->tstate_head; |
1058 | 0 | } |
1059 | | |
1060 | | PyThreadState * |
1061 | 0 | PyThreadState_Next(PyThreadState *tstate) { |
1062 | 0 | return tstate->next; |
1063 | 0 | } |
1064 | | |
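/* Illustrative sketch (editor's addition): a debugger-style walk over every
   interpreter and thread state using the accessors above.  Assumes the GIL is
   held (or all threads are otherwise stopped) while iterating. */
static void
dump_all_threads(FILE *out)
{
    for (PyInterpreterState *interp = PyInterpreterState_Head();
         interp != NULL;
         interp = PyInterpreterState_Next(interp))
    {
        for (PyThreadState *ts = PyInterpreterState_ThreadHead(interp);
             ts != NULL;
             ts = PyThreadState_Next(ts))
        {
            fprintf(out, "interp %lld: thread %lu%s\n",
                    (long long)PyInterpreterState_GetID(interp),
                    ts->thread_id,
                    ts->frame != NULL ? " (has frame)" : "");
        }
    }
}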
1065 | | /* The implementation of sys._current_frames(). This is intended to be |
1066 | | called with the GIL held, as it will be when called via |
1067 | | sys._current_frames(). It's possible it would work fine even without |
1068 | | the GIL held, but we haven't thought enough about that. |
1069 | | */ |
1070 | | PyObject * |
1071 | | _PyThread_CurrentFrames(void) |
1072 | 0 | { |
1073 | 0 | PyObject *result; |
1074 | 0 | PyInterpreterState *i; |
1075 | |
1076 | 0 | if (PySys_Audit("sys._current_frames", NULL) < 0) { |
1077 | 0 | return NULL; |
1078 | 0 | } |
1079 | | |
1080 | 0 | result = PyDict_New(); |
1081 | 0 | if (result == NULL) |
1082 | 0 | return NULL; |
1083 | | |
1084 | | /* for i in all interpreters: |
1085 | | * for t in all of i's thread states: |
1086 | | * if t's frame isn't NULL, map t's id to its frame |
1087 | | * Because these lists can mutate even when the GIL is held, we |
1088 | | * need to grab head_mutex for the duration. |
1089 | | */ |
1090 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
1091 | 0 | HEAD_LOCK(runtime); |
1092 | 0 | for (i = runtime->interpreters.head; i != NULL; i = i->next) { |
1093 | 0 | PyThreadState *t; |
1094 | 0 | for (t = i->tstate_head; t != NULL; t = t->next) { |
1095 | 0 | PyObject *id; |
1096 | 0 | int stat; |
1097 | 0 | struct _frame *frame = t->frame; |
1098 | 0 | if (frame == NULL) |
1099 | 0 | continue; |
1100 | 0 | id = PyLong_FromUnsignedLong(t->thread_id); |
1101 | 0 | if (id == NULL) |
1102 | 0 | goto Fail; |
1103 | 0 | stat = PyDict_SetItem(result, id, (PyObject *)frame); |
1104 | 0 | Py_DECREF(id); |
1105 | 0 | if (stat < 0) |
1106 | 0 | goto Fail; |
1107 | 0 | } |
1108 | 0 | } |
1109 | 0 | HEAD_UNLOCK(runtime); |
1110 | 0 | return result; |
1111 | | |
1112 | 0 | Fail: |
1113 | 0 | HEAD_UNLOCK(runtime); |
1114 | 0 | Py_DECREF(result); |
1115 | 0 | return NULL; |
1116 | 0 | } |
1117 | | |
1118 | | /* Python "auto thread state" API. */ |
1119 | | |
1120 | | /* Keep this as a static, as it is not reliable! It can only |
1121 | | ever be compared to the state for the *current* thread. |
1122 | | * If not equal, then it doesn't matter that the actual |
1123 | | value may change immediately after comparison, as it can't |
1124 | | possibly change to the current thread's state. |
1125 | | * If equal, then the current thread holds the lock, so the value can't |
1126 | | change until we yield the lock. |
1127 | | */ |
1128 | | static int |
1129 | | PyThreadState_IsCurrent(PyThreadState *tstate) |
1130 | 0 | { |
1131 | | /* Must be the tstate for this thread */ |
1132 | 0 | struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate; |
1133 | 0 | assert(_PyGILState_GetThisThreadState(gilstate) == tstate); |
1134 | 0 | return tstate == _PyRuntimeGILState_GetThreadState(gilstate); |
1135 | 0 | } |
1136 | | |
1137 | | /* Internal initialization/finalization functions called by |
1138 | | Py_Initialize/Py_FinalizeEx |
1139 | | */ |
1140 | | void |
1141 | | _PyGILState_Init(_PyRuntimeState *runtime, |
1142 | | PyInterpreterState *interp, PyThreadState *tstate) |
1143 | 14 | { |
1144 | | /* must init with valid states */ |
1145 | 14 | assert(interp != NULL); |
1146 | 14 | assert(tstate != NULL); |
1147 | | |
1148 | 14 | struct _gilstate_runtime_state *gilstate = &runtime->gilstate; |
1149 | | |
1150 | 14 | if (PyThread_tss_create(&gilstate->autoTSSkey) != 0) { |
1151 | 0 | Py_FatalError("Could not allocate TSS entry"); |
1152 | 0 | } |
1153 | 14 | gilstate->autoInterpreterState = interp; |
1154 | 14 | assert(PyThread_tss_get(&gilstate->autoTSSkey) == NULL); |
1155 | 14 | assert(tstate->gilstate_counter == 0); |
1156 | | |
1157 | 14 | _PyGILState_NoteThreadState(gilstate, tstate); |
1158 | 14 | } |
1159 | | |
1160 | | PyInterpreterState * |
1161 | | _PyGILState_GetInterpreterStateUnsafe(void) |
1162 | 0 | { |
1163 | 0 | return _PyRuntime.gilstate.autoInterpreterState; |
1164 | 0 | } |
1165 | | |
1166 | | void |
1167 | | _PyGILState_Fini(_PyRuntimeState *runtime) |
1168 | 0 | { |
1169 | 0 | struct _gilstate_runtime_state *gilstate = &runtime->gilstate; |
1170 | 0 | PyThread_tss_delete(&gilstate->autoTSSkey); |
1171 | 0 | gilstate->autoInterpreterState = NULL; |
1172 | 0 | } |
1173 | | |
1174 | | /* Reset the TSS key - called by PyOS_AfterFork_Child(). |
1175 | | * This should not be necessary, but some - buggy - pthread implementations |
1176 | | * don't reset TSS upon fork(), see issue #10517. |
1177 | | */ |
1178 | | void |
1179 | | _PyGILState_Reinit(_PyRuntimeState *runtime) |
1180 | 0 | { |
1181 | 0 | struct _gilstate_runtime_state *gilstate = &runtime->gilstate; |
1182 | 0 | PyThreadState *tstate = _PyGILState_GetThisThreadState(gilstate); |
1183 | |
1184 | 0 | PyThread_tss_delete(&gilstate->autoTSSkey); |
1185 | 0 | if (PyThread_tss_create(&gilstate->autoTSSkey) != 0) { |
1186 | 0 | Py_FatalError("Could not allocate TSS entry"); |
1187 | 0 | } |
1188 | | |
1189 | | /* If the thread had an associated auto thread state, reassociate it with |
1190 | | * the new key. */ |
1191 | 0 | if (tstate && |
1192 | 0 | PyThread_tss_set(&gilstate->autoTSSkey, (void *)tstate) != 0) |
1193 | 0 | { |
1194 | 0 | Py_FatalError("Couldn't create autoTSSkey mapping"); |
1195 | 0 | } |
1196 | 0 | } |
1197 | | |
1198 | | /* When a thread state is created for a thread by some mechanism other than |
1199 | | PyGILState_Ensure, it's important that the GILState machinery knows about |
1200 | | it so it doesn't try to create another thread state for the thread (this is |
1201 | | a better fix for SF bug #1010677 than the first one attempted). |
1202 | | */ |
1203 | | static void |
1204 | | _PyGILState_NoteThreadState(struct _gilstate_runtime_state *gilstate, PyThreadState* tstate) |
1205 | 28 | { |
1206 | | /* If autoTSSkey isn't initialized, this must be the very first |
1207 | | threadstate created in Py_Initialize(). Don't do anything for now |
1208 | | (we'll be back here when _PyGILState_Init is called). */ |
1209 | 28 | if (!gilstate->autoInterpreterState) { |
1210 | 14 | return; |
1211 | 14 | } |
1212 | | |
1213 | | /* Stick the thread state for this thread in thread specific storage. |
1214 | | |
1215 | | The only situation where you can legitimately have more than one |
1216 | | thread state for an OS level thread is when there are multiple |
1217 | | interpreters. |
1218 | | |
1219 | | You shouldn't really be using the PyGILState_ APIs anyway (see issues |
1220 | | #10915 and #15751). |
1221 | | |
1222 | | The first thread state created for that given OS level thread will |
1223 | | "win", which seems reasonable behaviour. |
1224 | | */ |
1225 | 14 | if (PyThread_tss_get(&gilstate->autoTSSkey) == NULL) { |
1226 | 14 | if ((PyThread_tss_set(&gilstate->autoTSSkey, (void *)tstate)) != 0) { |
1227 | 0 | Py_FatalError("Couldn't create autoTSSkey mapping"); |
1228 | 0 | } |
1229 | 14 | } |
1230 | | |
1231 | | /* PyGILState_Release must not try to delete this thread state. */ |
1232 | 14 | tstate->gilstate_counter = 1; |
1233 | 14 | } |
1234 | | |
1235 | | /* The public functions */ |
1236 | | static PyThreadState * |
1237 | | _PyGILState_GetThisThreadState(struct _gilstate_runtime_state *gilstate) |
1238 | 0 | { |
1239 | 0 | if (gilstate->autoInterpreterState == NULL) |
1240 | 0 | return NULL; |
1241 | 0 | return (PyThreadState *)PyThread_tss_get(&gilstate->autoTSSkey); |
1242 | 0 | } |
1243 | | |
1244 | | PyThreadState * |
1245 | | PyGILState_GetThisThreadState(void) |
1246 | 0 | { |
1247 | 0 | return _PyGILState_GetThisThreadState(&_PyRuntime.gilstate); |
1248 | 0 | } |
1249 | | |
1250 | | int |
1251 | | PyGILState_Check(void) |
1252 | 0 | { |
1253 | |
1254 | 0 | if (!_PyGILState_check_enabled) { |
1255 | 0 | return 1; |
1256 | 0 | } |
1257 | | |
1258 | 0 | struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate; |
1259 | 0 | if (!PyThread_tss_is_created(&gilstate->autoTSSkey)) { |
1260 | 0 | return 1; |
1261 | 0 | } |
1262 | | |
1263 | 0 | PyThreadState *tstate = _PyRuntimeGILState_GetThreadState(gilstate); |
1264 | 0 | if (tstate == NULL) { |
1265 | 0 | return 0; |
1266 | 0 | } |
1267 | | |
1268 | 0 | return (tstate == _PyGILState_GetThisThreadState(gilstate)); |
1269 | 0 | } |
1270 | | |
1271 | | PyGILState_STATE |
1272 | | PyGILState_Ensure(void) |
1273 | 0 | { |
1274 | 0 | struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate; |
1275 | 0 | int current; |
1276 | 0 | PyThreadState *tcur; |
1277 | 0 | int need_init_threads = 0; |
1278 | | |
1279 | | /* Note that we do not auto-init Python here - apart from |
1280 | | potential races with 2 threads auto-initializing, pep-311 |
1281 | | spells out other issues. Embedders are expected to have |
1282 | | called Py_Initialize() and usually PyEval_InitThreads(). |
1283 | | */ |
1284 | | /* Py_Initialize() hasn't been called! */ |
1285 | 0 | assert(gilstate->autoInterpreterState); |
1286 | |
1287 | 0 | tcur = (PyThreadState *)PyThread_tss_get(&gilstate->autoTSSkey); |
1288 | 0 | if (tcur == NULL) { |
1289 | 0 | need_init_threads = 1; |
1290 | | |
1291 | | /* Create a new thread state for this thread */ |
1292 | 0 | tcur = PyThreadState_New(gilstate->autoInterpreterState); |
1293 | 0 | if (tcur == NULL) |
1294 | 0 | Py_FatalError("Couldn't create thread-state for new thread"); |
1295 | | /* This is our thread state! We'll need to delete it in the |
1296 | | matching call to PyGILState_Release(). */ |
1297 | 0 | tcur->gilstate_counter = 0; |
1298 | 0 | current = 0; /* new thread state is never current */ |
1299 | 0 | } |
1300 | 0 | else { |
1301 | 0 | current = PyThreadState_IsCurrent(tcur); |
1302 | 0 | } |
1303 | | |
1304 | 0 | if (current == 0) { |
1305 | 0 | PyEval_RestoreThread(tcur); |
1306 | 0 | } |
1307 | | |
1308 | | /* Update our counter in the thread-state - no need for locks: |
1309 | | - tcur will remain valid as we hold the GIL. |
1310 | | - the counter is safe as we are the only thread "allowed" |
1311 | | to modify this value |
1312 | | */ |
1313 | 0 | ++tcur->gilstate_counter; |
1314 | |
1315 | 0 | if (need_init_threads) { |
1316 | | /* At startup, Python has no concrete GIL. If PyGILState_Ensure() is |
1317 | | called from a new thread for the first time, we need the create the |
1318 | | GIL. */ |
1319 | 0 | PyEval_InitThreads(); |
1320 | 0 | } |
1321 | |
1322 | 0 | return current ? PyGILState_LOCKED : PyGILState_UNLOCKED; |
1323 | 0 | } |
1324 | | |
1325 | | void |
1326 | | PyGILState_Release(PyGILState_STATE oldstate) |
1327 | 0 | { |
1328 | 0 | _PyRuntimeState *runtime = &_PyRuntime; |
1329 | 0 | PyThreadState *tcur = PyThread_tss_get(&runtime->gilstate.autoTSSkey); |
1330 | 0 | if (tcur == NULL) { |
1331 | 0 | Py_FatalError("auto-releasing thread-state, " |
1332 | 0 | "but no thread-state for this thread"); |
1333 | 0 | } |
1334 | | |
1335 | | /* We must hold the GIL and have our thread state current */ |
1336 | | /* XXX - remove the check - the assert should be fine, |
1337 | | but while this is very new (April 2003), the extra check |
1338 | | by release-only users can't hurt. |
1339 | | */ |
1340 | 0 | if (!PyThreadState_IsCurrent(tcur)) { |
1341 | 0 | Py_FatalError("This thread state must be current when releasing"); |
1342 | 0 | } |
1343 | 0 | assert(PyThreadState_IsCurrent(tcur)); |
1344 | 0 | --tcur->gilstate_counter; |
1345 | 0 | assert(tcur->gilstate_counter >= 0); /* illegal counter value */ |
1346 | | |
1347 | | /* If we're going to destroy this thread-state, we must |
1348 | | * clear it while the GIL is held, as destructors may run. |
1349 | | */ |
1350 | 0 | if (tcur->gilstate_counter == 0) { |
1351 | | /* can't have been locked when we created it */ |
1352 | 0 | assert(oldstate == PyGILState_UNLOCKED); |
1353 | 0 | PyThreadState_Clear(tcur); |
1354 | | /* Delete the thread-state. Note this releases the GIL too! |
1355 | | * It's vital that the GIL be held here, to avoid shutdown |
1356 | | * races; see bugs 225673 and 1061968 (that nasty bug has a |
1357 | | * habit of coming back). |
1358 | | */ |
1359 | 0 | _PyThreadState_DeleteCurrent(runtime); |
1360 | 0 | } |
1361 | | /* Release the lock if necessary */ |
1362 | 0 | else if (oldstate == PyGILState_UNLOCKED) |
1363 | 0 | PyEval_SaveThread(); |
1364 | 0 | } |
1365 | | |
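/* Illustrative sketch (editor's addition): the canonical Ensure/Release pairing
   for a callback that may arrive on a thread Python has never seen.  The
   callback name is hypothetical; the pattern follows the comments above. */
static void
on_external_event(void)
{
    PyGILState_STATE gstate = PyGILState_Ensure();  /* acquires the GIL, creating a
                                                       thread state if necessary */
    int rc = PyRun_SimpleString("print('event received')");
    (void)rc;   /* on failure the error was already printed by PyRun_SimpleString() */

    PyGILState_Release(gstate);                     /* may delete the temporary thread
                                                       state and release the GIL */
}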
1366 | | |
1367 | | /**************************/ |
1368 | | /* cross-interpreter data */ |
1369 | | /**************************/ |
1370 | | |
1371 | | /* cross-interpreter data */ |
1372 | | |
1373 | | crossinterpdatafunc _PyCrossInterpreterData_Lookup(PyObject *); |
1374 | | |
1375 | | /* This is a separate func from _PyCrossInterpreterData_Lookup in order |
1376 | | to keep the registry code separate. */ |
1377 | | static crossinterpdatafunc |
1378 | | _lookup_getdata(PyObject *obj) |
1379 | 0 | { |
1380 | 0 | crossinterpdatafunc getdata = _PyCrossInterpreterData_Lookup(obj); |
1381 | 0 | if (getdata == NULL && PyErr_Occurred() == 0) |
1382 | 0 | PyErr_Format(PyExc_ValueError, |
1383 | 0 | "%S does not support cross-interpreter data", obj); |
1384 | 0 | return getdata; |
1385 | 0 | } |
1386 | | |
1387 | | int |
1388 | | _PyObject_CheckCrossInterpreterData(PyObject *obj) |
1389 | 0 | { |
1390 | 0 | crossinterpdatafunc getdata = _lookup_getdata(obj); |
1391 | 0 | if (getdata == NULL) { |
1392 | 0 | return -1; |
1393 | 0 | } |
1394 | 0 | return 0; |
1395 | 0 | } |
1396 | | |
1397 | | static int |
1398 | | _check_xidata(_PyCrossInterpreterData *data) |
1399 | 0 | { |
1400 | | // data->data can be anything, including NULL, so we don't check it. |
1401 | | |
1402 | | // data->obj may be NULL, so we don't check it. |
1403 | |
1404 | 0 | if (data->interp < 0) { |
1405 | 0 | PyErr_SetString(PyExc_SystemError, "missing interp"); |
1406 | 0 | return -1; |
1407 | 0 | } |
1408 | | |
1409 | 0 | if (data->new_object == NULL) { |
1410 | 0 | PyErr_SetString(PyExc_SystemError, "missing new_object func"); |
1411 | 0 | return -1; |
1412 | 0 | } |
1413 | | |
1414 | | // data->free may be NULL, so we don't check it. |
1415 | | |
1416 | 0 | return 0; |
1417 | 0 | } |
1418 | | |
1419 | | int |
1420 | | _PyObject_GetCrossInterpreterData(PyObject *obj, _PyCrossInterpreterData *data) |
1421 | 0 | { |
1422 | | // _PyInterpreterState_Get() aborts if lookup fails, so we don't need |
1423 | | // to check the result for NULL. |
1424 | 0 | PyInterpreterState *interp = _PyInterpreterState_Get(); |
1425 | | |
1426 | | // Reset data before re-populating. |
1427 | 0 | *data = (_PyCrossInterpreterData){0}; |
1428 | 0 | data->free = PyMem_RawFree; // Set a default that may be overridden. |
1429 | | |
1430 | | // Call the "getdata" func for the object. |
1431 | 0 | Py_INCREF(obj); |
1432 | 0 | crossinterpdatafunc getdata = _lookup_getdata(obj); |
1433 | 0 | if (getdata == NULL) { |
1434 | 0 | Py_DECREF(obj); |
1435 | 0 | return -1; |
1436 | 0 | } |
1437 | 0 | int res = getdata(obj, data); |
1438 | 0 | Py_DECREF(obj); |
1439 | 0 | if (res != 0) { |
1440 | 0 | return -1; |
1441 | 0 | } |
1442 | | |
1443 | | // Fill in the blanks and validate the result. |
1444 | 0 | data->interp = interp->id; |
1445 | 0 | if (_check_xidata(data) != 0) { |
1446 | 0 | _PyCrossInterpreterData_Release(data); |
1447 | 0 | return -1; |
1448 | 0 | } |
1449 | | |
1450 | 0 | return 0; |
1451 | 0 | } |
1452 | | |
1453 | | static void |
1454 | | _release_xidata(void *arg) |
1455 | 0 | { |
1456 | 0 | _PyCrossInterpreterData *data = (_PyCrossInterpreterData *)arg; |
1457 | 0 | if (data->free != NULL) { |
1458 | 0 | data->free(data->data); |
1459 | 0 | } |
1460 | 0 | Py_XDECREF(data->obj); |
1461 | 0 | } |
1462 | | |
1463 | | static void |
1464 | | _call_in_interpreter(struct _gilstate_runtime_state *gilstate, |
1465 | | PyInterpreterState *interp, |
1466 | | void (*func)(void *), void *arg) |
1467 | 0 | { |
1468 | | /* We would use Py_AddPendingCall() if it weren't specific to the |
1469 | | * main interpreter (see bpo-33608). In the meantime we take a |
1470 | | * naive approach. |
1471 | | */ |
1472 | 0 | PyThreadState *save_tstate = NULL; |
1473 | 0 | if (interp != _PyRuntimeGILState_GetThreadState(gilstate)->interp) { |
1474 | | // XXX Using the "head" thread isn't strictly correct. |
1475 | 0 | PyThreadState *tstate = PyInterpreterState_ThreadHead(interp); |
1476 | | // XXX Possible GILState issues? |
1477 | 0 | save_tstate = _PyThreadState_Swap(gilstate, tstate); |
1478 | 0 | } |
1479 | |
1480 | 0 | func(arg); |
1481 | | |
1482 | | // Switch back. |
1483 | 0 | if (save_tstate != NULL) { |
1484 | 0 | _PyThreadState_Swap(gilstate, save_tstate); |
1485 | 0 | } |
1486 | 0 | } |
1487 | | |
1488 | | void |
1489 | | _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data) |
1490 | 0 | { |
1491 | 0 | if (data->data == NULL && data->obj == NULL) { |
1492 | | // Nothing to release! |
1493 | 0 | return; |
1494 | 0 | } |
1495 | | |
1496 | | // Switch to the original interpreter. |
1497 | 0 | PyInterpreterState *interp = _PyInterpreterState_LookUpID(data->interp); |
1498 | 0 | if (interp == NULL) { |
1499 | | // The interpreter was already destroyed. |
1500 | 0 | if (data->free != NULL) { |
1501 | | // XXX Someone leaked some memory... |
1502 | 0 | } |
1503 | 0 | return; |
1504 | 0 | } |
1505 | | |
1506 | | // "Release" the data and/or the object. |
1507 | 0 | struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate; |
1508 | 0 | _call_in_interpreter(gilstate, interp, _release_xidata, data); |
1509 | 0 | } |
1510 | | |
1511 | | PyObject * |
1512 | | _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *data) |
1513 | 0 | { |
1514 | 0 | return data->new_object(data); |
1515 | 0 | } |
1516 | | |
1517 | | /* registry of {type -> crossinterpdatafunc} */ |
1518 | | |
1519 | | /* For now we use a global registry of shareable classes. An |
1520 | | alternative would be to add a tp_* slot for a class's |
1521 | | crossinterpdatafunc. It would be simpler and more efficient. */ |
1522 | | |
1523 | | static int |
1524 | | _register_xidata(struct _xidregistry *xidregistry, PyTypeObject *cls, |
1525 | | crossinterpdatafunc getdata) |
1526 | 0 | { |
1527 | | // Note that we effectively replace already registered classes |
1528 | | // rather than failing. |
1529 | 0 | struct _xidregitem *newhead = PyMem_RawMalloc(sizeof(struct _xidregitem)); |
1530 | 0 | if (newhead == NULL) |
1531 | 0 | return -1; |
1532 | 0 | newhead->cls = cls; |
1533 | 0 | newhead->getdata = getdata; |
1534 | 0 | newhead->next = xidregistry->head; |
1535 | 0 | xidregistry->head = newhead; |
1536 | 0 | return 0; |
1537 | 0 | } |
1538 | | |
1539 | | static void _register_builtins_for_crossinterpreter_data(struct _xidregistry *xidregistry); |
1540 | | |
1541 | | int |
1542 | | _PyCrossInterpreterData_RegisterClass(PyTypeObject *cls, |
1543 | | crossinterpdatafunc getdata) |
1544 | 0 | { |
1545 | 0 | if (!PyType_Check(cls)) { |
1546 | 0 | PyErr_Format(PyExc_ValueError, "only classes may be registered"); |
1547 | 0 | return -1; |
1548 | 0 | } |
1549 | 0 | if (getdata == NULL) { |
1550 | 0 | PyErr_Format(PyExc_ValueError, "missing 'getdata' func"); |
1551 | 0 | return -1; |
1552 | 0 | } |
1553 | | |
1554 | | // Make sure the class isn't ever deallocated. |
1555 | 0 | Py_INCREF((PyObject *)cls); |
1556 | 0 |
1557 | 0 | struct _xidregistry *xidregistry = &_PyRuntime.xidregistry; |
1558 | 0 | PyThread_acquire_lock(xidregistry->mutex, WAIT_LOCK); |
1559 | 0 | if (xidregistry->head == NULL) { |
1560 | 0 | _register_builtins_for_crossinterpreter_data(xidregistry); |
1561 | 0 | } |
1562 | 0 | int res = _register_xidata(xidregistry, cls, getdata); |
1563 | 0 | PyThread_release_lock(xidregistry->mutex); |
1564 | 0 | return res; |
1565 | 0 | } |
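 | |
 | | /* Illustrative sketch (not part of pystate.c): how an extension might register |
 | |  * its own type.  "MyType_Type", "_my_shared", "_new_my_object" and the struct |
 | |  * are hypothetical; the shape mirrors the builtin handlers below. */ |
 | | struct _my_shared_data { int value; }; |
 | |
 | | static PyObject * |
 | | _new_my_object(_PyCrossInterpreterData *data) |
 | | { |
 | |     /* Rebuild an equivalent object in the current interpreter. */ |
 | |     struct _my_shared_data *shared = (struct _my_shared_data *)(data->data); |
 | |     return PyLong_FromLong(shared->value); |
 | | } |
 | |
 | | static int |
 | | _my_shared(PyObject *obj, _PyCrossInterpreterData *data) |
 | | { |
 | |     struct _my_shared_data *shared = PyMem_RawMalloc(sizeof(struct _my_shared_data)); |
 | |     if (shared == NULL) { |
 | |         PyErr_NoMemory(); |
 | |         return -1; |
 | |     } |
 | |     shared->value = 42;                /* a real handler would derive this from obj */ |
 | |     data->data = shared; |
 | |     Py_INCREF(obj); |
 | |     data->obj = obj;                   /* keep obj alive until Release() */ |
 | |     data->new_object = _new_my_object; |
 | |     data->free = PyMem_RawFree;        /* matches the allocator used above */ |
 | |     return 0; |
 | | } |
 | |
 | | /* ...then, e.g. during module init: |
 | |  *     if (_PyCrossInterpreterData_RegisterClass(&MyType_Type, _my_shared) < 0) { |
 | |  *         return NULL; |
 | |  *     } |
 | |  */ |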
1566 | | |
1567 | | /* Cross-interpreter objects are looked up by exact match on the class. |
1568 | | We can reassess this policy when we move from a global registry to a |
1569 | | tp_* slot. */ |
1570 | | |
1571 | | crossinterpdatafunc |
1572 | | _PyCrossInterpreterData_Lookup(PyObject *obj) |
1573 | 0 | { |
1574 | 0 | struct _xidregistry *xidregistry = &_PyRuntime.xidregistry; |
1575 | 0 | PyObject *cls = PyObject_Type(obj); |
1576 | 0 | crossinterpdatafunc getdata = NULL; |
1577 | 0 | PyThread_acquire_lock(xidregistry->mutex, WAIT_LOCK); |
1578 | 0 | struct _xidregitem *cur = xidregistry->head; |
1579 | 0 | if (cur == NULL) { |
1580 | 0 | _register_builtins_for_crossinterpreter_data(xidregistry); |
1581 | 0 | cur = xidregistry->head; |
1582 | 0 | } |
1583 | 0 | for(; cur != NULL; cur = cur->next) { |
1584 | 0 | if (cur->cls == (PyTypeObject *)cls) { |
1585 | 0 | getdata = cur->getdata; |
1586 | 0 | break; |
1587 | 0 | } |
1588 | 0 | } |
1589 | 0 | Py_DECREF(cls); |
1590 | 0 | PyThread_release_lock(xidregistry->mutex); |
1591 | 0 | return getdata; |
1592 | 0 | } |
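 | |
 | | /* Illustrative sketch (not part of pystate.c): because the lookup above is an |
 | |  * exact type match, instances of subclasses of a registered type are not |
 | |  * shareable.  The helper name is hypothetical. */ |
 | | static int |
 | | _is_shareable_sketch(PyObject *obj) |
 | | { |
 | |     /* Nonzero for an exact bytes/str/int/None object once the builtins are |
 | |      * registered; zero for, e.g., an instance of a bytes subclass. */ |
 | |     return _PyCrossInterpreterData_Lookup(obj) != NULL; |
 | | } |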
1593 | | |
1594 | | /* cross-interpreter data for builtin types */ |
1595 | | |
1596 | | struct _shared_bytes_data { |
1597 | | char *bytes; |
1598 | | Py_ssize_t len; |
1599 | | }; |
1600 | | |
1601 | | static PyObject * |
1602 | | _new_bytes_object(_PyCrossInterpreterData *data) |
1603 | 0 | { |
1604 | 0 | struct _shared_bytes_data *shared = (struct _shared_bytes_data *)(data->data); |
1605 | 0 | return PyBytes_FromStringAndSize(shared->bytes, shared->len); |
1606 | 0 | } |
1607 | | |
1608 | | static int |
1609 | | _bytes_shared(PyObject *obj, _PyCrossInterpreterData *data) |
1610 | 0 | { |
1611 | 0 | struct _shared_bytes_data *shared = PyMem_NEW(struct _shared_bytes_data, 1); |
1612 | 0 | if (PyBytes_AsStringAndSize(obj, &shared->bytes, &shared->len) < 0) { |
1613 | 0 | return -1; |
1614 | 0 | } |
1615 | 0 | data->data = (void *)shared; |
1616 | 0 | Py_INCREF(obj); |
1617 | 0 | data->obj = obj; // Will be "released" (decref'ed) when data released. |
1618 | 0 | data->new_object = _new_bytes_object; |
1619 | 0 | data->free = PyMem_Free; |
1620 | 0 | return 0; |
1621 | 0 | } |
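 | |
 | | /* Illustrative sketch (not part of pystate.c): for shared bytes, the eventual |
 | |  * _release_xidata() call amounts to freeing the small wrapper struct and |
 | |  * dropping the reference that kept shared->bytes valid.  The helper name is |
 | |  * hypothetical. */ |
 | | static void |
 | | _release_bytes_sketch(_PyCrossInterpreterData *data) |
 | | { |
 | |     PyMem_Free(data->data);    /* the _shared_bytes_data wrapper */ |
 | |     Py_XDECREF(data->obj);     /* the original bytes object */ |
 | | } |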
1622 | | |
1623 | | struct _shared_str_data { |
1624 | | int kind; |
1625 | | const void *buffer; |
1626 | | Py_ssize_t len; |
1627 | | }; |
1628 | | |
1629 | | static PyObject * |
1630 | | _new_str_object(_PyCrossInterpreterData *data) |
1631 | 0 | { |
1632 | 0 | struct _shared_str_data *shared = (struct _shared_str_data *)(data->data); |
1633 | 0 | return PyUnicode_FromKindAndData(shared->kind, shared->buffer, shared->len); |
1634 | 0 | } |
1635 | | |
1636 | | static int |
1637 | | _str_shared(PyObject *obj, _PyCrossInterpreterData *data) |
1638 | 0 | { |
1639 | 0 | struct _shared_str_data *shared = PyMem_NEW(struct _shared_str_data, 1); |
1640 | 0 | shared->kind = PyUnicode_KIND(obj); |
1641 | 0 | shared->buffer = PyUnicode_DATA(obj); |
1642 | 0 | shared->len = PyUnicode_GET_LENGTH(obj); |
1643 | 0 | data->data = (void *)shared; |
1644 | 0 | Py_INCREF(obj); |
1645 | 0 | data->obj = obj; // Will be "released" (decref'ed) when data released. |
1646 | 0 | data->new_object = _new_str_object; |
1647 | 0 | data->free = PyMem_Free; |
1648 | 0 | return 0; |
1649 | 0 | } |
1650 | | |
1651 | | static PyObject * |
1652 | | _new_long_object(_PyCrossInterpreterData *data) |
1653 | 0 | { |
1654 | 0 | return PyLong_FromSsize_t((Py_ssize_t)(data->data)); |
1655 | 0 | } |
1656 | | |
1657 | | static int |
1658 | | _long_shared(PyObject *obj, _PyCrossInterpreterData *data) |
1659 | 0 | { |
1660 | | /* Note that this means the size of shareable ints is bounded by |
1661 | | * sys.maxsize. Hence on 32-bit architectures that is half the |
1662 | | * size of maximum shareable ints on 64-bit. |
1663 | | */ |
1664 | 0 | Py_ssize_t value = PyLong_AsSsize_t(obj); |
1665 | 0 | if (value == -1 && PyErr_Occurred()) { |
1666 | 0 | if (PyErr_ExceptionMatches(PyExc_OverflowError)) { |
1667 | 0 | PyErr_SetString(PyExc_OverflowError, "try sending as bytes"); |
1668 | 0 | } |
1669 | 0 | return -1; |
1670 | 0 | } |
1671 | 0 | data->data = (void *)value; |
1672 | 0 | data->obj = NULL; |
1673 | 0 | data->new_object = _new_long_object; |
1674 | 0 | data->free = NULL; |
1675 | 0 | return 0; |
1676 | 0 | } |
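 | |
 | | /* Illustrative sketch (not part of pystate.c): the int handler stores the |
 | |  * value directly in the data pointer, so nothing is allocated and free stays |
 | |  * NULL.  The helper name is hypothetical; conceptually the shared form of 42 |
 | |  * is just: */ |
 | | static void |
 | | _long_payload_sketch(_PyCrossInterpreterData *data) |
 | | { |
 | |     data->data = (void *)(Py_ssize_t)42;   /* payload lives in the pointer */ |
 | |     data->obj = NULL;                      /* no object kept alive */ |
 | |     data->new_object = _new_long_object;   /* later: PyLong_FromSsize_t(42) */ |
 | |     data->free = NULL;                     /* nothing to release */ |
 | | } |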
1677 | | |
1678 | | static PyObject * |
1679 | | _new_none_object(_PyCrossInterpreterData *data) |
1680 | 0 | { |
1681 | | // XXX Singleton refcounts are problematic across interpreters... |
1682 | 0 | Py_INCREF(Py_None); |
1683 | 0 | return Py_None; |
1684 | 0 | } |
1685 | | |
1686 | | static int |
1687 | | _none_shared(PyObject *obj, _PyCrossInterpreterData *data) |
1688 | 0 | { |
1689 | 0 | data->data = NULL; |
1690 | | // data->obj remains NULL |
1691 | 0 | data->new_object = _new_none_object; |
1692 | 0 | data->free = NULL; // There is nothing to free. |
1693 | 0 | return 0; |
1694 | 0 | } |
1695 | | |
1696 | | static void |
1697 | | _register_builtins_for_crossinterpreter_data(struct _xidregistry *xidregistry) |
1698 | 0 | { |
1699 | | // None |
1700 | 0 | if (_register_xidata(xidregistry, (PyTypeObject *)PyObject_Type(Py_None), _none_shared) != 0) { |
1701 | 0 | Py_FatalError("could not register None for cross-interpreter sharing"); |
1702 | 0 | } |
1703 | | |
1704 | | // int |
1705 | 0 | if (_register_xidata(xidregistry, &PyLong_Type, _long_shared) != 0) { |
1706 | 0 | Py_FatalError("could not register int for cross-interpreter sharing"); |
1707 | 0 | } |
1708 | | |
1709 | | // bytes |
1710 | 0 | if (_register_xidata(xidregistry, &PyBytes_Type, _bytes_shared) != 0) { |
1711 | 0 | Py_FatalError("could not register bytes for cross-interpreter sharing"); |
1712 | 0 | } |
1713 | | |
1714 | | // str |
1715 | 0 | if (_register_xidata(xidregistry, &PyUnicode_Type, _str_shared) != 0) { |
1716 | 0 | Py_FatalError("could not register str for cross-interpreter sharing"); |
1717 | 0 | } |
1718 | 0 | } |
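 | |
 | | /* Illustrative sketch (not part of pystate.c): after the lazy registration |
 | |  * above, exactly these builtin types resolve to a handler here (their |
 | |  * subclasses do not).  The helper name is hypothetical. */ |
 | | static int |
 | | _is_builtin_shareable_sketch(PyObject *obj) |
 | | { |
 | |     PyTypeObject *tp = Py_TYPE(obj); |
 | |     return (tp == Py_TYPE(Py_None) |
 | |             || tp == &PyLong_Type |
 | |             || tp == &PyBytes_Type |
 | |             || tp == &PyUnicode_Type); |
 | | } |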
1719 | | |
1720 | | |
1721 | | #ifdef __cplusplus |
1722 | | } |
1723 | | #endif |