Coverage Report

Created: 2025-07-11 06:59

/src/Python-3.8.3/Python/ceval_gil.h
Line   Count  Source
   1          /*
   2           * Implementation of the Global Interpreter Lock (GIL).
   3           */
   4
   5          #include <stdlib.h>
   6          #include <errno.h>
   7
   8          #include "pycore_atomic.h"
   9
  10
  11          /*
  12             Notes about the implementation:
  13
  14             - The GIL is just a boolean variable (locked) whose access is protected
  15               by a mutex (gil_mutex), and whose changes are signalled by a condition
  16               variable (gil_cond). gil_mutex is taken for short periods of time,
  17               and therefore mostly uncontended.
  18
  19             - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
  20               able to release the GIL on demand by another thread. A volatile boolean
  21               variable (gil_drop_request) is used for that purpose, which is checked
  22               at every turn of the eval loop. That variable is set after a wait of
  23               `interval` microseconds on `gil_cond` has timed out.
  24
  25                [Actually, another volatile boolean variable (eval_breaker) is used
  26                 which ORs several conditions into one. Volatile booleans are
  27                 sufficient as inter-thread signalling means since Python is run
  28                 on cache-coherent architectures only.]
  29
  30             - A thread wanting to take the GIL will first let pass a given amount of
  31               time (`interval` microseconds) before setting gil_drop_request. This
  32               encourages a defined switching period, but doesn't enforce it since
  33               opcodes can take an arbitrary time to execute.
  34
  35               The `interval` value is available for the user to read and modify
  36               using the Python API `sys.{get,set}switchinterval()`.
  37
  38             - When a thread releases the GIL and gil_drop_request is set, that thread
  39               ensures that another GIL-awaiting thread gets scheduled.
  40               It does so by waiting on a condition variable (switch_cond) until
  41               the value of last_holder is changed to something other than its
  42               own thread state pointer, indicating that another thread was able to
  43               take the GIL.
  44
  45               This is meant to prohibit the latency-adverse behaviour on multi-core
  46               machines where one thread would speculatively release the GIL, but still
  47               run and end up being the first to re-acquire it, making the "timeslices"
  48               much longer than expected.
  49               (Note: this mechanism is enabled with FORCE_SWITCHING above)
  50          */
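
The scheme in the notes above can be condensed to a few dozen lines. Below is a
minimal sketch, assuming plain pthreads and C11 atomics; the names toy_take_gil
and toy_drop_gil are invented for illustration, and the real implementation
(with its portability wrappers, atomics, and annotations) follows in the listing:

    #include <errno.h>
    #include <pthread.h>
    #include <stdatomic.h>
    #include <time.h>

    static pthread_mutex_t gil_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  gil_cond  = PTHREAD_COND_INITIALIZER;
    static int locked = 0;                  /* "the GIL is just a boolean" */
    static atomic_int gil_drop_request;     /* polled by the eval loop */

    /* Waiter side: wait up to interval_us for the GIL; on timeout, ask the
       holder to drop it, then keep waiting. */
    static void toy_take_gil(unsigned long interval_us)
    {
        pthread_mutex_lock(&gil_mutex);
        while (locked) {
            struct timespec deadline;
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec  += (time_t)(interval_us / 1000000);
            deadline.tv_nsec += (long)(interval_us % 1000000) * 1000;
            if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec += 1;
                deadline.tv_nsec -= 1000000000L;
            }
            int r = pthread_cond_timedwait(&gil_cond, &gil_mutex, &deadline);
            if (r == ETIMEDOUT && locked)
                atomic_store(&gil_drop_request, 1);  /* eval loop will see this */
        }
        locked = 1;
        pthread_mutex_unlock(&gil_mutex);
    }

    /* Holder side: called from the eval loop when gil_drop_request is set. */
    static void toy_drop_gil(void)
    {
        pthread_mutex_lock(&gil_mutex);
        locked = 0;
        atomic_store(&gil_drop_request, 0);
        pthread_cond_signal(&gil_cond);              /* wake one waiter */
        pthread_mutex_unlock(&gil_mutex);
        /* The real drop_gil() additionally waits for the switch to actually
           happen: the FORCE_SWITCHING handshake described in the notes. */
    }

An eval loop built on these would check gil_drop_request once per iteration
and, when set, call toy_drop_gil() followed immediately by toy_take_gil(),
re-contending for the lock like any other thread.
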
  51
  52          #include "condvar.h"
  53
  54          #define MUTEX_INIT(mut) \
  55      28      if (PyMUTEX_INIT(&(mut))) { \
  56      28          Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
  57          #define MUTEX_FINI(mut) \
  58       0      if (PyMUTEX_FINI(&(mut))) { \
  59       0          Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
  60          #define MUTEX_LOCK(mut) \
  61   23.8k      if (PyMUTEX_LOCK(&(mut))) { \
  62   23.8k          Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
  63          #define MUTEX_UNLOCK(mut) \
  64   23.8k      if (PyMUTEX_UNLOCK(&(mut))) { \
  65   23.8k          Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };
  66
  67          #define COND_INIT(cond) \
  68      28      if (PyCOND_INIT(&(cond))) { \
  69      28          Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
  70          #define COND_FINI(cond) \
  71       0      if (PyCOND_FINI(&(cond))) { \
  72       0          Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
  73          #define COND_SIGNAL(cond) \
  74   15.9k      if (PyCOND_SIGNAL(&(cond))) { \
  75   15.9k          Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
  76          #define COND_WAIT(cond, mut) \
  77       0      if (PyCOND_WAIT(&(cond), &(mut))) { \
  78       0          Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
  79          #define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
  80       0      { \
  81       0          int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
  82       0          if (r < 0) \
  83       0              Py_FatalError("PyCOND_TIMEDWAIT(" #cond ") failed"); \
  84       0          if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
  85       0              timeout_result = 1; \
  86       0          else \
  87       0              timeout_result = 0; \
  88       0      } \
  89
  90
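
Each wrapper above promotes an unchecked primitive into a fatal-on-failure
call, with the stringized argument (#mut, #cond) baked into the message. For
example, MUTEX_LOCK(gil->mutex) expands, after string-literal concatenation,
to roughly:

    if (PyMUTEX_LOCK(&(gil->mutex))) {
        Py_FatalError("PyMUTEX_LOCK(gil->mutex) failed");
    };
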
  91      14  #define DEFAULT_INTERVAL 5000
  92
  93          static void _gil_initialize(struct _gil_runtime_state *gil)
  94      14  {
  95      14      _Py_atomic_int uninitialized = {-1};
  96      14      gil->locked = uninitialized;
  97      14      gil->interval = DEFAULT_INTERVAL;
  98      14  }
  99
 100          static int gil_created(struct _gil_runtime_state *gil)
 101      28  {
 102      28      return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
 103      28  }
 104
 105          static void create_gil(struct _gil_runtime_state *gil)
 106      14  {
 107      14      MUTEX_INIT(gil->mutex);
 108      14  #ifdef FORCE_SWITCHING
 109      14      MUTEX_INIT(gil->switch_mutex);
 110      14  #endif
 111      14      COND_INIT(gil->cond);
 112      14  #ifdef FORCE_SWITCHING
 113      14      COND_INIT(gil->switch_cond);
 114      14  #endif
 115      14      _Py_atomic_store_relaxed(&gil->last_holder, 0);
 116      14      _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
 117      14      _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
 118      14  }
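
gil->locked is effectively tri-state: -1 means create_gil() has not run (or
the GIL has been destroyed), 0 means unlocked, 1 means held. A short sketch of
the intended lifecycle, assuming the structures above and <assert.h>:

    struct _gil_runtime_state gil;
    _gil_initialize(&gil);        /* locked == -1, interval == DEFAULT_INTERVAL */
    assert(!gil_created(&gil));   /* -1 < 0: OS resources not created yet */
    create_gil(&gil);             /* mutexes and condvars exist, locked == 0 */
    assert(gil_created(&gil));    /* 0 >= 0: ready for take_gil()/drop_gil() */
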
 119
 120          static void destroy_gil(struct _gil_runtime_state *gil)
 121       0  {
 122              /* some pthread-like implementations tie the mutex to the cond
 123               * and must have the cond destroyed first.
 124               */
 125       0      COND_FINI(gil->cond);
 126       0      MUTEX_FINI(gil->mutex);
 127       0  #ifdef FORCE_SWITCHING
 128       0      COND_FINI(gil->switch_cond);
 129       0      MUTEX_FINI(gil->switch_mutex);
 130       0  #endif
 131       0      _Py_atomic_store_explicit(&gil->locked, -1,
 132       0                                _Py_memory_order_release);
 133       0      _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
 134       0  }
 135
 136          static void recreate_gil(struct _gil_runtime_state *gil)
 137       0  {
 138       0      _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
 139              /* XXX should we destroy the old OS resources here? */
 140       0      create_gil(gil);
 141       0  }
 142
 143          static void
 144          drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
 145   7.95k  {
 146   7.95k      struct _gil_runtime_state *gil = &ceval->gil;
 147   7.95k      if (!_Py_atomic_load_relaxed(&gil->locked)) {
 148       0          Py_FatalError("drop_gil: GIL is not locked");
 149       0      }
 150
 151              /* tstate is allowed to be NULL (early interpreter init) */
 152   7.95k      if (tstate != NULL) {
 153                  /* Sub-interpreter support: threads might have been switched
 154                     under our feet using PyThreadState_Swap(). Fix the GIL last
 155                     holder variable so that our heuristics work. */
 156   7.95k          _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
 157   7.95k      }
 158
 159   7.95k      MUTEX_LOCK(gil->mutex);
 160   7.95k      _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
 161   7.95k      _Py_atomic_store_relaxed(&gil->locked, 0);
 162   7.95k      COND_SIGNAL(gil->cond);
 163   7.95k      MUTEX_UNLOCK(gil->mutex);
 164
 165   7.95k  #ifdef FORCE_SWITCHING
 166   7.95k      if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
 167       0          MUTEX_LOCK(gil->switch_mutex);
 168                  /* Not switched yet => wait */
 169       0          if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
 170       0          {
 171       0              RESET_GIL_DROP_REQUEST(ceval);
 172                      /* NOTE: if COND_WAIT does not atomically start waiting when
 173                         releasing the mutex, another thread can run through, take
 174                         the GIL and drop it again, and reset the condition
 175                         before we even had a chance to wait for it. */
 176       0              COND_WAIT(gil->switch_cond, gil->switch_mutex);
 177       0          }
 178       0          MUTEX_UNLOCK(gil->switch_mutex);
 179       0      }
 180   7.95k  #endif
 181   7.95k  }
 182
 183          static void
 184          take_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
 185   7.96k  {
 186   7.96k      if (tstate == NULL) {
 187       0          Py_FatalError("take_gil: NULL tstate");
 188       0      }
 189
 190   7.96k      struct _gil_runtime_state *gil = &ceval->gil;
 191   7.96k      int err = errno;
 192   7.96k      MUTEX_LOCK(gil->mutex);
 193
 194   7.96k      if (!_Py_atomic_load_relaxed(&gil->locked)) {
 195   7.96k          goto _ready;
 196   7.96k      }
 197
 198       0      while (_Py_atomic_load_relaxed(&gil->locked)) {
 199       0          int timed_out = 0;
 200       0          unsigned long saved_switchnum;
 201
 202       0          saved_switchnum = gil->switch_number;
 203
 204
 205       0          unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
 206       0          COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);
 207              /* If we timed out and no switch occurred in the meantime, it is time
 208                 to ask the GIL-holding thread to drop it. */
 209       0          if (timed_out &&
 210       0              _Py_atomic_load_relaxed(&gil->locked) &&
 211       0              gil->switch_number == saved_switchnum)
 212       0          {
 213       0              SET_GIL_DROP_REQUEST(ceval);
 214       0          }
 215       0      }
 216   7.96k  _ready:
 217   7.96k  #ifdef FORCE_SWITCHING
 218              /* This mutex must be taken before modifying gil->last_holder:
 219                 see drop_gil(). */
 220   7.96k      MUTEX_LOCK(gil->switch_mutex);
 221   7.96k  #endif
 222              /* We now hold the GIL */
 223   7.96k      _Py_atomic_store_relaxed(&gil->locked, 1);
 224   7.96k      _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);
 225
 226   7.96k      if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
 227      14          _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
 228      14          ++gil->switch_number;
 229      14      }
 230
 231   7.96k  #ifdef FORCE_SWITCHING
 232   7.96k      COND_SIGNAL(gil->switch_cond);
 233   7.96k      MUTEX_UNLOCK(gil->switch_mutex);
 234   7.96k  #endif
 235   7.96k      if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
 236       0          RESET_GIL_DROP_REQUEST(ceval);
 237       0      }
 238   7.96k      if (tstate->async_exc != NULL) {
 239       0          _PyEval_SignalAsyncExc(ceval);
 240       0      }
 241
 242   7.96k      MUTEX_UNLOCK(gil->mutex);
 243   7.96k      errno = err;
 244   7.96k  }
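
Together, drop_gil() and take_gil() implement the forced-switch handshake from
the notes: under FORCE_SWITCHING, the releasing thread parks on switch_cond
until some waiter has overwritten last_holder and bumped switch_number. A
compressed, illustrative timeline for two threads A and B:

    /* Thread A (holds the GIL, sees gil_drop_request)   Thread B (waiting)
     *
     * drop_gil(A):
     *   locked = 0; COND_SIGNAL(gil->cond)
     *   last_holder still == A
     *     => RESET_GIL_DROP_REQUEST();
     *        COND_WAIT(switch_cond, switch_mutex)
     *                                                    take_gil(B):
     *                                                      locked = 1
     *                                                      last_holder = B
     *                                                      ++switch_number
     *                                                      COND_SIGNAL(switch_cond)
     *   wakes up; must now re-enter take_gil(A) and
     *   contend like any other thread
     */
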
 245
 246          void _PyEval_SetSwitchInterval(unsigned long microseconds)
 247       0  {
 248       0      _PyRuntime.ceval.gil.interval = microseconds;
 249       0  }
 250
 251          unsigned long _PyEval_GetSwitchInterval()
 252       0  {
 253       0      return _PyRuntime.ceval.gil.interval;
 254       0  }
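
These two functions back sys.setswitchinterval() and sys.getswitchinterval(),
which deal in seconds and convert to and from the microsecond count stored in
gil.interval; the DEFAULT_INTERVAL of 5000 microseconds is what
sys.getswitchinterval() reports as 0.005. Equivalently, as a sketch:

    /* sys.setswitchinterval(0.005) in Python stores (unsigned long)(1e6 * 0.005): */
    _PyEval_SetSwitchInterval(5000);              /* microseconds */
    assert(_PyEval_GetSwitchInterval() == 5000);  /* 0.005 s to Python code */
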