/src/cpython/Python/critical_section.c
Line | Count | Source |
1 | | #include "Python.h" |
2 | | |
3 | | #include "pycore_lock.h" |
4 | | #include "pycore_critical_section.h" |
5 | | |
6 | | #ifdef Py_GIL_DISABLED |
7 | | static_assert(_Alignof(PyCriticalSection) >= 4, |
8 | | "critical section must be aligned to at least 4 bytes"); |
9 | | #endif |
10 | | |
#ifdef Py_GIL_DISABLED
// Recover the PyCriticalSection pointer from a tagged value stored in
// PyThreadState.critical_section by masking off the low tag bits.
static PyCriticalSection *
untag_critical_section(uintptr_t tag)
{
    uintptr_t addr = tag & ~_Py_CRITICAL_SECTION_MASK;
    return (PyCriticalSection *)addr;
}
#endif
18 | | |
// Slow path for entering a one-mutex critical section; presumably reached
// when the fast path could not acquire `m` uncontended -- confirm against
// pycore_critical_section.h.
void
_PyCriticalSection_BeginSlow(PyThreadState *tstate, PyCriticalSection *c, PyMutex *m)
{
#ifdef Py_GIL_DISABLED
    // As an optimisation for locking the same object recursively, skip
    // locking if the mutex is currently locked by the top-most critical
    // section.
    // If the top-most critical section is a two-mutex critical section,
    // then locking is skipped if either mutex is m.
    if (tstate->critical_section) {
        PyCriticalSection *prev = untag_critical_section(tstate->critical_section);
        if (prev->_cs_mutex == m) {
            // Recursive use: record this section as a no-op (no mutex, no
            // link into the per-thread chain) so End/SuspendAll skip it.
            c->_cs_mutex = NULL;
            c->_cs_prev = 0;
            return;
        }
        if (tstate->critical_section & _Py_CRITICAL_SECTION_TWO_MUTEXES) {
            PyCriticalSection2 *prev2 = (PyCriticalSection2 *)
                untag_critical_section(tstate->critical_section);
            if (prev2->_cs_mutex2 == m) {
                // Same recursion optimisation for the second mutex of a
                // two-mutex top-most section.
                c->_cs_mutex = NULL;
                c->_cs_prev = 0;
                return;
            }
        }
    }
    // Link this section into the per-thread chain *before* blocking, with
    // _cs_mutex still NULL: if the thread is suspended while waiting in
    // PyMutex_Lock() below, _PyCriticalSection_SuspendAll() sees the NULL
    // mutex and knows there is nothing to unlock for this section yet.
    c->_cs_mutex = NULL;
    c->_cs_prev = (uintptr_t)tstate->critical_section;
    tstate->critical_section = (uintptr_t)c;

    PyMutex_Lock(m);
    // Record the mutex only once it is actually held.
    c->_cs_mutex = m;
#endif
}
53 | | |
// Slow path for entering a two-mutex critical section.  `is_m1_locked` is
// non-zero when the caller already holds m1, in which case only m2 is
// acquired here.
void
_PyCriticalSection2_BeginSlow(PyThreadState *tstate, PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
                              int is_m1_locked)
{
#ifdef Py_GIL_DISABLED
    // Publish the section with NULL mutex pointers and link it into the
    // per-thread chain *before* blocking: if the thread is suspended while
    // waiting in PyMutex_Lock() below, _PyCriticalSection_SuspendAll()
    // sees the NULL pointers and skips unlocking.
    c->_cs_base._cs_mutex = NULL;
    c->_cs_mutex2 = NULL;
    c->_cs_base._cs_prev = tstate->critical_section;
    // Tag the chain entry so walkers know this is a PyCriticalSection2.
    tstate->critical_section = (uintptr_t)c | _Py_CRITICAL_SECTION_TWO_MUTEXES;

    if (!is_m1_locked) {
        PyMutex_Lock(m1);
    }
    PyMutex_Lock(m2);
    // Record the mutexes only once both locks are actually held.
    c->_cs_base._cs_mutex = m1;
    c->_cs_mutex2 = m2;
#endif
}
72 | | |
73 | | |
// Release all locks held by critical sections. This is called by
// _PyThreadState_Detach.
void
_PyCriticalSection_SuspendAll(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    // Walk the per-thread chain from the top-most section downwards,
    // unlocking each section's mutex(es) and marking the section inactive.
    // The walk stops at the first entry that is not active.
    uintptr_t *tagptr = &tstate->critical_section;
    while (_PyCriticalSection_IsActive(*tagptr)) {
        PyCriticalSection *c = untag_critical_section(*tagptr);

        // A NULL _cs_mutex means this section never finished acquiring its
        // lock (recursive no-op, or still blocked in BeginSlow); there is
        // nothing to release for it.
        if (c->_cs_mutex) {
            PyMutex_Unlock(c->_cs_mutex);
            if ((*tagptr & _Py_CRITICAL_SECTION_TWO_MUTEXES)) {
                PyCriticalSection2 *c2 = (PyCriticalSection2 *)c;
                if (c2->_cs_mutex2) {
                    PyMutex_Unlock(c2->_cs_mutex2);
                }
            }
        }

        // Flag this section inactive so _PyCriticalSection_Resume() knows
        // it must re-acquire the locks, then continue with the previous
        // (outer) section.
        *tagptr |= _Py_CRITICAL_SECTION_INACTIVE;
        tagptr = &c->_cs_prev;
    }
#endif
}
99 | | |
// Re-acquire the lock(s) of the top-most (suspended) critical section and
// mark it active again.
void
_PyCriticalSection_Resume(PyThreadState *tstate)
{
#ifdef Py_GIL_DISABLED
    uintptr_t p = tstate->critical_section;
    PyCriticalSection *c = untag_critical_section(p);
    // Only a suspended (inactive) section may be resumed.
    assert(!_PyCriticalSection_IsActive(p));

    // Clear the stored mutex pointers before re-locking: if this thread is
    // suspended again while blocked in PyMutex_Lock() below,
    // _PyCriticalSection_SuspendAll() must not try to unlock mutexes that
    // are not currently held.
    PyMutex *m1 = c->_cs_mutex;
    c->_cs_mutex = NULL;

    PyMutex *m2 = NULL;
    PyCriticalSection2 *c2 = NULL;
    if ((p & _Py_CRITICAL_SECTION_TWO_MUTEXES)) {
        c2 = (PyCriticalSection2 *)c;
        m2 = c2->_cs_mutex2;
        c2->_cs_mutex2 = NULL;
    }

    // Re-acquire in the same order as BeginSlow: m1 first, then m2.
    if (m1) {
        PyMutex_Lock(m1);
    }
    if (m2) {
        PyMutex_Lock(m2);
    }

    // Locks are held again: restore the pointers ...
    c->_cs_mutex = m1;
    if (m2) {
        c2->_cs_mutex2 = m2;
    }

    // ... and clear the inactive flag on the top-most tag.
    tstate->critical_section &= ~_Py_CRITICAL_SECTION_INACTIVE;
#endif
}
134 | | |
135 | | #undef PyCriticalSection_Begin |
136 | | void |
137 | | PyCriticalSection_Begin(PyCriticalSection *c, PyObject *op) |
138 | 0 | { |
139 | | #ifdef Py_GIL_DISABLED |
140 | | _PyCriticalSection_Begin(_PyThreadState_GET(), c, op); |
141 | | #endif |
142 | 0 | } |
143 | | |
144 | | #undef PyCriticalSection_BeginMutex |
145 | | void |
146 | | PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m) |
147 | 0 | { |
148 | | #ifdef Py_GIL_DISABLED |
149 | | _PyCriticalSection_BeginMutex(_PyThreadState_GET(), c, m); |
150 | | #endif |
151 | 0 | } |
152 | | |
153 | | #undef PyCriticalSection_End |
154 | | void |
155 | | PyCriticalSection_End(PyCriticalSection *c) |
156 | 0 | { |
157 | | #ifdef Py_GIL_DISABLED |
158 | | _PyCriticalSection_End(_PyThreadState_GET(), c); |
159 | | #endif |
160 | 0 | } |
161 | | |
162 | | #undef PyCriticalSection2_Begin |
163 | | void |
164 | | PyCriticalSection2_Begin(PyCriticalSection2 *c, PyObject *a, PyObject *b) |
165 | 0 | { |
166 | | #ifdef Py_GIL_DISABLED |
167 | | _PyCriticalSection2_Begin(_PyThreadState_GET(), c, a, b); |
168 | | #endif |
169 | 0 | } |
170 | | |
171 | | #undef PyCriticalSection2_BeginMutex |
172 | | void |
173 | | PyCriticalSection2_BeginMutex(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2) |
174 | 0 | { |
175 | | #ifdef Py_GIL_DISABLED |
176 | | _PyCriticalSection2_BeginMutex(_PyThreadState_GET(), c, m1, m2); |
177 | | #endif |
178 | 0 | } |
179 | | |
180 | | #undef PyCriticalSection2_End |
181 | | void |
182 | | PyCriticalSection2_End(PyCriticalSection2 *c) |
183 | 0 | { |
184 | | #ifdef Py_GIL_DISABLED |
185 | | _PyCriticalSection2_End(_PyThreadState_GET(), c); |
186 | | #endif |
187 | 0 | } |