/src/openvswitch/lib/ovs-thread.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2013, 2014, 2015, 2016 Nicira, Inc. |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at: |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <config.h> |
18 | | #include "ovs-thread.h" |
19 | | #include <errno.h> |
20 | | #include <poll.h> |
21 | | #ifndef _WIN32 |
22 | | #include <signal.h> |
23 | | #endif |
24 | | #include <stdlib.h> |
25 | | #include <unistd.h> |
26 | | #include "compiler.h" |
27 | | #include "fatal-signal.h" |
28 | | #include "hash.h" |
29 | | #include "openvswitch/list.h" |
30 | | #include "ovs-rcu.h" |
31 | | #include "openvswitch/poll-loop.h" |
32 | | #include "seq.h" |
33 | | #include "socket-util.h" |
34 | | #include "timeval.h" |
35 | | #include "util.h" |
36 | | |
37 | | #ifdef __CHECKER__ |
38 | | /* Omit the definitions in this file because they are somewhat difficult to |
39 | | * write without prompting "sparse" complaints, without ugliness or |
40 | | * cut-and-paste. Since "sparse" is just a checker, not a compiler, it |
41 | | * doesn't matter that we don't define them. */ |
42 | | #else |
43 | | #include "openvswitch/vlog.h" |
44 | | |
45 | | VLOG_DEFINE_THIS_MODULE(ovs_thread); |
46 | | |
47 | | /* If there is a reason that we cannot fork anymore (unless the fork will be |
48 | | * immediately followed by an exec), then this points to a string that |
49 | | * explains why. */ |
50 | | static const char *must_not_fork; |
51 | | |
52 | | /* True if we created any threads beyond the main initial thread. */ |
53 | | static bool multithreaded; |
54 | | |
55 | | #define LOCK_FUNCTION(TYPE, FUN) \ |
56 | | void \ |
57 | | ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \ |
58 | | const char *where) \ |
59 | | OVS_NO_THREAD_SAFETY_ANALYSIS \ |
60 | 62 | { \ |
61 | 62 | struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \ |
62 | 62 | int error; \ |
63 | 62 | \ |
64 | 62 | /* Verify that 'l' was initialized. */ \ |
65 | 62 | if (OVS_UNLIKELY(!l->where)) { \ |
66 | 0 | VLOG_ABORT("%s: %s() passed uninitialized ovs_"#TYPE, \ |
67 | 0 | where, __func__); \ |
68 | 0 | } \ |
69 | 62 | \ |
70 | 62 | error = pthread_##TYPE##_##FUN(&l->lock); \ |
71 | 62 | if (OVS_UNLIKELY(error)) { \ |
72 | 0 | VLOG_ABORT("%s: pthread_%s_%s failed: %s", where, #TYPE, #FUN, \ |
73 | 0 | ovs_strerror(error)); \ |
74 | 0 | } \ |
75 | 62 | l->where = where; \ |
76 | 62 | } |
Unexecuted instantiation: ovs_rwlock_rdlock_at
Unexecuted instantiation: ovs_rwlock_wrlock_at
Unexecuted instantiation: ovs_spin_lock_at
77 | | LOCK_FUNCTION(mutex, lock); |
78 | | LOCK_FUNCTION(rwlock, rdlock); |
79 | | LOCK_FUNCTION(rwlock, wrlock); |
80 | | #ifdef HAVE_PTHREAD_SPIN_LOCK |
81 | | LOCK_FUNCTION(spin, lock); |
82 | | #endif |
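
Callers normally do not invoke these _at functions directly: ovs-thread.h wraps each one in a macro that passes the caller's source location, which the function then records in the lock's 'where' member. A minimal sketch of that pattern (the wrapper macro shown is an assumption about the header, not code from this file):

    /* Assumed wrapper from ovs-thread.h; OVS_SOURCE_LOCATOR expands to a
     * "file:line" string literal at the call site. */
    #define ovs_mutex_lock(mutex) ovs_mutex_lock_at(mutex, OVS_SOURCE_LOCATOR)

    static struct ovs_mutex example_mutex = OVS_MUTEX_INITIALIZER;

    static void
    locking_example(void)
    {
        ovs_mutex_lock(&example_mutex);    /* Stores the call site in 'where'. */
        /* ...critical section... */
        ovs_mutex_unlock(&example_mutex);  /* Resets 'where' to "<unlocked>". */
    }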
83 | | |
84 | | #define TRY_LOCK_FUNCTION(TYPE, FUN) \ |
85 | | int \ |
86 | | ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \ |
87 | | const char *where) \ |
88 | | OVS_NO_THREAD_SAFETY_ANALYSIS \ |
89 | 0 | { \ |
90 | 0 | struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \ |
91 | 0 | int error; \ |
92 | 0 | \ |
93 | 0 | /* Verify that 'l' was initialized. */ \ |
94 | 0 | if (OVS_UNLIKELY(!l->where)) { \ |
95 | 0 | VLOG_ABORT("%s: %s() passed uninitialized ovs_"#TYPE, \ |
96 | 0 | where, __func__); \ |
97 | 0 | } \ |
98 | 0 | \ |
99 | 0 | error = pthread_##TYPE##_##FUN(&l->lock); \ |
100 | 0 | if (OVS_UNLIKELY(error) && error != EBUSY) { \ |
101 | 0 | VLOG_ABORT("%s: pthread_%s_%s failed: %s", where, #TYPE, #FUN, \ |
102 | 0 | ovs_strerror(error)); \ |
103 | 0 | } \ |
104 | 0 | if (!error) { \ |
105 | 0 | l->where = where; \ |
106 | 0 | } \ |
107 | 0 | return error; \ |
108 | 0 | } |
Unexecuted instantiation: ovs_mutex_trylock_at
Unexecuted instantiation: ovs_rwlock_tryrdlock_at
Unexecuted instantiation: ovs_rwlock_trywrlock_at
Unexecuted instantiation: ovs_spin_trylock_at
109 | | TRY_LOCK_FUNCTION(mutex, trylock); |
110 | | TRY_LOCK_FUNCTION(rwlock, tryrdlock); |
111 | | TRY_LOCK_FUNCTION(rwlock, trywrlock); |
112 | | #ifdef HAVE_PTHREAD_SPIN_LOCK |
113 | | TRY_LOCK_FUNCTION(spin, trylock); |
114 | | #endif |
115 | | |
116 | | #define UNLOCK_FUNCTION(TYPE, FUN, WHERE) \ |
117 | | void \ |
118 | | ovs_##TYPE##_##FUN(const struct ovs_##TYPE *l_) \ |
119 | | OVS_NO_THREAD_SAFETY_ANALYSIS \ |
120 | 62 | { \ |
121 | 62 | struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \ |
122 | 62 | int error; \ |
123 | 62 | \ |
124 | 62 | /* Verify that 'l' was initialized. */ \ |
125 | 62 | ovs_assert(l->where); \ |
126 | 62 | \ |
127 | 62 | l->where = WHERE; \ |
128 | 62 | error = pthread_##TYPE##_##FUN(&l->lock); \ |
129 | 62 | if (OVS_UNLIKELY(error)) { \ |
130 | 0 | VLOG_ABORT("%s: pthread_%s_%s failed: %s", l->where, #TYPE, #FUN, \ |
131 | 0 | ovs_strerror(error)); \ |
132 | 0 | } \ |
133 | 62 | } |
Unexecuted instantiation: ovs_mutex_destroy
Unexecuted instantiation: ovs_rwlock_unlock
Unexecuted instantiation: ovs_rwlock_destroy
Unexecuted instantiation: ovs_spin_unlock
Unexecuted instantiation: ovs_spin_destroy
134 | | UNLOCK_FUNCTION(mutex, unlock, "<unlocked>"); |
135 | | UNLOCK_FUNCTION(mutex, destroy, NULL); |
136 | | UNLOCK_FUNCTION(rwlock, unlock, "<unlocked>"); |
137 | | UNLOCK_FUNCTION(rwlock, destroy, NULL); |
138 | | #ifdef HAVE_PTHREAD_SPIN_LOCK |
139 | | UNLOCK_FUNCTION(spin, unlock, "<unlocked>"); |
140 | | UNLOCK_FUNCTION(spin, destroy, NULL); |
141 | | #endif |
142 | | |
143 | | #define XPTHREAD_FUNC1(FUNCTION, PARAM1) \ |
144 | | void \ |
145 | | x##FUNCTION(PARAM1 arg1) \ |
146 | 0 | { \ |
147 | 0 | int error = FUNCTION(arg1); \ |
148 | 0 | if (OVS_UNLIKELY(error)) { \ |
149 | 0 | VLOG_ABORT("%s failed: %s", #FUNCTION, \ |
150 | 0 | ovs_strerror(error)); \ |
151 | 0 | } \ |
152 | 0 | } |
Unexecuted instantiation: xpthread_mutexattr_init
Unexecuted instantiation: xpthread_mutexattr_destroy
Unexecuted instantiation: xpthread_rwlockattr_init
Unexecuted instantiation: xpthread_rwlockattr_destroy
Unexecuted instantiation: xpthread_cond_destroy
Unexecuted instantiation: xpthread_cond_signal
Unexecuted instantiation: xpthread_cond_broadcast
Unexecuted instantiation: xpthread_key_delete
153 | | #define XPTHREAD_FUNC2(FUNCTION, PARAM1, PARAM2) \ |
154 | | void \ |
155 | | x##FUNCTION(PARAM1 arg1, PARAM2 arg2) \ |
156 | 0 | { \ |
157 | 0 | int error = FUNCTION(arg1, arg2); \ |
158 | 0 | if (OVS_UNLIKELY(error)) { \ |
159 | 0 | VLOG_ABORT("%s failed: %s", #FUNCTION, \ |
160 | 0 | ovs_strerror(error)); \ |
161 | 0 | } \ |
162 | 0 | } |
Unexecuted instantiation: xpthread_mutexattr_settype
Unexecuted instantiation: xpthread_mutexattr_gettype
Unexecuted instantiation: xpthread_rwlockattr_setkind_np
Unexecuted instantiation: xpthread_cond_init
Unexecuted instantiation: xpthread_join
Unexecuted instantiation: xpthread_key_create
Unexecuted instantiation: xpthread_setspecific
163 | | #define XPTHREAD_FUNC3(FUNCTION, PARAM1, PARAM2, PARAM3)\ |
164 | | void \ |
165 | | x##FUNCTION(PARAM1 arg1, PARAM2 arg2, PARAM3 arg3) \ |
166 | 0 | { \ |
167 | 0 | int error = FUNCTION(arg1, arg2, arg3); \ |
168 | 0 | if (OVS_UNLIKELY(error)) { \ |
169 | 0 | VLOG_ABORT("%s failed: %s", #FUNCTION, \ |
170 | 0 | ovs_strerror(error)); \ |
171 | 0 | } \ |
172 | 0 | } |
173 | | |
174 | | XPTHREAD_FUNC1(pthread_mutexattr_init, pthread_mutexattr_t *); |
175 | | XPTHREAD_FUNC1(pthread_mutexattr_destroy, pthread_mutexattr_t *); |
176 | | XPTHREAD_FUNC2(pthread_mutexattr_settype, pthread_mutexattr_t *, int); |
177 | | XPTHREAD_FUNC2(pthread_mutexattr_gettype, pthread_mutexattr_t *, int *); |
178 | | |
179 | | XPTHREAD_FUNC1(pthread_rwlockattr_init, pthread_rwlockattr_t *); |
180 | | XPTHREAD_FUNC1(pthread_rwlockattr_destroy, pthread_rwlockattr_t *); |
181 | | #ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP |
182 | | XPTHREAD_FUNC2(pthread_rwlockattr_setkind_np, pthread_rwlockattr_t *, int); |
183 | | #endif |
184 | | |
185 | | XPTHREAD_FUNC2(pthread_cond_init, pthread_cond_t *, pthread_condattr_t *); |
186 | | XPTHREAD_FUNC1(pthread_cond_destroy, pthread_cond_t *); |
187 | | XPTHREAD_FUNC1(pthread_cond_signal, pthread_cond_t *); |
188 | | XPTHREAD_FUNC1(pthread_cond_broadcast, pthread_cond_t *); |
189 | | |
190 | | XPTHREAD_FUNC2(pthread_join, pthread_t, void **); |
191 | | |
192 | | typedef void destructor_func(void *); |
193 | | XPTHREAD_FUNC2(pthread_key_create, pthread_key_t *, destructor_func *); |
194 | | XPTHREAD_FUNC1(pthread_key_delete, pthread_key_t); |
195 | | XPTHREAD_FUNC2(pthread_setspecific, pthread_key_t, const void *); |
196 | | |
197 | | #ifndef _WIN32 |
198 | | XPTHREAD_FUNC3(pthread_sigmask, int, const sigset_t *, sigset_t *); |
199 | | #endif |
200 | | |
201 | | static void |
202 | | ovs_mutex_init__(const struct ovs_mutex *l_, int type) |
203 | 0 | { |
204 | 0 | struct ovs_mutex *l = CONST_CAST(struct ovs_mutex *, l_); |
205 | 0 | pthread_mutexattr_t attr; |
206 | 0 | int error; |
207 | |
208 | 0 | l->where = "<unlocked>"; |
209 | 0 | xpthread_mutexattr_init(&attr); |
210 | 0 | xpthread_mutexattr_settype(&attr, type); |
211 | 0 | error = pthread_mutex_init(&l->lock, &attr); |
212 | 0 | if (OVS_UNLIKELY(error)) { |
213 | 0 | VLOG_ABORT("pthread_mutex_init failed: %s", ovs_strerror(error)); |
214 | 0 | } |
215 | 0 | xpthread_mutexattr_destroy(&attr); |
216 | 0 | } |
217 | | |
218 | | /* Initializes 'mutex' as a normal (non-recursive) mutex. */ |
219 | | void |
220 | | ovs_mutex_init(const struct ovs_mutex *mutex) |
221 | 0 | { |
222 | 0 | ovs_mutex_init__(mutex, PTHREAD_MUTEX_ERRORCHECK); |
223 | 0 | } |
224 | | |
225 | | /* Initializes 'mutex' as a recursive mutex. */ |
226 | | void |
227 | | ovs_mutex_init_recursive(const struct ovs_mutex *mutex) |
228 | 0 | { |
229 | 0 | ovs_mutex_init__(mutex, PTHREAD_MUTEX_RECURSIVE); |
230 | 0 | } |
231 | | |
232 | | /* Initializes 'mutex' as an adaptive mutex, if supported. */ |
233 | | void |
234 | | ovs_mutex_init_adaptive(const struct ovs_mutex *mutex) |
235 | 0 | { |
236 | 0 | #ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP |
237 | 0 | ovs_mutex_init__(mutex, PTHREAD_MUTEX_ADAPTIVE_NP); |
238 | | #else |
239 | | ovs_mutex_init(mutex); |
240 | | #endif |
241 | 0 | } |
242 | | |
243 | | void |
244 | | ovs_rwlock_init(const struct ovs_rwlock *l_) |
245 | 0 | { |
246 | 0 | struct ovs_rwlock *l = CONST_CAST(struct ovs_rwlock *, l_); |
247 | 0 | int error; |
248 | |
249 | 0 | l->where = "<unlocked>"; |
250 | |
251 | 0 | #ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP |
252 | 0 | pthread_rwlockattr_t attr; |
253 | 0 | xpthread_rwlockattr_init(&attr); |
254 | 0 | xpthread_rwlockattr_setkind_np( |
255 | 0 | &attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); |
256 | 0 | error = pthread_rwlock_init(&l->lock, &attr); |
257 | 0 | xpthread_rwlockattr_destroy(&attr); |
258 | | #else |
259 | | /* It is important to avoid passing a rwlockattr in this case because |
260 | | * Windows pthreads 2.9.1 (and earlier) fail and abort if passed one, even |
261 | | * one without any special attributes. */ |
262 | | error = pthread_rwlock_init(&l->lock, NULL); |
263 | | #endif |
264 | |
265 | 0 | if (OVS_UNLIKELY(error)) { |
266 | 0 | VLOG_ABORT("pthread_rwlock_init failed: %s", ovs_strerror(error)); |
267 | 0 | } |
268 | 0 | } |
269 | | |
270 | | /* Provides an error-checking wrapper around pthread_cond_wait(). |
271 | | * |
272 | | * If the wait can take a significant amount of time, consider bracketing this |
273 | | * call with calls to ovsrcu_quiesce_start() and ovsrcu_quiesce_end(). */ |
274 | | void |
275 | | ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_) |
276 | | OVS_NO_THREAD_SAFETY_ANALYSIS |
277 | 0 | { |
278 | 0 | struct ovs_mutex *mutex = CONST_CAST(struct ovs_mutex *, mutex_); |
279 | 0 | int error; |
280 | |
281 | 0 | error = pthread_cond_wait(cond, &mutex->lock); |
282 | |
283 | 0 | if (OVS_UNLIKELY(error)) { |
284 | 0 | VLOG_ABORT("pthread_cond_wait failed: %s", ovs_strerror(error)); |
285 | 0 | } |
286 | 0 | } |
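
As with plain pthread_cond_wait(), wakeups may be spurious, so callers should re-check their predicate in a loop while holding the mutex. A sketch of the usual pattern ('data_ready' and the condition variable are hypothetical):

    static struct ovs_mutex wait_mutex = OVS_MUTEX_INITIALIZER;
    static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER;
    static bool data_ready;            /* Hypothetical predicate. */

    static void
    wait_for_data(void)
    {
        ovs_mutex_lock(&wait_mutex);
        while (!data_ready) {          /* Loop: wakeups may be spurious. */
            ovs_mutex_cond_wait(&wait_cond, &wait_mutex);
        }
        ovs_mutex_unlock(&wait_mutex);
    }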
287 | | |
288 | | #ifdef HAVE_PTHREAD_SPIN_LOCK |
289 | | static void |
290 | | ovs_spin_init__(const struct ovs_spin *l_, int pshared) |
291 | 0 | { |
292 | 0 | struct ovs_spin *l = CONST_CAST(struct ovs_spin *, l_); |
293 | 0 | int error; |
294 | |
295 | 0 | l->where = "<unlocked>"; |
296 | 0 | error = pthread_spin_init(&l->lock, pshared); |
297 | 0 | if (OVS_UNLIKELY(error)) { |
298 | 0 | VLOG_ABORT("pthread_spin_init failed: %s", ovs_strerror(error)); |
299 | 0 | } |
300 | 0 | } |
301 | | |
302 | | void |
303 | | ovs_spin_init(const struct ovs_spin *spin) |
304 | 0 | { |
305 | 0 | ovs_spin_init__(spin, PTHREAD_PROCESS_PRIVATE); |
306 | 0 | } |
307 | | #endif |
308 | | |
309 | | struct ovs_barrier_impl { |
310 | | uint32_t size; /* Number of threads to wait for. */ |
311 | | atomic_count count; /* Number of threads that have hit the barrier. */ |
312 | | struct seq *seq; |
313 | | struct ovs_refcount refcnt; |
314 | | }; |
315 | | |
316 | | static void |
317 | | ovs_barrier_impl_ref(struct ovs_barrier_impl *impl) |
318 | 0 | { |
319 | 0 | ovs_refcount_ref(&impl->refcnt); |
320 | 0 | } |
321 | | |
322 | | static void |
323 | | ovs_barrier_impl_unref(struct ovs_barrier_impl *impl) |
324 | 0 | { |
325 | 0 | if (ovs_refcount_unref(&impl->refcnt) == 1) { |
326 | 0 | seq_destroy(impl->seq); |
327 | 0 | free(impl); |
328 | 0 | } |
329 | 0 | } |
330 | | |
331 | | /* Initializes the 'barrier'. 'size' is the number of threads |
332 | | * expected to hit the barrier. */ |
333 | | void |
334 | | ovs_barrier_init(struct ovs_barrier *barrier, uint32_t size) |
335 | 0 | { |
336 | 0 | struct ovs_barrier_impl *impl; |
337 | |
338 | 0 | impl = xmalloc(sizeof *impl); |
339 | 0 | impl->size = size; |
340 | 0 | atomic_count_init(&impl->count, 0); |
341 | 0 | impl->seq = seq_create(); |
342 | 0 | ovs_refcount_init(&impl->refcnt); |
343 | |
344 | 0 | ovsrcu_set(&barrier->impl, impl); |
345 | 0 | } |
346 | | |
347 | | /* Destroys the 'barrier'. */ |
348 | | void |
349 | | ovs_barrier_destroy(struct ovs_barrier *barrier) |
350 | 0 | { |
351 | 0 | struct ovs_barrier_impl *impl; |
352 | |
353 | 0 | impl = ovsrcu_get(struct ovs_barrier_impl *, &barrier->impl); |
354 | 0 | ovsrcu_set(&barrier->impl, NULL); |
355 | 0 | ovs_barrier_impl_unref(impl); |
356 | 0 | } |
357 | | |
358 | | /* Makes the calling thread block on the 'barrier' until all |
359 | | * 'barrier->size' threads hit the barrier. |
360 | | * ovs_barrier provides the necessary acquire-release semantics to make |
361 | | * the effects of prior memory accesses of all the participating threads |
362 | | * visible on return and to prevent the following memory accesses from |
363 | | * being reordered before the ovs_barrier_block(). */ |
364 | | void |
365 | | ovs_barrier_block(struct ovs_barrier *barrier) |
366 | 0 | { |
367 | 0 | struct ovs_barrier_impl *impl; |
368 | 0 | uint32_t orig; |
369 | 0 | uint64_t seq; |
370 | |
371 | 0 | impl = ovsrcu_get(struct ovs_barrier_impl *, &barrier->impl); |
372 | 0 | ovs_barrier_impl_ref(impl); |
373 | |
374 | 0 | seq = seq_read(impl->seq); |
375 | 0 | orig = atomic_count_inc(&impl->count); |
376 | 0 | if (orig + 1 == impl->size) { |
377 | 0 | atomic_count_set(&impl->count, 0); |
378 | | /* seq_change() serves as a release barrier against the other threads, |
379 | | * so the zeroed count is visible to them as they continue. */ |
380 | 0 | seq_change(impl->seq); |
381 | 0 | } else { |
382 | | /* To prevent the thread from being woken up by another event, |
383 | | * keep waiting until 'barrier->seq' changes. */ |
384 | 0 | while (seq == seq_read(impl->seq)) { |
385 | 0 | seq_wait(impl->seq, seq); |
386 | 0 | poll_block(); |
387 | 0 | } |
388 | 0 | } |
389 | |
390 | 0 | ovs_barrier_impl_unref(impl); |
391 | 0 | } |
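
For illustration, a sketch of typical barrier use by a pool of worker threads (N_WORKERS and the worker body are hypothetical):

    #define N_WORKERS 4                /* Hypothetical thread count. */

    static struct ovs_barrier barrier;

    static void *
    worker_main(void *arg OVS_UNUSED)
    {
        /* ...per-thread setup whose effects must be globally visible... */
        ovs_barrier_block(&barrier);   /* Blocks until all N_WORKERS arrive. */
        /* ...all workers continue together from here... */
        return NULL;
    }

    static void
    start_workers(void)
    {
        ovs_barrier_init(&barrier, N_WORKERS);
        for (int i = 0; i < N_WORKERS; i++) {
            ovs_thread_create("worker", worker_main, NULL);
        }
    }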
392 | | |
393 | | DEFINE_EXTERN_PER_THREAD_DATA(ovsthread_id, OVSTHREAD_ID_UNSET); |
394 | | |
395 | | struct ovsthread_aux { |
396 | | void *(*start)(void *); |
397 | | void *arg; |
398 | | char name[16]; |
399 | | }; |
400 | | |
401 | | unsigned int |
402 | | ovsthread_id_init(void) |
403 | 0 | { |
404 | 0 | static atomic_count next_id = ATOMIC_COUNT_INIT(0); |
405 | |
406 | 0 | ovs_assert(*ovsthread_id_get() == OVSTHREAD_ID_UNSET); |
407 | 0 | return *ovsthread_id_get() = atomic_count_inc(&next_id); |
408 | 0 | } |
409 | | |
410 | | static void * |
411 | | ovsthread_wrapper(void *aux_) |
412 | 0 | { |
413 | 0 | struct ovsthread_aux *auxp = aux_; |
414 | 0 | struct ovsthread_aux aux; |
415 | 0 | unsigned int id; |
416 | |
417 | 0 | id = ovsthread_id_init(); |
418 | |
419 | 0 | aux = *auxp; |
420 | 0 | free(auxp); |
421 | | |
422 | | /* The order of the following calls is important, because |
423 | | * ovsrcu_quiesce_end() saves a copy of the thread name. */ |
424 | 0 | char *subprogram_name = xasprintf("%s%u", aux.name, id); |
425 | 0 | set_subprogram_name(subprogram_name); |
426 | 0 | free(subprogram_name); |
427 | 0 | ovsrcu_quiesce_end(); |
428 | |
429 | 0 | return aux.start(aux.arg); |
430 | 0 | } |
431 | | |
432 | | static void |
433 | | set_min_stack_size(pthread_attr_t *attr, size_t min_stacksize) |
434 | 0 | { |
435 | 0 | size_t stacksize; |
436 | 0 | int error; |
437 | |
438 | 0 | error = pthread_attr_getstacksize(attr, &stacksize); |
439 | 0 | if (error) { |
440 | 0 | VLOG_ABORT("pthread_attr_getstacksize failed: %s", |
441 | 0 | ovs_strerror(error)); |
442 | 0 | } |
443 | | |
444 | 0 | if (stacksize < min_stacksize) { |
445 | 0 | error = pthread_attr_setstacksize(attr, min_stacksize); |
446 | 0 | if (error) { |
447 | 0 | VLOG_ABORT("pthread_attr_setstacksize failed: %s", |
448 | 0 | ovs_strerror(error)); |
449 | 0 | } |
450 | 0 | } |
451 | 0 | } |
452 | | |
453 | | /* Starts a thread that calls 'start(arg)'. Sets the thread's name to 'name' |
454 | | * (suffixed by its ovsthread_id()). Returns the new thread's pthread_t. */ |
455 | | pthread_t |
456 | | ovs_thread_create(const char *name, void *(*start)(void *), void *arg) |
457 | 0 | { |
458 | 0 | static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; |
459 | 0 | struct ovsthread_aux *aux; |
460 | 0 | pthread_t thread; |
461 | 0 | int error; |
462 | |
463 | 0 | forbid_forking("multiple threads exist"); |
464 | |
465 | 0 | if (ovsthread_once_start(&once)) { |
466 | | /* The first call to this function has to happen in the main thread. |
467 | | * Before the process becomes multithreaded we make sure that the |
468 | | * main thread is considered non-quiescent. |
469 | | * |
470 | | * For other threads this is done in ovsthread_wrapper(), but the |
471 | | * main thread has no such wrapper. |
472 | | * |
473 | | * There's no reason to call ovsrcu_quiesce_end() in subsequent |
474 | | * invocations of this function and it might introduce problems |
475 | | * for other threads. */ |
476 | 0 | ovsrcu_quiesce_end(); |
477 | 0 | ovsthread_once_done(&once); |
478 | 0 | } |
479 | |
480 | 0 | multithreaded = true; |
481 | 0 | aux = xmalloc(sizeof *aux); |
482 | 0 | aux->start = start; |
483 | 0 | aux->arg = arg; |
484 | 0 | ovs_strlcpy(aux->name, name, sizeof aux->name); |
485 | | |
486 | | /* Some small systems use a default stack size as small as 80 kB, but OVS |
487 | | * requires approximately 384 kB according to the following analysis: |
488 | | * https://mail.openvswitch.org/pipermail/ovs-dev/2016-January/308592.html |
489 | | * |
490 | | * We use 512 kB to give us some margin of error. */ |
491 | 0 | pthread_attr_t attr; |
492 | 0 | pthread_attr_init(&attr); |
493 | 0 | set_min_stack_size(&attr, 512 * 1024); |
494 | |
495 | 0 | error = pthread_create(&thread, &attr, ovsthread_wrapper, aux); |
496 | 0 | if (error) { |
497 | 0 | VLOG_ABORT("pthread_create failed: %s", ovs_strerror(error)); |
498 | 0 | } |
499 | 0 | pthread_attr_destroy(&attr); |
500 | 0 | return thread; |
501 | 0 | } |
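
A minimal usage sketch (the thread body is hypothetical); the name prefix combines with the id assigned in ovsthread_wrapper(), so the new thread's subprogram name comes out as, e.g., "hello1":

    static void *
    hello_main(void *arg)
    {
        /* Runs with a subprogram name such as "hello1". */
        return arg;
    }

    static void
    spawn_and_join(void)
    {
        pthread_t tid = ovs_thread_create("hello", hello_main, NULL);
        xpthread_join(tid, NULL);      /* Aborts on error rather than
                                        * returning it. */
    }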
502 | | |
503 | | bool |
504 | | ovsthread_once_start__(struct ovsthread_once *once) |
505 | 0 | { |
506 | 0 | ovs_mutex_lock(&once->mutex); |
507 | | /* Mutex synchronizes memory, so we get the current value of 'done'. */ |
508 | 0 | if (!once->done) { |
509 | 0 | return true; |
510 | 0 | } |
511 | 0 | ovs_mutex_unlock(&once->mutex); |
512 | 0 | return false; |
513 | 0 | } |
514 | | |
515 | | void |
516 | | ovsthread_once_done(struct ovsthread_once *once) |
517 | 0 | { |
518 | | /* We need release semantics here, so that the following store may not |
519 | | * be moved ahead of any of the preceding initialization operations. |
520 | | * A release atomic_thread_fence provides that prior memory accesses |
521 | | * will not be reordered to take place after the following store. */ |
522 | 0 | atomic_thread_fence(memory_order_release); |
523 | 0 | once->done = true; |
524 | 0 | ovs_mutex_unlock(&once->mutex); |
525 | 0 | } |
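
These two functions back the usual one-time-initialization idiom. The header's ovsthread_once_start() wrapper (which, as we read it, checks 'done' without the mutex before falling back to ovsthread_once_start__()) pairs with ovsthread_once_done() like this; do_init() is hypothetical:

    static void
    do_init(void)
    {
        /* Hypothetical one-time setup. */
    }

    static void
    init_subsystem(void)
    {
        static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

        if (ovsthread_once_start(&once)) {
            do_init();                 /* Runs in exactly one thread, once. */
            ovsthread_once_done(&once);
        }
    }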
526 | | |
527 | | bool |
528 | | single_threaded(void) |
529 | 0 | { |
530 | 0 | return !multithreaded; |
531 | 0 | } |
532 | | |
533 | | /* Asserts that the process has not yet created any threads (beyond the initial |
534 | | * thread). |
535 | | * |
536 | | * ('where' is used in logging. Commonly one would use |
537 | | * assert_single_threaded() to automatically provide the caller's source file |
538 | | * and line number for 'where'.) */ |
539 | | void |
540 | | assert_single_threaded_at(const char *where) |
541 | 0 | { |
542 | 0 | if (multithreaded) { |
543 | 0 | VLOG_FATAL("%s: attempted operation not allowed when multithreaded", |
544 | 0 | where); |
545 | 0 | } |
546 | 0 | } |
547 | | |
548 | | #ifndef _WIN32 |
549 | | /* Forks the current process (checking that this is allowed). Aborts with |
550 | | * VLOG_FATAL if fork() returns an error, and otherwise returns the value |
551 | | * returned by fork(). |
552 | | * |
553 | | * ('where' is used in logging. Commonly one would use xfork() to |
554 | | * automatically provide the caller's source file and line number for |
555 | | * 'where'.) */ |
556 | | pid_t |
557 | | xfork_at(const char *where) |
558 | 0 | { |
559 | 0 | pid_t pid; |
560 | |
561 | 0 | if (must_not_fork) { |
562 | 0 | VLOG_FATAL("%s: attempted to fork but forking not allowed (%s)", |
563 | 0 | where, must_not_fork); |
564 | 0 | } |
565 | | |
566 | 0 | pid = fork(); |
567 | 0 | if (pid < 0) { |
568 | 0 | VLOG_FATAL("%s: fork failed (%s)", where, ovs_strerror(errno)); |
569 | 0 | } |
570 | 0 | return pid; |
571 | 0 | } |
572 | | #endif |
573 | | |
574 | | /* Notes that the process must not call fork() from now on, for the specified |
575 | | * 'reason'. (The process may still fork() if it execs itself immediately |
576 | | * afterward.) */ |
577 | | void |
578 | | forbid_forking(const char *reason) |
579 | 0 | { |
580 | 0 | ovs_assert(reason != NULL); |
581 | 0 | must_not_fork = reason; |
582 | 0 | } |
583 | | |
584 | | /* Returns true if the process is allowed to fork, false otherwise. */ |
585 | | bool |
586 | | may_fork(void) |
587 | 0 | { |
588 | 0 | return !must_not_fork; |
589 | 0 | } |
590 | | |
591 | | /* ovsthread_stats. */ |
592 | | |
593 | | void |
594 | | ovsthread_stats_init(struct ovsthread_stats *stats) |
595 | 0 | { |
596 | 0 | int i; |
597 | |
598 | 0 | ovs_mutex_init(&stats->mutex); |
599 | 0 | for (i = 0; i < ARRAY_SIZE(stats->buckets); i++) { |
600 | 0 | stats->buckets[i] = NULL; |
601 | 0 | } |
602 | 0 | } |
603 | | |
604 | | void |
605 | | ovsthread_stats_destroy(struct ovsthread_stats *stats) |
606 | 0 | { |
607 | 0 | ovs_mutex_destroy(&stats->mutex); |
608 | 0 | } |
609 | | |
610 | | void * |
611 | | ovsthread_stats_bucket_get(struct ovsthread_stats *stats, |
612 | | void *(*new_bucket)(void)) |
613 | 0 | { |
614 | 0 | unsigned int idx = ovsthread_id_self() & (ARRAY_SIZE(stats->buckets) - 1); |
615 | 0 | void *bucket = stats->buckets[idx]; |
616 | 0 | if (!bucket) { |
617 | 0 | ovs_mutex_lock(&stats->mutex); |
618 | 0 | bucket = stats->buckets[idx]; |
619 | 0 | if (!bucket) { |
620 | 0 | bucket = stats->buckets[idx] = new_bucket(); |
621 | 0 | } |
622 | 0 | ovs_mutex_unlock(&stats->mutex); |
623 | 0 | } |
624 | 0 | return bucket; |
625 | 0 | } |
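
A usage sketch. Because the bucket index is the thread id masked by the bucket count, distinct threads can collide on one bucket, so bucket contents should tolerate concurrent updates; this hypothetical bucket uses atomic_count for that reason:

    struct packet_stats {              /* Hypothetical bucket type. */
        atomic_count n_packets;        /* Atomic: thread ids can collide
                                        * modulo the bucket count. */
    };

    static void *
    packet_stats_new(void)
    {
        struct packet_stats *b = xzalloc(sizeof *b);
        atomic_count_init(&b->n_packets, 0);
        return b;
    }

    static void
    record_packet(struct ovsthread_stats *stats)
    {
        struct packet_stats *b
            = ovsthread_stats_bucket_get(stats, packet_stats_new);
        atomic_count_inc(&b->n_packets);
    }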
626 | | |
627 | | size_t |
628 | | ovs_thread_stats_next_bucket(const struct ovsthread_stats *stats, size_t i) |
629 | 0 | { |
630 | 0 | for (; i < ARRAY_SIZE(stats->buckets); i++) { |
631 | 0 | if (stats->buckets[i]) { |
632 | 0 | break; |
633 | 0 | } |
634 | 0 | } |
635 | 0 | return i; |
636 | 0 | } |
637 | | |
638 | | |
639 | | static int |
640 | | count_cpu_cores__(void) |
641 | 0 | { |
642 | 0 | long int n_cores; |
643 | |
644 | 0 | #ifndef _WIN32 |
645 | 0 | n_cores = sysconf(_SC_NPROCESSORS_ONLN); |
646 | | #else |
647 | | SYSTEM_INFO sysinfo; |
648 | | GetSystemInfo(&sysinfo); |
649 | | n_cores = sysinfo.dwNumberOfProcessors; |
650 | | #endif |
651 | 0 | #ifdef __linux__ |
652 | 0 | if (n_cores > 0) { |
653 | 0 | cpu_set_t *set = CPU_ALLOC(n_cores); |
654 | |
655 | 0 | if (set) { |
656 | 0 | size_t size = CPU_ALLOC_SIZE(n_cores); |
657 | |
658 | 0 | if (!sched_getaffinity(0, size, set)) { |
659 | 0 | n_cores = CPU_COUNT_S(size, set); |
660 | 0 | } |
661 | 0 | CPU_FREE(set); |
662 | 0 | } |
663 | 0 | } |
664 | 0 | #endif |
665 | 0 | return n_cores > 0 ? n_cores : 0; |
666 | 0 | } |
667 | | |
668 | | /* It's unlikely that the set of available CPUs changes several times per |
669 | | * second, and even if it does, there is no need (or desire) to react to |
670 | | * such changes so quickly. */ |
671 | 0 | #define COUNT_CPU_UPDATE_TIME_MS 10000 |
672 | | |
673 | | static struct ovs_mutex cpu_cores_mutex = OVS_MUTEX_INITIALIZER; |
674 | | |
675 | | /* Returns the current total number of cores available to this process, or 0 |
676 | | * if the number cannot be determined. */ |
677 | | int |
678 | | count_cpu_cores(void) |
679 | 0 | { |
680 | 0 | static long long int last_updated = 0; |
681 | 0 | long long int now = time_msec(); |
682 | 0 | static int cpu_cores; |
683 | |
684 | 0 | ovs_mutex_lock(&cpu_cores_mutex); |
685 | 0 | if (!last_updated || now - last_updated >= COUNT_CPU_UPDATE_TIME_MS) { |
686 | 0 | last_updated = now; |
687 | 0 | cpu_cores = count_cpu_cores__(); |
688 | 0 | } |
689 | 0 | ovs_mutex_unlock(&cpu_cores_mutex); |
690 | 0 | return cpu_cores; |
691 | 0 | } |
692 | | |
693 | | /* Returns the total number of cores on the system, or 0 if the |
694 | | * number cannot be determined. */ |
695 | | int |
696 | | count_total_cores(void) |
697 | 0 | { |
698 | 0 | long int n_cores; |
699 | |
700 | 0 | #ifndef _WIN32 |
701 | 0 | n_cores = sysconf(_SC_NPROCESSORS_CONF); |
702 | | #else |
703 | | n_cores = 0; |
704 | | errno = ENOTSUP; |
705 | | #endif |
706 | |
707 | 0 | return n_cores > 0 ? n_cores : 0; |
708 | 0 | } |
709 | | |
710 | | /* Returns 'true' if the current thread is a PMD thread. */ |
711 | | bool |
712 | | thread_is_pmd(void) |
713 | 0 | { |
714 | 0 | const char *name = get_subprogram_name(); |
715 | 0 | return !strncmp(name, "pmd", 3); |
716 | 0 | } |
717 | | |
718 | | |
719 | | /* ovsthread_key. */ |
720 | | |
721 | 0 | #define L1_SIZE 1024 |
722 | 0 | #define L2_SIZE 1024 |
723 | 0 | #define MAX_KEYS (L1_SIZE * L2_SIZE) |
724 | | |
725 | | /* A piece of thread-specific data. */ |
726 | | struct ovsthread_key { |
727 | | struct ovs_list list_node; /* In 'inuse_keys' or 'free_keys'. */ |
728 | | void (*destructor)(void *); /* Called at thread exit. */ |
729 | | |
730 | | /* Indexes into the per-thread array in struct ovsthread_key_slots. |
731 | | * This key's data is stored in p1[index / L2_SIZE][index % L2_SIZE]. */ |
732 | | unsigned int index; |
733 | | }; |
734 | | |
735 | | /* Per-thread data structure. */ |
736 | | struct ovsthread_key_slots { |
737 | | struct ovs_list list_node; /* In 'slots_list'. */ |
738 | | void **p1[L1_SIZE]; |
739 | | }; |
740 | | |
741 | | /* Contains "struct ovsthread_key_slots *". */ |
742 | | static pthread_key_t tsd_key; |
743 | | |
744 | | /* Guards data structures below. */ |
745 | | static struct ovs_mutex key_mutex = OVS_MUTEX_INITIALIZER; |
746 | | |
747 | | /* 'inuse_keys' holds "struct ovsthread_key"s that have been created and not |
748 | | * yet destroyed. |
749 | | * |
750 | | * 'free_keys' holds "struct ovsthread_key"s that have been deleted and are |
751 | | * ready for reuse. (We keep them around only to be able to easily locate |
752 | | * free indexes.) |
753 | | * |
754 | | * Together, 'inuse_keys' and 'free_keys' hold an ovsthread_key for every index |
755 | | * from 0 to n_keys - 1, inclusive. */ |
756 | | static struct ovs_list inuse_keys OVS_GUARDED_BY(key_mutex) |
757 | | = OVS_LIST_INITIALIZER(&inuse_keys); |
758 | | static struct ovs_list free_keys OVS_GUARDED_BY(key_mutex) |
759 | | = OVS_LIST_INITIALIZER(&free_keys); |
760 | | static unsigned int n_keys OVS_GUARDED_BY(key_mutex); |
761 | | |
762 | | /* All existing struct ovsthread_key_slots. */ |
763 | | static struct ovs_list slots_list OVS_GUARDED_BY(key_mutex) |
764 | | = OVS_LIST_INITIALIZER(&slots_list); |
765 | | |
766 | | static void * |
767 | | clear_slot(struct ovsthread_key_slots *slots, unsigned int index) |
768 | 0 | { |
769 | 0 | void **p2 = slots->p1[index / L2_SIZE]; |
770 | 0 | if (p2) { |
771 | 0 | void **valuep = &p2[index % L2_SIZE]; |
772 | 0 | void *value = *valuep; |
773 | 0 | *valuep = NULL; |
774 | 0 | return value; |
775 | 0 | } else { |
776 | 0 | return NULL; |
777 | 0 | } |
778 | 0 | } |
779 | | |
780 | | static void |
781 | | ovsthread_key_destruct__(void *slots_) |
782 | 0 | { |
783 | 0 | struct ovsthread_key_slots *slots = slots_; |
784 | 0 | struct ovsthread_key *key; |
785 | 0 | unsigned int n; |
786 | 0 | int i; |
787 | |
788 | 0 | ovs_mutex_lock(&key_mutex); |
789 | 0 | ovs_list_remove(&slots->list_node); |
790 | 0 | LIST_FOR_EACH (key, list_node, &inuse_keys) { |
791 | 0 | void *value = clear_slot(slots, key->index); |
792 | 0 | if (value && key->destructor) { |
793 | 0 | key->destructor(value); |
794 | 0 | } |
795 | 0 | } |
796 | 0 | n = n_keys; |
797 | 0 | ovs_mutex_unlock(&key_mutex); |
798 | |
799 | 0 | for (i = 0; i < DIV_ROUND_UP(n, L2_SIZE); i++) { |
800 | 0 | free(slots->p1[i]); |
801 | 0 | } |
802 | 0 | free(slots); |
803 | 0 | } |
804 | | |
805 | | /* Cancels the callback to ovsthread_key_destruct__(). |
806 | | * |
807 | | * Cancelling the call to the destructor during main thread exit is |
808 | | * needed when using the pthreads-win32 library on Windows. It has been |
809 | | * observed that in pthreads-win32, a call to the destructor during |
810 | | * main thread exit causes undefined behavior. */ |
811 | | static void |
812 | | ovsthread_cancel_ovsthread_key_destruct__(void *aux OVS_UNUSED) |
813 | 0 | { |
814 | 0 | pthread_setspecific(tsd_key, NULL); |
815 | 0 | } |
816 | | |
817 | | /* Initializes '*keyp' as a thread-specific data key. The data items are |
818 | | * initially null in all threads. |
819 | | * |
820 | | * If a thread exits with non-null data, then 'destructor', if nonnull, will be |
821 | | * called passing the final data value as its argument. 'destructor' must not |
822 | | * call any thread-specific data functions in this API. |
823 | | * |
824 | | * This function is similar to xpthread_key_create(). */ |
825 | | void |
826 | | ovsthread_key_create(ovsthread_key_t *keyp, void (*destructor)(void *)) |
827 | 0 | { |
828 | 0 | static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER; |
829 | 0 | struct ovsthread_key *key; |
830 | |
831 | 0 | if (ovsthread_once_start(&once)) { |
832 | 0 | xpthread_key_create(&tsd_key, ovsthread_key_destruct__); |
833 | 0 | fatal_signal_add_hook(ovsthread_cancel_ovsthread_key_destruct__, |
834 | 0 | NULL, NULL, true); |
835 | 0 | ovsthread_once_done(&once); |
836 | 0 | } |
837 | |
838 | 0 | ovs_mutex_lock(&key_mutex); |
839 | 0 | if (ovs_list_is_empty(&free_keys)) { |
840 | 0 | key = xmalloc(sizeof *key); |
841 | 0 | key->index = n_keys++; |
842 | 0 | if (key->index >= MAX_KEYS) { |
843 | 0 | abort(); |
844 | 0 | } |
845 | 0 | } else { |
846 | 0 | key = CONTAINER_OF(ovs_list_pop_back(&free_keys), |
847 | 0 | struct ovsthread_key, list_node); |
848 | 0 | } |
849 | 0 | ovs_list_push_back(&inuse_keys, &key->list_node); |
850 | 0 | key->destructor = destructor; |
851 | 0 | ovs_mutex_unlock(&key_mutex); |
852 | |
853 | 0 | *keyp = key; |
854 | 0 | } |
855 | | |
856 | | /* Frees 'key'. The destructor supplied to ovsthread_key_create(), if any, is |
857 | | * not called. |
858 | | * |
859 | | * This function is similar to xpthread_key_delete(). */ |
860 | | void |
861 | | ovsthread_key_delete(ovsthread_key_t key) |
862 | 0 | { |
863 | 0 | struct ovsthread_key_slots *slots; |
864 | |
865 | 0 | ovs_mutex_lock(&key_mutex); |
866 | | |
867 | | /* Move 'key' from 'inuse_keys' to 'free_keys'. */ |
868 | 0 | ovs_list_remove(&key->list_node); |
869 | 0 | ovs_list_push_back(&free_keys, &key->list_node); |
870 | | |
871 | | /* Clear this slot in all threads. */ |
872 | 0 | LIST_FOR_EACH (slots, list_node, &slots_list) { |
873 | 0 | clear_slot(slots, key->index); |
874 | 0 | } |
875 | |
876 | 0 | ovs_mutex_unlock(&key_mutex); |
877 | 0 | } |
878 | | |
879 | | static void ** |
880 | | ovsthread_key_lookup__(const struct ovsthread_key *key) |
881 | 0 | { |
882 | 0 | struct ovsthread_key_slots *slots; |
883 | 0 | void **p2; |
884 | |
885 | 0 | slots = pthread_getspecific(tsd_key); |
886 | 0 | if (!slots) { |
887 | 0 | slots = xzalloc(sizeof *slots); |
888 | |
889 | 0 | ovs_mutex_lock(&key_mutex); |
890 | 0 | pthread_setspecific(tsd_key, slots); |
891 | 0 | ovs_list_push_back(&slots_list, &slots->list_node); |
892 | 0 | ovs_mutex_unlock(&key_mutex); |
893 | 0 | } |
894 | |
895 | 0 | p2 = slots->p1[key->index / L2_SIZE]; |
896 | 0 | if (!p2) { |
897 | 0 | p2 = xzalloc(L2_SIZE * sizeof *p2); |
898 | 0 | slots->p1[key->index / L2_SIZE] = p2; |
899 | 0 | } |
900 | |
901 | 0 | return &p2[key->index % L2_SIZE]; |
902 | 0 | } |
903 | | |
904 | | /* Sets the value of thread-specific data item 'key', in the current thread, to |
905 | | * 'value'. |
906 | | * |
907 | | * This function is similar to pthread_setspecific(). */ |
908 | | void |
909 | | ovsthread_setspecific(ovsthread_key_t key, const void *value) |
910 | 0 | { |
911 | 0 | *ovsthread_key_lookup__(key) = CONST_CAST(void *, value); |
912 | 0 | } |
913 | | |
914 | | /* Returns the value of thread-specific data item 'key' in the current thread. |
915 | | * |
916 | | * This function is similar to pthread_getspecific(). */ |
917 | | void * |
918 | | ovsthread_getspecific(ovsthread_key_t key) |
919 | 0 | { |
920 | 0 | return *ovsthread_key_lookup__(key); |
921 | 0 | } |
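
Putting the pieces together, a sketch of typical use (the key and the stored string are hypothetical); the destructor passed to ovsthread_key_create() runs at thread exit for each thread whose value is non-null:

    static ovsthread_key_t scratch_key;    /* Hypothetical key. */

    static void
    scratch_init(void)
    {
        /* free() each thread's value when that thread exits. */
        ovsthread_key_create(&scratch_key, free);
    }

    static char *
    scratch_get(void)
    {
        char *s = ovsthread_getspecific(scratch_key);
        if (!s) {
            s = xstrdup("per-thread scratch");
            ovsthread_setspecific(scratch_key, s);
        }
        return s;
    }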
922 | | #endif |