/src/bind9/lib/isc/thread.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) Internet Systems Consortium, Inc. ("ISC") |
3 | | * |
4 | | * SPDX-License-Identifier: MPL-2.0 |
5 | | * |
6 | | * This Source Code Form is subject to the terms of the Mozilla Public |
7 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
8 | | * file, you can obtain one at https://mozilla.org/MPL/2.0/. |
9 | | * |
10 | | * See the COPYRIGHT file distributed with this work for additional |
11 | | * information regarding copyright ownership. |
12 | | */ |
13 | | |
14 | | /*! \file */ |
15 | | |
16 | | #if defined(HAVE_SCHED_H) |
17 | | #include <sched.h> |
18 | | #endif /* if defined(HAVE_SCHED_H) */ |
19 | | |
20 | | #if defined(HAVE_CPUSET_H) |
21 | | #include <sys/cpuset.h> |
22 | | #include <sys/param.h> |
23 | | #endif /* if defined(HAVE_CPUSET_H) */ |
24 | | |
25 | | #if defined(HAVE_SYS_PROCSET_H) |
26 | | #include <sys/processor.h> |
27 | | #include <sys/procset.h> |
28 | | #include <sys/types.h> |
29 | | #endif /* if defined(HAVE_SYS_PROCSET_H) */ |
30 | | |
31 | | #include <stdlib.h> |
32 | | |
33 | | #include <isc/atomic.h> |
34 | | #include <isc/iterated_hash.h> |
35 | | #include <isc/strerr.h> |
36 | | #include <isc/thread.h> |
37 | | #include <isc/tid.h> |
38 | | #include <isc/urcu.h> |
39 | | #include <isc/util.h> |
40 | | |
41 | | #include "thread_p.h" |
42 | | |
43 | | static struct call_rcu_data *isc__thread_call_rcu_data = NULL; |
44 | | |
45 | | pthread_attr_t isc__thread_attr; |
46 | | |
47 | | /* |
48 | | * We can't use isc_mem API here, because it's called too early and the |
49 | | * memory debugging flags can be changed later causing mismatch between flags |
50 | | * used for isc_mem_get() and isc_mem_put(). |
51 | | */ |
52 | | |
53 | | struct thread_wrap { |
54 | | struct rcu_head rcu_head; |
55 | | isc_threadfunc_t func; |
56 | | void *arg; |
57 | | }; |
58 | | |
59 | | static struct thread_wrap * |
60 | 0 | thread_wrap(isc_threadfunc_t func, void *arg) { |
61 | 0 | struct thread_wrap *wrap = malloc(sizeof(*wrap)); |
62 | 0 | RUNTIME_CHECK(wrap != NULL); |
63 | 0 | *wrap = (struct thread_wrap){ |
64 | 0 | .func = func, |
65 | 0 | .arg = arg, |
66 | 0 | }; |
67 | 0 | return wrap; |
68 | 0 | } |
69 | | |
70 | | static void * |
71 | 0 | thread_body(struct thread_wrap *wrap) { |
72 | 0 | isc_threadfunc_t func = wrap->func; |
73 | 0 | void *arg = wrap->arg; |
74 | 0 | void *ret = NULL; |
75 | | |
76 | | /* |
77 | | * Every thread starts with a malloc() call to prevent memory bloat |
78 | | * caused by a jemalloc quirk. We use CMM_ACCESS_ONCE() To stop an |
79 | | * optimizing compiler from stripping out free(malloc(1)). |
80 | | */ |
81 | 0 | void *jemalloc_enforce_init = NULL; |
82 | 0 | CMM_ACCESS_ONCE(jemalloc_enforce_init) = malloc(1); |
83 | 0 | free(jemalloc_enforce_init); |
84 | |
85 | 0 | free(wrap); |
86 | |
87 | 0 | ret = func(arg); |
88 | |
89 | 0 | return ret; |
90 | 0 | } |
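
The free(malloc(1)) dance above exists only to make the allocator set up its per-thread state as soon as the thread starts; CMM_ACCESS_ONCE() (from liburcu) keeps an optimizing compiler from discarding the pair. A minimal sketch of the same idea using only standard C, with touch_allocator_early as a purely illustrative name that is not part of this file:

#include <stdlib.h>

/*
 * Touch the allocator once so it initializes its per-thread state early.
 * The volatile-qualified pointer keeps the compiler from proving
 * free(malloc(1)) dead and deleting it.
 */
static void
touch_allocator_early(void) {
	void *volatile p = malloc(1);
	free(p);
}
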
91 | | |
92 | | static void * |
93 | 0 | thread_run(void *wrap) { |
94 | | /* |
95 | | * Get a thread-local digest context only in new threads. |
96 | | * The main thread is handled by isc__initialize(). |
97 | | */ |
98 | 0 | isc__iterated_hash_initialize(); |
99 | |
100 | 0 | rcu_register_thread(); |
101 | |
102 | 0 | set_thread_call_rcu_data(isc__thread_call_rcu_data); |
103 | |
104 | 0 | void *ret = thread_body(wrap); |
105 | |
106 | 0 | set_thread_call_rcu_data(NULL); |
107 | |
108 | 0 | rcu_unregister_thread(); |
109 | |
110 | 0 | isc__iterated_hash_shutdown(); |
111 | |
112 | 0 | return ret; |
113 | 0 | } |
114 | | |
115 | | void |
116 | 0 | isc_thread_main(isc_threadfunc_t func, void *arg) { |
117 | | /* |
118 | | * Either this thread has not yet been started, so it can become the |
119 | | * main thread, or it has already been anointed as the chosen zero. |
120 | | */ |
121 | 0 | REQUIRE(isc_tid() == ISC_TID_UNKNOWN || isc_tid() == 0); |
122 | 0 | thread_body(thread_wrap(func, arg)); |
123 | 0 | } |
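
Because isc_thread_main() runs the wrapped function directly on the calling thread (which must still be unstarted or already be tid 0), no pthread is created and nothing needs to be joined. A hypothetical caller sketch; main_loop and start_on_main_thread are illustrative names, not part of the library:

#include <isc/thread.h>

static void *
main_loop(void *arg) {
	/* ... run the event loop on the calling (main) thread ... */
	return arg;
}

static void
start_on_main_thread(void *loop_ctx) {
	/* Runs main_loop() synchronously on this thread. */
	isc_thread_main(main_loop, loop_ctx);
}
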
124 | | |
125 | | void |
126 | 0 | isc_thread_create(isc_threadfunc_t func, void *arg, isc_thread_t *thread) { |
127 | 0 | int ret = pthread_create(thread, &isc__thread_attr, thread_run, |
128 | 0 | thread_wrap(func, arg)); |
129 | 0 | PTHREADS_RUNTIME_CHECK(pthread_create, ret); |
130 | 0 | } |
131 | | |
132 | | void |
133 | 0 | isc_thread_join(isc_thread_t thread, void **resultp) { |
134 | 0 | int ret = pthread_join(thread, resultp); |
135 | |
136 | 0 | PTHREADS_RUNTIME_CHECK(pthread_join, ret); |
137 | 0 | } |
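
A hypothetical usage sketch for the create/join pair (worker and run_one_worker are illustrative names): isc_thread_create() hands func and arg to thread_run() through a heap-allocated thread_wrap, and isc_thread_join() delivers the worker's return value through resultp.

#include <isc/thread.h>

static void *
worker(void *arg) {
	/* ... do the actual work ... */
	return arg;
}

static void *
run_one_worker(void *arg) {
	isc_thread_t t;
	void *result = NULL;

	isc_thread_create(worker, arg, &t);
	/* Keep names short: Linux limits them to 15 chars plus NUL. */
	isc_thread_setname(t, "isc-worker");
	isc_thread_join(t, &result);
	return result;
}
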
138 | | |
139 | | void |
140 | 0 | isc_thread_setname(isc_thread_t thread, const char *name) { |
141 | 0 | #if defined(HAVE_PTHREAD_SETNAME_NP) && !defined(__APPLE__) |
142 | | /* |
143 | | * macOS has pthread_setname_np(), but it only works on the |
144 | | * current thread, so it is not used here. |
145 | | */ |
146 | | #if defined(__NetBSD__) |
147 | | (void)pthread_setname_np(thread, name, NULL); |
148 | | #else /* if defined(__NetBSD__) */ |
149 | 0 | (void)pthread_setname_np(thread, name); |
150 | 0 | #endif /* if defined(__NetBSD__) */ |
151 | | #elif defined(HAVE_PTHREAD_SET_NAME_NP) |
152 | | (void)pthread_set_name_np(thread, name); |
153 | | #else /* if defined(HAVE_PTHREAD_SETNAME_NP) && !defined(__APPLE__) */ |
154 | | UNUSED(thread); |
155 | | UNUSED(name); |
156 | | #endif /* if defined(HAVE_PTHREAD_SETNAME_NP) && !defined(__APPLE__) */ |
157 | 0 | } |
158 | | |
159 | | void |
160 | 0 | isc_thread_yield(void) { |
161 | 0 | #if defined(HAVE_SCHED_YIELD) |
162 | 0 | sched_yield(); |
163 | | #elif defined(HAVE_PTHREAD_YIELD) |
164 | | pthread_yield(); |
165 | | #elif defined(HAVE_PTHREAD_YIELD_NP) |
166 | | pthread_yield_np(); |
167 | | #endif /* if defined(HAVE_SCHED_YIELD) */ |
168 | 0 | } |
169 | | |
170 | | size_t |
171 | 2 | isc_thread_getstacksize(void) { |
172 | 2 | size_t stacksize = 0; |
173 | | |
174 | 2 | #if HAVE_PTHREAD_ATTR_GETSTACKSIZE |
175 | 2 | int ret = pthread_attr_getstacksize(&isc__thread_attr, &stacksize); |
176 | 2 | PTHREADS_RUNTIME_CHECK(pthread_attr_getstacksize, ret); |
177 | 2 | #endif /* HAVE_PTHREAD_ATTR_GETSTACKSIZE */ |
178 | | |
179 | 2 | return stacksize; |
180 | 2 | } |
181 | | |
182 | | void |
183 | 0 | isc_thread_setstacksize(size_t stacksize ISC_ATTR_UNUSED) { |
184 | 0 | #if HAVE_PTHREAD_ATTR_SETSTACKSIZE |
185 | 0 | int ret = pthread_attr_setstacksize(&isc__thread_attr, stacksize); |
186 | 0 | PTHREADS_RUNTIME_CHECK(pthread_attr_setstacksize, ret); |
187 | 0 | #endif /* HAVE_PTHREAD_ATTR_SETSTACKSIZE */ |
188 | 0 | } |
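
Both helpers operate on the shared pthread attribute (isc__thread_attr) that isc_thread_create() passes to pthread_create(), so a changed stack size only applies to threads created afterwards. A hypothetical sketch that doubles the default, assuming the platform can report it:

#include <isc/thread.h>

static void
double_default_stacksize(void) {
	size_t stacksize = isc_thread_getstacksize();

	/* 0 means the default could not be queried on this platform. */
	if (stacksize != 0) {
		isc_thread_setstacksize(2 * stacksize);
	}
	/* Only threads created after this point get the larger stack. */
}
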
189 | | |
190 | | void |
191 | 0 | isc__thread_initialize(void) { |
192 | 0 | isc__thread_call_rcu_data = create_call_rcu_data(0, -1); |
193 | 0 | set_thread_call_rcu_data(isc__thread_call_rcu_data); |
194 | 0 | } |
195 | | |
196 | | void |
197 | 0 | isc__thread_shutdown(void) { |
198 | 0 | set_thread_call_rcu_data(NULL); |
199 | | call_rcu_data_free(isc__thread_call_rcu_data); |
200 | 0 | } |
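
isc__thread_initialize() creates the shared call_rcu helper data and attaches it to the calling thread; every thread started afterwards via isc_thread_create() picks it up in thread_run(), and isc__thread_shutdown() detaches and frees it again. These are internal entry points declared in thread_p.h, presumably wired up by the library's own initialization (isc__initialize() is referenced earlier in this file) rather than called by applications; the following is an ordering sketch only:

#include "thread_p.h"

static void
thread_subsystem_lifecycle(void) {
	/* Before the first isc_thread_create(). */
	isc__thread_initialize();

	/* ... create, run, and join all worker threads ... */

	/* After every thread has been joined. */
	isc__thread_shutdown();
}
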