/src/CMake/Utilities/cmlibuv/src/threadpool.c
Line | Count | Source |
1 | | /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. |
2 | | * |
3 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
4 | | * of this software and associated documentation files (the "Software"), to |
5 | | * deal in the Software without restriction, including without limitation the |
6 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
7 | | * sell copies of the Software, and to permit persons to whom the Software is |
8 | | * furnished to do so, subject to the following conditions: |
9 | | * |
10 | | * The above copyright notice and this permission notice shall be included in |
11 | | * all copies or substantial portions of the Software. |
12 | | * |
13 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
16 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
17 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
18 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
19 | | * IN THE SOFTWARE. |
20 | | */ |
21 | | |
22 | | #include "uv-common.h" |
23 | | |
24 | | #if !defined(_WIN32) |
25 | | # include "unix/internal.h" |
26 | | #endif |
27 | | |
28 | | #include <stdlib.h> |
29 | | |
#define MAX_THREADPOOL_SIZE 1024

/* All of the state below is shared by every worker thread and is guarded
 * by `mutex` (except `once`, which uv_once() synchronizes itself). */
static uv_once_t once = UV_ONCE_INIT;     /* one-shot guard for init_once() */
static uv_cond_t cond;                    /* workers wait on this for work */
static uv_mutex_t mutex;                  /* protects the queues and counters */
static unsigned int idle_threads;         /* workers blocked in uv_cond_wait() */
static unsigned int slow_io_work_running; /* slow-I/O jobs currently executing */
static unsigned int nthreads;             /* pool size; 0 => not initialized */
static uv_thread_t* threads;              /* worker handles (may be heap-allocated) */
static uv_thread_t default_threads[4];    /* used when the pool size fits */
static QUEUE exit_message;                /* sentinel telling workers to quit */
static QUEUE wq;                          /* global pending-work queue */
static QUEUE run_slow_work_message;       /* sentinel that schedules slow I/O */
static QUEUE slow_io_pending_wq;          /* pending slow-I/O work items */
44 | | |
45 | 0 | static unsigned int slow_work_thread_threshold(void) { |
46 | 0 | return (nthreads + 1) / 2; |
47 | 0 | } |
48 | | |
/* Sentinel work callback installed by uv__work_cancel().  It must never
 * actually run — executing a cancelled request is a bug, so abort. */
static void uv__cancelled(struct uv__work* w) {
  (void) w;
  abort();
}
52 | | |
53 | | |
/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;
  int is_slow_work;

  /* Tell init_threads() this thread is up; `arg` is the startup semaphore
   * and must not be touched after the post. */
  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  uv_mutex_lock(&mutex);
  for (;;) {
    /* `mutex` should always be locked at this point. */

    /* Keep waiting while either no work is present or only slow I/O
       and we're at the threshold for that. */
    while (QUEUE_EMPTY(&wq) ||
           (QUEUE_HEAD(&wq) == &run_slow_work_message &&
            QUEUE_NEXT(&run_slow_work_message) == &wq &&
            slow_io_work_running >= slow_work_thread_threshold())) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);
    if (q == &exit_message) {
      /* Leave the exit message in the queue and wake another worker so
       * every thread eventually sees it and shuts down. */
      uv_cond_signal(&cond);
      uv_mutex_unlock(&mutex);
      break;
    }

    QUEUE_REMOVE(q);
    QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is executing. */

    is_slow_work = 0;
    if (q == &run_slow_work_message) {
      /* If we're at the slow I/O threshold, re-schedule until after all
         other work in the queue is done. */
      if (slow_io_work_running >= slow_work_thread_threshold()) {
        QUEUE_INSERT_TAIL(&wq, q);
        continue;
      }

      /* If we encountered a request to run slow I/O work but there is none
         to run, that means it's cancelled => Start over. */
      if (QUEUE_EMPTY(&slow_io_pending_wq))
        continue;

      is_slow_work = 1;
      slow_io_work_running++;

      q = QUEUE_HEAD(&slow_io_pending_wq);
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);

      /* If there is more slow I/O work, schedule it to be run as well. */
      if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
        QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
        if (idle_threads > 0)
          uv_cond_signal(&cond);
      }
    }

    uv_mutex_unlock(&mutex);

    /* Run the user work callback without holding any lock. */
    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    /* Hand the finished request back to its owning loop and wake it. */
    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);

    /* Lock `mutex` since that is expected at the start of the next
     * iteration. */
    uv_mutex_lock(&mutex);
    if (is_slow_work) {
      /* `slow_io_work_running` is protected by `mutex`. */
      slow_io_work_running--;
    }
  }
}
140 | | |
141 | | |
142 | 0 | static void post(QUEUE* q, enum uv__work_kind kind) { |
143 | 0 | uv_mutex_lock(&mutex); |
144 | 0 | if (kind == UV__WORK_SLOW_IO) { |
145 | | /* Insert into a separate queue. */ |
146 | 0 | QUEUE_INSERT_TAIL(&slow_io_pending_wq, q); |
147 | 0 | if (!QUEUE_EMPTY(&run_slow_work_message)) { |
148 | | /* Running slow I/O tasks is already scheduled => Nothing to do here. |
149 | | The worker that runs said other task will schedule this one as well. */ |
150 | 0 | uv_mutex_unlock(&mutex); |
151 | 0 | return; |
152 | 0 | } |
153 | 0 | q = &run_slow_work_message; |
154 | 0 | } |
155 | | |
156 | 0 | QUEUE_INSERT_TAIL(&wq, q); |
157 | 0 | if (idle_threads > 0) |
158 | 0 | uv_cond_signal(&cond); |
159 | 0 | uv_mutex_unlock(&mutex); |
160 | 0 | } |
161 | | |
162 | | |
163 | | #ifdef __MVS__ |
164 | | /* TODO(itodorov) - zos: revisit when Woz compiler is available. */ |
165 | | __attribute__((destructor)) |
166 | | #endif |
167 | 0 | void uv__threadpool_cleanup(void) { |
168 | 0 | unsigned int i; |
169 | |
|
170 | 0 | if (nthreads == 0) |
171 | 0 | return; |
172 | | |
173 | 0 | #ifndef __MVS__ |
174 | | /* TODO(gabylb) - zos: revisit when Woz compiler is available. */ |
175 | 0 | post(&exit_message, UV__WORK_CPU); |
176 | 0 | #endif |
177 | |
|
178 | 0 | for (i = 0; i < nthreads; i++) |
179 | 0 | if (uv_thread_join(threads + i)) |
180 | 0 | abort(); |
181 | | |
182 | 0 | if (threads != default_threads) |
183 | 0 | uv__free(threads); |
184 | |
|
185 | 0 | uv_mutex_destroy(&mutex); |
186 | 0 | uv_cond_destroy(&cond); |
187 | |
|
188 | 0 | threads = NULL; |
189 | 0 | nthreads = 0; |
190 | 0 | } |
191 | | |
192 | | |
193 | 0 | static void init_threads(void) { |
194 | 0 | unsigned int i; |
195 | 0 | const char* val; |
196 | 0 | uv_sem_t sem; |
197 | |
|
198 | 0 | nthreads = ARRAY_SIZE(default_threads); |
199 | 0 | val = getenv("UV_THREADPOOL_SIZE"); |
200 | 0 | if (val != NULL) |
201 | 0 | nthreads = atoi(val); |
202 | 0 | if (nthreads == 0) |
203 | 0 | nthreads = 1; |
204 | 0 | if (nthreads > MAX_THREADPOOL_SIZE) |
205 | 0 | nthreads = MAX_THREADPOOL_SIZE; |
206 | |
|
207 | 0 | threads = default_threads; |
208 | 0 | if (nthreads > ARRAY_SIZE(default_threads)) { |
209 | 0 | threads = uv__malloc(nthreads * sizeof(threads[0])); |
210 | 0 | if (threads == NULL) { |
211 | 0 | nthreads = ARRAY_SIZE(default_threads); |
212 | 0 | threads = default_threads; |
213 | 0 | } |
214 | 0 | } |
215 | |
|
216 | 0 | if (uv_cond_init(&cond)) |
217 | 0 | abort(); |
218 | | |
219 | 0 | if (uv_mutex_init(&mutex)) |
220 | 0 | abort(); |
221 | | |
222 | 0 | QUEUE_INIT(&wq); |
223 | 0 | QUEUE_INIT(&slow_io_pending_wq); |
224 | 0 | QUEUE_INIT(&run_slow_work_message); |
225 | |
|
226 | 0 | if (uv_sem_init(&sem, 0)) |
227 | 0 | abort(); |
228 | | |
229 | 0 | for (i = 0; i < nthreads; i++) |
230 | 0 | if (uv_thread_create(threads + i, worker, &sem)) |
231 | 0 | abort(); |
232 | | |
233 | 0 | for (i = 0; i < nthreads; i++) |
234 | 0 | uv_sem_wait(&sem); |
235 | |
|
236 | 0 | uv_sem_destroy(&sem); |
237 | 0 | } |
238 | | |
239 | | |
240 | | #ifndef _WIN32 |
241 | 0 | static void reset_once(void) { |
242 | 0 | uv_once_t child_once = UV_ONCE_INIT; |
243 | 0 | memcpy(&once, &child_once, sizeof(child_once)); |
244 | 0 | } |
245 | | #endif |
246 | | |
247 | | |
/* One-time process-wide initialization: register the fork handler first,
 * then start the pool.  Invoked through uv_once(&once, init_once). */
static void init_once(void) {
#ifndef _WIN32
  /* Re-initialize the threadpool after fork.
   * Note that this discards the global mutex and condition as well
   * as the work queue.
   */
  if (pthread_atfork(NULL, NULL, &reset_once))
    abort();
#endif
  init_threads();
}
259 | | |
260 | | |
261 | | void uv__work_submit(uv_loop_t* loop, |
262 | | struct uv__work* w, |
263 | | enum uv__work_kind kind, |
264 | | void (*work)(struct uv__work* w), |
265 | 0 | void (*done)(struct uv__work* w, int status)) { |
266 | 0 | uv_once(&once, init_once); |
267 | 0 | w->loop = loop; |
268 | 0 | w->work = work; |
269 | 0 | w->done = done; |
270 | 0 | post(&w->wq, kind); |
271 | 0 | } |
272 | | |
273 | | |
/* Try to cancel a submitted-but-not-yet-started work request.
 *
 * Takes the global pool mutex and then the loop's wq mutex — the same
 * order a worker uses — so the request cannot move from "pending" to
 * "executing" while it is inspected.  A request whose queue links are
 * still live (non-empty w->wq) with a non-NULL work pointer has not been
 * picked up by a worker yet (see the QUEUE_INIT / w->work = NULL signals
 * in worker()) and can be unlinked.
 *
 * On success the request is re-posted to the loop's done queue with the
 * uv__cancelled sentinel as its work callback so uv__work_done() reports
 * UV_ECANCELED.  Returns UV_EBUSY when the request already started. */
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
  if (cancelled)
    QUEUE_REMOVE(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);

  if (!cancelled)
    return UV_EBUSY;

  /* Deliver the request back to its loop as "done, cancelled". */
  w->work = uv__cancelled;
  uv_mutex_lock(&loop->wq_mutex);
  QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
  uv_async_send(&loop->wq_async);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}
298 | | |
299 | | |
300 | 0 | void uv__work_done(uv_async_t* handle) { |
301 | 0 | struct uv__work* w; |
302 | 0 | uv_loop_t* loop; |
303 | 0 | QUEUE* q; |
304 | 0 | QUEUE wq; |
305 | 0 | int err; |
306 | |
|
307 | 0 | loop = container_of(handle, uv_loop_t, wq_async); |
308 | 0 | uv_mutex_lock(&loop->wq_mutex); |
309 | 0 | QUEUE_MOVE(&loop->wq, &wq); |
310 | 0 | uv_mutex_unlock(&loop->wq_mutex); |
311 | |
|
312 | 0 | while (!QUEUE_EMPTY(&wq)) { |
313 | 0 | q = QUEUE_HEAD(&wq); |
314 | 0 | QUEUE_REMOVE(q); |
315 | |
|
316 | 0 | w = container_of(q, struct uv__work, wq); |
317 | 0 | err = (w->work == uv__cancelled) ? UV_ECANCELED : 0; |
318 | 0 | w->done(w, err); |
319 | 0 | } |
320 | 0 | } |
321 | | |
322 | | |
323 | 0 | static void uv__queue_work(struct uv__work* w) { |
324 | 0 | uv_work_t* req = container_of(w, uv_work_t, work_req); |
325 | |
|
326 | 0 | req->work_cb(req); |
327 | 0 | } |
328 | | |
329 | | |
330 | 0 | static void uv__queue_done(struct uv__work* w, int err) { |
331 | 0 | uv_work_t* req; |
332 | |
|
333 | 0 | req = container_of(w, uv_work_t, work_req); |
334 | 0 | uv__req_unregister(req->loop, req); |
335 | |
|
336 | 0 | if (req->after_work_cb == NULL) |
337 | 0 | return; |
338 | | |
339 | 0 | req->after_work_cb(req, err); |
340 | 0 | } |
341 | | |
342 | | |
343 | | int uv_queue_work(uv_loop_t* loop, |
344 | | uv_work_t* req, |
345 | | uv_work_cb work_cb, |
346 | 0 | uv_after_work_cb after_work_cb) { |
347 | 0 | if (work_cb == NULL) |
348 | 0 | return UV_EINVAL; |
349 | | |
350 | 0 | uv__req_init(loop, req, UV_WORK); |
351 | 0 | req->loop = loop; |
352 | 0 | req->work_cb = work_cb; |
353 | 0 | req->after_work_cb = after_work_cb; |
354 | 0 | uv__work_submit(loop, |
355 | 0 | &req->work_req, |
356 | 0 | UV__WORK_CPU, |
357 | 0 | uv__queue_work, |
358 | 0 | uv__queue_done); |
359 | 0 | return 0; |
360 | 0 | } |
361 | | |
362 | | |
363 | 0 | int uv_cancel(uv_req_t* req) { |
364 | 0 | struct uv__work* wreq; |
365 | 0 | uv_loop_t* loop; |
366 | |
|
367 | 0 | switch (req->type) { |
368 | 0 | case UV_FS: |
369 | 0 | loop = ((uv_fs_t*) req)->loop; |
370 | 0 | wreq = &((uv_fs_t*) req)->work_req; |
371 | 0 | break; |
372 | 0 | case UV_GETADDRINFO: |
373 | 0 | loop = ((uv_getaddrinfo_t*) req)->loop; |
374 | 0 | wreq = &((uv_getaddrinfo_t*) req)->work_req; |
375 | 0 | break; |
376 | 0 | case UV_GETNAMEINFO: |
377 | 0 | loop = ((uv_getnameinfo_t*) req)->loop; |
378 | 0 | wreq = &((uv_getnameinfo_t*) req)->work_req; |
379 | 0 | break; |
380 | 0 | case UV_RANDOM: |
381 | 0 | loop = ((uv_random_t*) req)->loop; |
382 | 0 | wreq = &((uv_random_t*) req)->work_req; |
383 | 0 | break; |
384 | 0 | case UV_WORK: |
385 | 0 | loop = ((uv_work_t*) req)->loop; |
386 | 0 | wreq = &((uv_work_t*) req)->work_req; |
387 | 0 | break; |
388 | 0 | default: |
389 | 0 | return UV_EINVAL; |
390 | 0 | } |
391 | | |
392 | 0 | return uv__work_cancel(loop, req, wreq); |
393 | 0 | } |