Coverage Report

Created: 2026-04-29 07:01

/src/CMake/Utilities/cmlibuv/src/threadpool.c
Line | Count | Source
   1 |       | /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
   2 |       |  *
   3 |       |  * Permission is hereby granted, free of charge, to any person obtaining a copy
   4 |       |  * of this software and associated documentation files (the "Software"), to
   5 |       |  * deal in the Software without restriction, including without limitation the
   6 |       |  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   7 |       |  * sell copies of the Software, and to permit persons to whom the Software is
   8 |       |  * furnished to do so, subject to the following conditions:
   9 |       |  *
  10 |       |  * The above copyright notice and this permission notice shall be included in
  11 |       |  * all copies or substantial portions of the Software.
  12 |       |  *
  13 |       |  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14 |       |  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15 |       |  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  16 |       |  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  17 |       |  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  18 |       |  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  19 |       |  * IN THE SOFTWARE.
  20 |       |  */
  21 |       |
  22 |       | #include "uv-common.h"
  23 |       |
  24 |       | #if !defined(_WIN32)
  25 |       | # include "unix/internal.h"
  26 |       | #endif
  27 |       |
  28 |       | #include <stdlib.h>
  29 |       |
  30 |     0 | #define MAX_THREADPOOL_SIZE 1024
  31 |       |
  32 |       | static uv_once_t once = UV_ONCE_INIT;
  33 |       | static uv_cond_t cond;
  34 |       | static uv_mutex_t mutex;
  35 |       | static unsigned int idle_threads;
  36 |       | static unsigned int slow_io_work_running;
  37 |       | static unsigned int nthreads;
  38 |       | static uv_thread_t* threads;
  39 |       | static uv_thread_t default_threads[4];
  40 |       | static struct uv__queue exit_message;
  41 |       | static struct uv__queue wq;
  42 |       | static struct uv__queue run_slow_work_message;
  43 |       | static struct uv__queue slow_io_pending_wq;
  44 |       |
  45 |     0 | static unsigned int slow_work_thread_threshold(void) {
  46 |     0 |   return (nthreads + 1) / 2;
  47 |     0 | }
  48 |       |
  49 |     0 | static void uv__cancelled(struct uv__work* w) {
  50 |     0 |   abort();
  51 |     0 | }
  52 |       |
  53 |       |
  54 |       | /* To avoid deadlock with uv_cancel() it's crucial that the worker
  55 |       |  * never holds the global mutex and the loop-local mutex at the same time.
  56 |       |  */
  57 |     0 | static void worker(void* arg) {
  58 |     0 |   struct uv__work* w;
  59 |     0 |   struct uv__queue* q;
  60 |     0 |   int is_slow_work;
  61 |       |
  62 |     0 |   uv_thread_setname("libuv-worker");
  63 |     0 |   uv_sem_post((uv_sem_t*) arg);
  64 |     0 |   arg = NULL;
  65 |       |
  66 |     0 |   uv_mutex_lock(&mutex);
  67 |     0 |   for (;;) {
  68 |       |     /* `mutex` should always be locked at this point. */
  69 |       |
  70 |       |     /* Keep waiting while either no work is present or only slow I/O
  71 |       |        and we're at the threshold for that. */
  72 |     0 |     while (uv__queue_empty(&wq) ||
  73 |     0 |            (uv__queue_head(&wq) == &run_slow_work_message &&
  74 |     0 |             uv__queue_next(&run_slow_work_message) == &wq &&
  75 |     0 |             slow_io_work_running >= slow_work_thread_threshold())) {
  76 |     0 |       idle_threads += 1;
  77 |     0 |       uv_cond_wait(&cond, &mutex);
  78 |     0 |       idle_threads -= 1;
  79 |     0 |     }
  80 |       |
  81 |     0 |     q = uv__queue_head(&wq);
  82 |     0 |     if (q == &exit_message) {
  83 |     0 |       uv_cond_signal(&cond);
  84 |     0 |       uv_mutex_unlock(&mutex);
  85 |     0 |       break;
  86 |     0 |     }
  87 |       |
  88 |     0 |     uv__queue_remove(q);
  89 |     0 |     uv__queue_init(q);  /* Signal uv_cancel() that the work req is executing. */
  90 |       |
  91 |     0 |     is_slow_work = 0;
  92 |     0 |     if (q == &run_slow_work_message) {
  93 |       |       /* If we're at the slow I/O threshold, re-schedule until after all
  94 |       |          other work in the queue is done. */
  95 |     0 |       if (slow_io_work_running >= slow_work_thread_threshold()) {
  96 |     0 |         uv__queue_insert_tail(&wq, q);
  97 |     0 |         continue;
  98 |     0 |       }
  99 |       |
 100 |       |       /* If we encountered a request to run slow I/O work but there is none
 101 |       |          to run, that means it's cancelled => Start over. */
 102 |     0 |       if (uv__queue_empty(&slow_io_pending_wq))
 103 |     0 |         continue;
 104 |       |
 105 |     0 |       is_slow_work = 1;
 106 |     0 |       slow_io_work_running++;
 107 |       |
 108 |     0 |       q = uv__queue_head(&slow_io_pending_wq);
 109 |     0 |       uv__queue_remove(q);
 110 |     0 |       uv__queue_init(q);
 111 |       |
 112 |       |       /* If there is more slow I/O work, schedule it to be run as well. */
 113 |     0 |       if (!uv__queue_empty(&slow_io_pending_wq)) {
 114 |     0 |         uv__queue_insert_tail(&wq, &run_slow_work_message);
 115 |     0 |         if (idle_threads > 0)
 116 |     0 |           uv_cond_signal(&cond);
 117 |     0 |       }
 118 |     0 |     }
 119 |       |
 120 |     0 |     uv_mutex_unlock(&mutex);
 121 |       |
 122 |     0 |     w = uv__queue_data(q, struct uv__work, wq);
 123 |     0 |     w->work(w);
 124 |       |
 125 |     0 |     uv_mutex_lock(&w->loop->wq_mutex);
 126 |     0 |     w->work = NULL;  /* Signal uv_cancel() that the work req is done
 127 |       |                         executing. */
 128 |     0 |     uv__queue_insert_tail(&w->loop->wq, &w->wq);
 129 |     0 |     uv_async_send(&w->loop->wq_async);
 130 |     0 |     uv_mutex_unlock(&w->loop->wq_mutex);
 131 |       |
 132 |       |     /* Lock `mutex` since that is expected at the start of the next
 133 |       |      * iteration. */
 134 |     0 |     uv_mutex_lock(&mutex);
 135 |     0 |     if (is_slow_work) {
 136 |       |       /* `slow_io_work_running` is protected by `mutex`. */
 137 |     0 |       slow_io_work_running--;
 138 |     0 |     }
 139 |     0 |   }
 140 |     0 | }
 141 |       |
 142 |       |
 143 |     0 | static void post(struct uv__queue* q, enum uv__work_kind kind) {
 144 |     0 |   uv_mutex_lock(&mutex);
 145 |     0 |   if (kind == UV__WORK_SLOW_IO) {
 146 |       |     /* Insert into a separate queue. */
 147 |     0 |     uv__queue_insert_tail(&slow_io_pending_wq, q);
 148 |     0 |     if (!uv__queue_empty(&run_slow_work_message)) {
 149 |       |       /* Running slow I/O tasks is already scheduled => Nothing to do here.
 150 |       |          The worker that runs said other task will schedule this one as well. */
 151 |     0 |       uv_mutex_unlock(&mutex);
 152 |     0 |       return;
 153 |     0 |     }
 154 |     0 |     q = &run_slow_work_message;
 155 |     0 |   }
 156 |       |
 157 |     0 |   uv__queue_insert_tail(&wq, q);
 158 |     0 |   if (idle_threads > 0)
 159 |     0 |     uv_cond_signal(&cond);
 160 |     0 |   uv_mutex_unlock(&mutex);
 161 |     0 | }
 162 |       |
 163 |       |
 164 |       | #ifdef __MVS__
 165 |       | /* TODO(itodorov) - zos: revisit when Woz compiler is available. */
 166 |       | __attribute__((destructor))
 167 |       | #endif
 168 |     0 | void uv__threadpool_cleanup(void) {
 169 |     0 |   unsigned int i;
 170 |       |
 171 |     0 |   if (nthreads == 0)
 172 |     0 |     return;
 173 |       |
 174 |     0 | #ifndef __MVS__
 175 |       |   /* TODO(gabylb) - zos: revisit when Woz compiler is available. */
 176 |     0 |   post(&exit_message, UV__WORK_CPU);
 177 |     0 | #endif
 178 |       |
 179 |     0 |   for (i = 0; i < nthreads; i++)
 180 |     0 |     if (uv_thread_join(threads + i))
 181 |     0 |       abort();
 182 |       |
 183 |     0 |   if (threads != default_threads)
 184 |     0 |     uv__free(threads);
 185 |       |
 186 |     0 |   uv_mutex_destroy(&mutex);
 187 |     0 |   uv_cond_destroy(&cond);
 188 |       |
 189 |     0 |   threads = NULL;
 190 |     0 |   nthreads = 0;
 191 |     0 | }
 192 |       |
 193 |       |
 194 |     0 | static void init_threads(void) {
 195 |     0 |   uv_thread_options_t config;
 196 |     0 |   unsigned int i;
 197 |     0 |   size_t buflen;
 198 |     0 |   char buf[16];
 199 |     0 |   const char* val;
 200 |     0 |   int err;
 201 |       |
 202 |     0 |   uv_sem_t sem;
 203 |       |
 204 |     0 |   nthreads = ARRAY_SIZE(default_threads);
 205 |       |
 206 |     0 |   buflen = ARRAY_SIZE(buf);
 207 |     0 |   err = uv_os_getenv("UV_THREADPOOL_SIZE", buf, &buflen);
 208 |     0 |   val = NULL;
 209 |     0 |   if (err == 0)
 210 |     0 |     val = buf;
 211 |       |
 212 |     0 |   if (val != NULL)
 213 |     0 |     nthreads = atoi(val);
 214 |     0 |   if (nthreads == 0)
 215 |     0 |     nthreads = 1;
 216 |     0 |   if (nthreads > MAX_THREADPOOL_SIZE)
 217 |     0 |     nthreads = MAX_THREADPOOL_SIZE;
 218 |       |
 219 |     0 |   threads = default_threads;
 220 |     0 |   if (nthreads > ARRAY_SIZE(default_threads)) {
 221 |     0 |     threads = uv__malloc(nthreads * sizeof(threads[0]));
 222 |     0 |     if (threads == NULL) {
 223 |     0 |       nthreads = ARRAY_SIZE(default_threads);
 224 |     0 |       threads = default_threads;
 225 |     0 |     }
 226 |     0 |   }
 227 |       |
 228 |     0 |   if (uv_cond_init(&cond))
 229 |     0 |     abort();
 230 |       |
 231 |     0 |   if (uv_mutex_init(&mutex))
 232 |     0 |     abort();
 233 |       |
 234 |     0 |   uv__queue_init(&wq);
 235 |     0 |   uv__queue_init(&slow_io_pending_wq);
 236 |     0 |   uv__queue_init(&run_slow_work_message);
 237 |       |
 238 |     0 |   if (uv_sem_init(&sem, 0))
 239 |     0 |     abort();
 240 |       |
 241 |     0 |   config.flags = UV_THREAD_HAS_STACK_SIZE;
 242 |     0 |   config.stack_size = 8u << 20;  /* 8 MB */
 243 |       |
 244 |     0 |   for (i = 0; i < nthreads; i++)
 245 |     0 |     if (uv_thread_create_ex(threads + i, &config, worker, &sem))
 246 |     0 |       abort();
 247 |       |
 248 |     0 |   for (i = 0; i < nthreads; i++)
 249 |     0 |     uv_sem_wait(&sem);
 250 |       |
 251 |     0 |   uv_sem_destroy(&sem);
 252 |     0 | }
 253 |       |
 254 |       |
 255 |       | #ifndef _WIN32
 256 |     0 | static void reset_once(void) {
 257 |     0 |   uv_once_t child_once = UV_ONCE_INIT;
 258 |     0 |   memcpy(&once, &child_once, sizeof(child_once));
 259 |     0 | }
 260 |       | #endif
 261 |       |
 262 |       |
 263 |     0 | static void init_once(void) {
 264 |     0 | #ifndef _WIN32
 265 |       |   /* Re-initialize the threadpool after fork.
 266 |       |    * Note that this discards the global mutex and condition as well
 267 |       |    * as the work queue.
 268 |       |    */
 269 |     0 |   if (pthread_atfork(NULL, NULL, &reset_once))
 270 |     0 |     abort();
 271 |     0 | #endif
 272 |     0 |   init_threads();
 273 |     0 | }
 274 |       |
 275 |       |
 276 |       | void uv__work_submit(uv_loop_t* loop,
 277 |       |                      struct uv__work* w,
 278 |       |                      enum uv__work_kind kind,
 279 |       |                      void (*work)(struct uv__work* w),
 280 |     0 |                      void (*done)(struct uv__work* w, int status)) {
 281 |     0 |   uv_once(&once, init_once);
 282 |     0 |   w->loop = loop;
 283 |     0 |   w->work = work;
 284 |     0 |   w->done = done;
 285 |     0 |   post(&w->wq, kind);
 286 |     0 | }
 287 |       |
 288 |       |
 289 |       | /* TODO(bnoordhuis) teach libuv how to cancel file operations
 290 |       |  * that go through io_uring instead of the thread pool.
 291 |       |  */
 292 |     0 | static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
 293 |     0 |   int cancelled;
 294 |       |
 295 |     0 |   uv_once(&once, init_once);  /* Ensure |mutex| is initialized. */
 296 |     0 |   uv_mutex_lock(&mutex);
 297 |     0 |   uv_mutex_lock(&w->loop->wq_mutex);
 298 |       |
 299 |     0 |   cancelled = !uv__queue_empty(&w->wq) && w->work != NULL;
 300 |     0 |   if (cancelled)
 301 |     0 |     uv__queue_remove(&w->wq);
 302 |       |
 303 |     0 |   uv_mutex_unlock(&w->loop->wq_mutex);
 304 |     0 |   uv_mutex_unlock(&mutex);
 305 |       |
 306 |     0 |   if (!cancelled)
 307 |     0 |     return UV_EBUSY;
 308 |       |
 309 |     0 |   w->work = uv__cancelled;
 310 |     0 |   uv_mutex_lock(&loop->wq_mutex);
 311 |     0 |   uv__queue_insert_tail(&loop->wq, &w->wq);
 312 |     0 |   uv_async_send(&loop->wq_async);
 313 |     0 |   uv_mutex_unlock(&loop->wq_mutex);
 314 |       |
 315 |     0 |   return 0;
 316 |     0 | }
 317 |       |
 318 |       |
 319 |     0 | void uv__work_done(uv_async_t* handle) {
 320 |     0 |   struct uv__work* w;
 321 |     0 |   uv_loop_t* loop;
 322 |     0 |   struct uv__queue* q;
 323 |     0 |   struct uv__queue wq;
 324 |     0 |   int err;
 325 |     0 |   int nevents;
 326 |       |
 327 |     0 |   loop = container_of(handle, uv_loop_t, wq_async);
 328 |     0 |   uv_mutex_lock(&loop->wq_mutex);
 329 |     0 |   uv__queue_move(&loop->wq, &wq);
 330 |     0 |   uv_mutex_unlock(&loop->wq_mutex);
 331 |       |
 332 |     0 |   nevents = 0;
 333 |       |
 334 |     0 |   while (!uv__queue_empty(&wq)) {
 335 |     0 |     q = uv__queue_head(&wq);
 336 |     0 |     uv__queue_remove(q);
 337 |       |
 338 |     0 |     w = container_of(q, struct uv__work, wq);
 339 |     0 |     err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
 340 |     0 |     w->done(w, err);
 341 |     0 |     nevents++;
 342 |     0 |   }
 343 |       |
 344 |       |   /* This check accomplishes 2 things:
 345 |       |    * 1. Even if the queue was empty, the call to uv__work_done() should count
 346 |       |    *    as an event. Which will have been added by the event loop when
 347 |       |    *    calling this callback.
 348 |       |    * 2. Prevents accidental wrap around in case nevents == 0 events == 0.
 349 |       |    */
 350 |     0 |   if (nevents > 1) {
 351 |       |     /* Subtract 1 to counter the call to uv__work_done(). */
 352 |     0 |     uv__metrics_inc_events(loop, nevents - 1);
 353 |     0 |     if (uv__get_internal_fields(loop)->current_timeout == 0)
 354 |     0 |       uv__metrics_inc_events_waiting(loop, nevents - 1);
 355 |     0 |   }
 356 |     0 | }
 357 |       |
 358 |       |
 359 |     0 | static void uv__queue_work(struct uv__work* w) {
 360 |     0 |   uv_work_t* req = container_of(w, uv_work_t, work_req);
 361 |       |
 362 |     0 |   req->work_cb(req);
 363 |     0 | }
 364 |       |
 365 |       |
 366 |     0 | static void uv__queue_done(struct uv__work* w, int err) {
 367 |     0 |   uv_work_t* req;
 368 |       |
 369 |     0 |   req = container_of(w, uv_work_t, work_req);
 370 |     0 |   uv__req_unregister(req->loop);
 371 |       |
 372 |     0 |   if (req->after_work_cb == NULL)
 373 |     0 |     return;
 374 |       |
 375 |     0 |   req->after_work_cb(req, err);
 376 |     0 | }
 377 |       |
 378 |       |
 379 |       | int uv_queue_work(uv_loop_t* loop,
 380 |       |                   uv_work_t* req,
 381 |       |                   uv_work_cb work_cb,
 382 |     0 |                   uv_after_work_cb after_work_cb) {
 383 |     0 |   if (work_cb == NULL)
 384 |     0 |     return UV_EINVAL;
 385 |       |
 386 |     0 |   uv__req_init(loop, req, UV_WORK);
 387 |     0 |   req->loop = loop;
 388 |     0 |   req->work_cb = work_cb;
 389 |     0 |   req->after_work_cb = after_work_cb;
 390 |     0 |   uv__work_submit(loop,
 391 |     0 |                   &req->work_req,
 392 |     0 |                   UV__WORK_CPU,
 393 |     0 |                   uv__queue_work,
 394 |     0 |                   uv__queue_done);
 395 |     0 |   return 0;
 396 |     0 | }
 397 |       |
 398 |       |
 399 |     0 | int uv_cancel(uv_req_t* req) {
 400 |     0 |   struct uv__work* wreq;
 401 |     0 |   uv_loop_t* loop;
 402 |       |
 403 |     0 |   switch (req->type) {
 404 |     0 |   case UV_FS:
 405 |     0 |     loop =  ((uv_fs_t*) req)->loop;
 406 |     0 |     wreq = &((uv_fs_t*) req)->work_req;
 407 |     0 |     break;
 408 |     0 |   case UV_GETADDRINFO:
 409 |     0 |     loop =  ((uv_getaddrinfo_t*) req)->loop;
 410 |     0 |     wreq = &((uv_getaddrinfo_t*) req)->work_req;
 411 |     0 |     break;
 412 |     0 |   case UV_GETNAMEINFO:
 413 |     0 |     loop = ((uv_getnameinfo_t*) req)->loop;
 414 |     0 |     wreq = &((uv_getnameinfo_t*) req)->work_req;
 415 |     0 |     break;
 416 |     0 |   case UV_RANDOM:
 417 |     0 |     loop = ((uv_random_t*) req)->loop;
 418 |     0 |     wreq = &((uv_random_t*) req)->work_req;
 419 |     0 |     break;
 420 |     0 |   case UV_WORK:
 421 |     0 |     loop =  ((uv_work_t*) req)->loop;
 422 |     0 |     wreq = &((uv_work_t*) req)->work_req;
 423 |     0 |     break;
 424 |     0 |   default:
 425 |     0 |     return UV_EINVAL;
 426 |     0 |   }
 427 |       |
 428 |     0 |   return uv__work_cancel(loop, req, wreq);
 429 |     0 | }
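
Note: every instrumented line above has an execution count of 0, i.e. none of the thread-pool code in this file was exercised during the run. As an illustration only (not part of the report), the sketch below shows the public entry points that would drive these lines if a test called them: uv_queue_work() feeds a request through uv__work_submit(), post(), worker() and uv__work_done(), and uv_cancel() reaches uv__work_cancel(). The callback names on_work and on_after_work are hypothetical; uv_default_loop(), uv_run(), uv_queue_work() and uv_cancel() are the regular public libuv API.

/* Illustrative sketch only -- a hypothetical driver, not part of threadpool.c. */
#include <stdio.h>
#include <uv.h>

static void on_work(uv_work_t* req) {
  /* Runs on a pool thread via worker() -> uv__queue_work(). */
  (void) req;
}

static void on_after_work(uv_work_t* req, int status) {
  /* Runs on the loop thread via uv__work_done() -> uv__queue_done();
   * status is 0, or UV_ECANCELED if uv_cancel() won the race. */
  (void) req;
  printf("done, status=%d\n", status);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_work_t req;

  uv_queue_work(loop, &req, on_work, on_after_work);  /* UV__WORK_CPU path */
  uv_cancel((uv_req_t*) &req);  /* returns UV_EBUSY if a worker already took it */
  return uv_run(loop, UV_RUN_DEFAULT);
}

The pool itself is configured lazily on the first submission: init_threads() reads UV_THREADPOOL_SIZE, clamps the value to the range [1, MAX_THREADPOOL_SIZE], and defaults to the four entries of default_threads when the variable is unset.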