/src/opencv/3rdparty/openjpeg/openjp2/thread.c
Line | Count | Source |
1 | | /* |
2 | | * The copyright in this software is being made available under the 2-clauses |
3 | | * BSD License, included below. This software may be subject to other third |
4 | | * party and contributor rights, including patent rights, and no such rights |
5 | | * are granted under this license. |
6 | | * |
7 | | * Copyright (c) 2016, Even Rouault |
8 | | * All rights reserved. |
9 | | * |
10 | | * Redistribution and use in source and binary forms, with or without |
11 | | * modification, are permitted provided that the following conditions |
12 | | * are met: |
13 | | * 1. Redistributions of source code must retain the above copyright |
14 | | * notice, this list of conditions and the following disclaimer. |
15 | | * 2. Redistributions in binary form must reproduce the above copyright |
16 | | * notice, this list of conditions and the following disclaimer in the |
17 | | * documentation and/or other materials provided with the distribution. |
18 | | * |
19 | | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' |
20 | | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
21 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
22 | | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
23 | | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | | * POSSIBILITY OF SUCH DAMAGE. |
30 | | */ |
31 | | |
32 | | #include <assert.h> |
33 | | |
34 | | #ifdef MUTEX_win32 |
35 | | |
36 | | /* Some versions of x86_64-w64-mingw32-gcc -m32 resolve InterlockedCompareExchange() */
37 | | /* as __sync_val_compare_and_swap_4 but fail to link it. As this only protects */
38 | | /* against a rather unlikely race, skip it there. */
39 | | #if !(defined(__MINGW32__) && defined(__i386__)) |
40 | | #define HAVE_INTERLOCKED_COMPARE_EXCHANGE 1 |
41 | | #endif |
42 | | |
43 | | #include <windows.h> |
44 | | #include <process.h> |
45 | | |
46 | | #include "opj_includes.h" |
47 | | |
48 | | OPJ_BOOL OPJ_CALLCONV opj_has_thread_support(void) |
49 | | { |
50 | | return OPJ_TRUE; |
51 | | } |
52 | | |
53 | | int OPJ_CALLCONV opj_get_num_cpus(void) |
54 | | { |
55 | | SYSTEM_INFO info; |
56 | | DWORD dwNum; |
57 | | GetSystemInfo(&info); |
58 | | dwNum = info.dwNumberOfProcessors; |
59 | | if (dwNum < 1) { |
60 | | return 1; |
61 | | } |
62 | | return (int)dwNum; |
63 | | } |
64 | | |
65 | | struct opj_mutex_t { |
66 | | CRITICAL_SECTION cs; |
67 | | }; |
68 | | |
69 | | opj_mutex_t* opj_mutex_create(void) |
70 | | { |
71 | | opj_mutex_t* mutex = (opj_mutex_t*) opj_malloc(sizeof(opj_mutex_t)); |
72 | | if (!mutex) { |
73 | | return NULL; |
74 | | } |
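 | | /* A nonzero spin count keeps short waits in user mode before falling back */
 | | /* to a kernel wait; 4000 is the value MSDN cites for the heap manager's */
 | | /* own critical section. */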
75 | | InitializeCriticalSectionAndSpinCount(&(mutex->cs), 4000); |
76 | | return mutex; |
77 | | } |
78 | | |
79 | | void opj_mutex_lock(opj_mutex_t* mutex) |
80 | | { |
81 | | EnterCriticalSection(&(mutex->cs)); |
82 | | } |
83 | | |
84 | | void opj_mutex_unlock(opj_mutex_t* mutex) |
85 | | { |
86 | | LeaveCriticalSection(&(mutex->cs)); |
87 | | } |
88 | | |
89 | | void opj_mutex_destroy(opj_mutex_t* mutex) |
90 | | { |
91 | | if (!mutex) { |
92 | | return; |
93 | | } |
94 | | DeleteCriticalSection(&(mutex->cs)); |
95 | | opj_free(mutex); |
96 | | } |
97 | | |
98 | | struct opj_cond_waiter_list_t { |
99 | | HANDLE hEvent; |
100 | | struct opj_cond_waiter_list_t* next; |
101 | | }; |
102 | | typedef struct opj_cond_waiter_list_t opj_cond_waiter_list_t; |
103 | | |
104 | | struct opj_cond_t { |
105 | | opj_mutex_t *internal_mutex; |
106 | | opj_cond_waiter_list_t *waiter_list; |
107 | | }; |
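 | | /* Older Win32 (pre-Vista) lacks native condition variables, so one is */
 | | /* emulated here: each waiting thread parks on its own auto-reset event */
 | | /* (cached in TLS), and the condition keeps a list of events to signal. */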
108 | | |
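 | | /* Once-only TLS key allocation guard: inTLSLockedSection acts as a */
 | | /* hand-rolled spinlock toggled with InterlockedCompareExchange (see */
 | | /* opj_cond_create() and opj_thread_callback_adapter()). */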
109 | | static DWORD TLSKey = 0; |
110 | | static volatile LONG inTLSLockedSection = 0; |
111 | | static volatile int TLSKeyInit = OPJ_FALSE; |
112 | | |
113 | | opj_cond_t* opj_cond_create(void) |
114 | | { |
115 | | opj_cond_t* cond = (opj_cond_t*) opj_malloc(sizeof(opj_cond_t)); |
116 | | if (!cond) { |
117 | | return NULL; |
118 | | } |
119 | | |
120 | | /* Make sure that the TLS key is allocated in a thread-safe way */ |
121 | | /* We cannot use a global mutex/critical section since its creation itself would not be */ |
122 | | /* thread-safe, so use the InterlockedCompareExchange trick instead */
123 | | while (OPJ_TRUE) { |
124 | | |
125 | | #if HAVE_INTERLOCKED_COMPARE_EXCHANGE |
126 | | if (InterlockedCompareExchange(&inTLSLockedSection, 1, 0) == 0) |
127 | | #endif |
128 | | { |
129 | | if (!TLSKeyInit) { |
130 | | TLSKey = TlsAlloc(); |
131 | | TLSKeyInit = OPJ_TRUE; |
132 | | } |
133 | | #if HAVE_INTERLOCKED_COMPARE_EXCHANGE |
134 | | InterlockedCompareExchange(&inTLSLockedSection, 0, 1); |
135 | | #endif |
136 | | break; |
137 | | } |
138 | | } |
139 | | |
140 | | if (TLSKey == TLS_OUT_OF_INDEXES) { |
141 | | opj_free(cond); |
142 | | return NULL; |
143 | | } |
144 | | cond->internal_mutex = opj_mutex_create(); |
145 | | if (cond->internal_mutex == NULL) { |
146 | | opj_free(cond); |
147 | | return NULL; |
148 | | } |
149 | | cond->waiter_list = NULL; |
150 | | return cond; |
151 | | } |
152 | | |
153 | | void opj_cond_wait(opj_cond_t* cond, opj_mutex_t* mutex) |
154 | | { |
155 | | opj_cond_waiter_list_t* item; |
156 | | HANDLE hEvent = (HANDLE) TlsGetValue(TLSKey); |
157 | | if (hEvent == NULL) { |
158 | | hEvent = CreateEvent(NULL, /* security attributes */ |
159 | | 0, /* manual reset = no */ |
160 | | 0, /* initial state = unsignaled */ |
161 | | NULL /* no name */); |
162 | | assert(hEvent); |
163 | | |
164 | | TlsSetValue(TLSKey, hEvent); |
165 | | } |
166 | | |
167 | | /* Insert the waiter into the waiter list of the condition */ |
168 | | opj_mutex_lock(cond->internal_mutex); |
169 | | |
170 | | item = (opj_cond_waiter_list_t*)opj_malloc(sizeof(opj_cond_waiter_list_t)); |
171 | | assert(item != NULL); |
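 | | /* With NDEBUG this allocation is unchecked; a failure here would */
 | | /* dereference NULL just below. */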
172 | | |
173 | | item->hEvent = hEvent; |
174 | | item->next = cond->waiter_list; |
175 | | |
176 | | cond->waiter_list = item; |
177 | | |
178 | | opj_mutex_unlock(cond->internal_mutex); |
179 | | |
180 | | /* Release the client mutex before waiting for the event to be signaled */
181 | | opj_mutex_unlock(mutex); |
182 | | |
183 | | /* Ideally we would check that we do not get WAIT_FAILED but it is hard */ |
184 | | /* to report a failure. */ |
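 | | /* No wakeup is lost between the unlock above and this wait: if */
 | | /* opj_cond_signal() ran in that window, the auto-reset event is already */
 | | /* signaled and the wait returns immediately. */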
185 | | WaitForSingleObject(hEvent, INFINITE); |
186 | | |
187 | | /* Reacquire the client mutex */ |
188 | | opj_mutex_lock(mutex); |
189 | | } |
190 | | |
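 | | /* Only one waiter is woken per call; the API provides no broadcast */
 | | /* primitive, so waiters are always woken one at a time. */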
191 | | void opj_cond_signal(opj_cond_t* cond) |
192 | | { |
193 | | opj_cond_waiter_list_t* psIter; |
194 | | |
195 | | /* Signal the first registered event, and remove it from the list */ |
196 | | opj_mutex_lock(cond->internal_mutex); |
197 | | |
198 | | psIter = cond->waiter_list; |
199 | | if (psIter != NULL) { |
200 | | SetEvent(psIter->hEvent); |
201 | | cond->waiter_list = psIter->next; |
202 | | opj_free(psIter); |
203 | | } |
204 | | |
205 | | opj_mutex_unlock(cond->internal_mutex); |
206 | | } |
207 | | |
208 | | void opj_cond_destroy(opj_cond_t* cond) |
209 | | { |
210 | | if (!cond) { |
211 | | return; |
212 | | } |
213 | | opj_mutex_destroy(cond->internal_mutex); |
214 | | assert(cond->waiter_list == NULL); |
215 | | opj_free(cond); |
216 | | } |
217 | | |
218 | | struct opj_thread_t { |
219 | | opj_thread_fn thread_fn; |
220 | | void* user_data; |
221 | | HANDLE hThread; |
222 | | }; |
223 | | |
224 | | static unsigned int __stdcall opj_thread_callback_adapter(void *info) |
225 | | { |
226 | | opj_thread_t* thread = (opj_thread_t*) info; |
227 | | HANDLE hEvent = NULL; |
228 | | |
229 | | thread->thread_fn(thread->user_data); |
230 | | |
231 | | /* Free the handle possibly allocated by a cond */
232 | | while (OPJ_TRUE) { |
233 | | /* Make sure TLSKey is not being created just at that moment... */ |
234 | | #if HAVE_INTERLOCKED_COMPARE_EXCHANGE |
235 | | if (InterlockedCompareExchange(&inTLSLockedSection, 1, 0) == 0) |
236 | | #endif |
237 | | { |
238 | | if (TLSKeyInit) { |
239 | | hEvent = (HANDLE) TlsGetValue(TLSKey); |
240 | | } |
241 | | #if HAVE_INTERLOCKED_COMPARE_EXCHANGE |
242 | | InterlockedCompareExchange(&inTLSLockedSection, 0, 1); |
243 | | #endif |
244 | | break; |
245 | | } |
246 | | } |
247 | | if (hEvent) { |
248 | | CloseHandle(hEvent); |
249 | | } |
250 | | |
251 | | return 0; |
252 | | } |
253 | | |
254 | | opj_thread_t* opj_thread_create(opj_thread_fn thread_fn, void* user_data) |
255 | | { |
256 | | opj_thread_t* thread; |
257 | | |
258 | | assert(thread_fn); |
259 | | |
260 | | thread = (opj_thread_t*) opj_malloc(sizeof(opj_thread_t)); |
261 | | if (!thread) { |
262 | | return NULL; |
263 | | } |
264 | | thread->thread_fn = thread_fn; |
265 | | thread->user_data = user_data; |
266 | | |
267 | | thread->hThread = (HANDLE)_beginthreadex(NULL, 0, |
268 | | opj_thread_callback_adapter, thread, 0, NULL); |
269 | | |
270 | | if (thread->hThread == NULL) { |
271 | | opj_free(thread); |
272 | | return NULL; |
273 | | } |
274 | | return thread; |
275 | | } |
276 | | |
277 | | void opj_thread_join(opj_thread_t* thread) |
278 | | { |
279 | | WaitForSingleObject(thread->hThread, INFINITE); |
280 | | CloseHandle(thread->hThread); |
281 | | |
282 | | opj_free(thread); |
283 | | } |
284 | | |
285 | | #elif MUTEX_pthread |
286 | | |
287 | | #include <pthread.h> |
288 | | #include <stdlib.h> |
289 | | #include <unistd.h> |
290 | | |
291 | | /* Moved after all system includes, and in particular pthread.h, so as to */ |
292 | | /* avoid poisoning issues with malloc() use in pthread.h with uClibc (#1013) */
293 | | #include "opj_includes.h" |
294 | | |
295 | | OPJ_BOOL OPJ_CALLCONV opj_has_thread_support(void) |
296 | 0 | { |
297 | 0 | return OPJ_TRUE; |
298 | 0 | } |
299 | | |
300 | | int OPJ_CALLCONV opj_get_num_cpus(void) |
301 | 0 | { |
302 | 0 | #ifdef _SC_NPROCESSORS_ONLN |
303 | 0 | return (int)sysconf(_SC_NPROCESSORS_ONLN); |
304 | | #else |
305 | | return 1; |
306 | | #endif |
307 | 0 | } |
308 | | |
309 | | struct opj_mutex_t { |
310 | | pthread_mutex_t mutex; |
311 | | }; |
312 | | |
313 | | opj_mutex_t* opj_mutex_create(void) |
314 | 8.99k | { |
315 | 8.99k | opj_mutex_t* mutex = (opj_mutex_t*) opj_calloc(1U, sizeof(opj_mutex_t)); |
316 | 8.99k | if (mutex != NULL) { |
317 | 8.99k | if (pthread_mutex_init(&mutex->mutex, NULL) != 0) { |
318 | 0 | opj_free(mutex); |
319 | 0 | mutex = NULL; |
320 | 0 | } |
321 | 8.99k | } |
322 | 8.99k | return mutex; |
323 | 8.99k | } |
324 | | |
325 | | void opj_mutex_lock(opj_mutex_t* mutex) |
326 | 183k | { |
327 | 183k | pthread_mutex_lock(&(mutex->mutex)); |
328 | 183k | } |
329 | | |
330 | | void opj_mutex_unlock(opj_mutex_t* mutex) |
331 | 183k | { |
332 | 183k | pthread_mutex_unlock(&(mutex->mutex)); |
333 | 183k | } |
334 | | |
335 | | void opj_mutex_destroy(opj_mutex_t* mutex) |
336 | 17.2k | { |
337 | 17.2k | if (!mutex) { |
338 | 8.24k | return; |
339 | 8.24k | } |
340 | 8.99k | pthread_mutex_destroy(&(mutex->mutex)); |
341 | 8.99k | opj_free(mutex); |
342 | 8.99k | } |
343 | | |
344 | | struct opj_cond_t { |
345 | | pthread_cond_t cond; |
346 | | }; |
347 | | |
348 | | opj_cond_t* opj_cond_create(void) |
349 | 0 | { |
350 | 0 | opj_cond_t* cond = (opj_cond_t*) opj_malloc(sizeof(opj_cond_t)); |
351 | 0 | if (!cond) { |
352 | 0 | return NULL; |
353 | 0 | } |
354 | 0 | if (pthread_cond_init(&(cond->cond), NULL) != 0) { |
355 | 0 | opj_free(cond); |
356 | 0 | return NULL; |
357 | 0 | } |
358 | 0 | return cond; |
359 | 0 | } |
360 | | |
361 | | void opj_cond_wait(opj_cond_t* cond, opj_mutex_t* mutex) |
362 | 0 | { |
363 | 0 | pthread_cond_wait(&(cond->cond), &(mutex->mutex)); |
364 | 0 | } |
365 | | |
366 | | void opj_cond_signal(opj_cond_t* cond) |
367 | 0 | { |
368 | 0 | int ret = pthread_cond_signal(&(cond->cond)); |
369 | 0 | (void)ret; |
370 | 0 | assert(ret == 0); |
371 | 0 | } |
372 | | |
373 | | void opj_cond_destroy(opj_cond_t* cond) |
374 | 0 | { |
375 | 0 | if (!cond) { |
376 | 0 | return; |
377 | 0 | } |
378 | 0 | pthread_cond_destroy(&(cond->cond)); |
379 | 0 | opj_free(cond); |
380 | 0 | } |
381 | | |
382 | | |
383 | | struct opj_thread_t { |
384 | | opj_thread_fn thread_fn; |
385 | | void* user_data; |
386 | | pthread_t thread; |
387 | | }; |
388 | | |
389 | | static void* opj_thread_callback_adapter(void* info) |
390 | 0 | { |
391 | 0 | opj_thread_t* thread = (opj_thread_t*) info; |
392 | 0 | thread->thread_fn(thread->user_data); |
393 | 0 | return NULL; |
394 | 0 | } |
395 | | |
396 | | opj_thread_t* opj_thread_create(opj_thread_fn thread_fn, void* user_data) |
397 | 0 | { |
398 | 0 | pthread_attr_t attr; |
399 | 0 | opj_thread_t* thread; |
400 | |
401 | 0 | assert(thread_fn); |
402 | |
403 | 0 | thread = (opj_thread_t*) opj_malloc(sizeof(opj_thread_t)); |
404 | 0 | if (!thread) { |
405 | 0 | return NULL; |
406 | 0 | } |
407 | 0 | thread->thread_fn = thread_fn; |
408 | 0 | thread->user_data = user_data; |
409 | |
410 | 0 | pthread_attr_init(&attr); |
411 | 0 | pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); |
412 | 0 | if (pthread_create(&(thread->thread), &attr, |
413 | 0 | opj_thread_callback_adapter, (void *) thread) != 0) { |
414 | 0 | opj_free(thread); |
415 | 0 | return NULL; |
416 | 0 | } |
417 | 0 | return thread; |
418 | 0 | } |
419 | | |
420 | | void opj_thread_join(opj_thread_t* thread) |
421 | 0 | { |
422 | 0 | void* status; |
423 | 0 | pthread_join(thread->thread, &status); |
424 | |
425 | 0 | opj_free(thread); |
426 | 0 | } |
427 | | |
428 | | #else |
429 | | /* Stub implementation */ |
430 | | |
431 | | #include "opj_includes.h" |
432 | | |
433 | | OPJ_BOOL OPJ_CALLCONV opj_has_thread_support(void) |
434 | | { |
435 | | return OPJ_FALSE; |
436 | | } |
437 | | |
438 | | int OPJ_CALLCONV opj_get_num_cpus(void) |
439 | | { |
440 | | return 1; |
441 | | } |
442 | | |
443 | | opj_mutex_t* opj_mutex_create(void) |
444 | | { |
445 | | return NULL; |
446 | | } |
447 | | |
448 | | void opj_mutex_lock(opj_mutex_t* mutex) |
449 | | { |
450 | | (void) mutex; |
451 | | } |
452 | | |
453 | | void opj_mutex_unlock(opj_mutex_t* mutex) |
454 | | { |
455 | | (void) mutex; |
456 | | } |
457 | | |
458 | | void opj_mutex_destroy(opj_mutex_t* mutex) |
459 | | { |
460 | | (void) mutex; |
461 | | } |
462 | | |
463 | | opj_cond_t* opj_cond_create(void) |
464 | | { |
465 | | return NULL; |
466 | | } |
467 | | |
468 | | void opj_cond_wait(opj_cond_t* cond, opj_mutex_t* mutex) |
469 | | { |
470 | | (void) cond; |
471 | | (void) mutex; |
472 | | } |
473 | | |
474 | | void opj_cond_signal(opj_cond_t* cond) |
475 | | { |
476 | | (void) cond; |
477 | | } |
478 | | |
479 | | void opj_cond_destroy(opj_cond_t* cond) |
480 | | { |
481 | | (void) cond; |
482 | | } |
483 | | |
484 | | opj_thread_t* opj_thread_create(opj_thread_fn thread_fn, void* user_data) |
485 | | { |
486 | | (void) thread_fn; |
487 | | (void) user_data; |
488 | | return NULL; |
489 | | } |
490 | | |
491 | | void opj_thread_join(opj_thread_t* thread) |
492 | | { |
493 | | (void) thread; |
494 | | } |
495 | | |
496 | | #endif |
497 | | |
498 | | typedef struct { |
499 | | int key; |
500 | | void* value; |
501 | | opj_tls_free_func opj_free_func; |
502 | | } opj_tls_key_val_t; |
503 | | |
504 | | struct opj_tls_t { |
505 | | opj_tls_key_val_t* key_val; |
506 | | int key_val_count; |
507 | | }; |
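 | | /* opj_tls_t is a per-worker-thread key/value store handed to each job; */
 | | /* lookups are a linear scan, which is adequate for the few keys in use. */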
508 | | |
509 | | static opj_tls_t* opj_tls_new(void) |
510 | 8.24k | { |
511 | 8.24k | return (opj_tls_t*) opj_calloc(1, sizeof(opj_tls_t)); |
512 | 8.24k | } |
513 | | |
514 | | static void opj_tls_destroy(opj_tls_t* tls) |
515 | 8.24k | { |
516 | 8.24k | int i; |
517 | 8.24k | if (!tls) { |
518 | 0 | return; |
519 | 0 | } |
520 | 14.9k | for (i = 0; i < tls->key_val_count; i++) { |
521 | 6.67k | if (tls->key_val[i].opj_free_func) { |
522 | 6.67k | tls->key_val[i].opj_free_func(tls->key_val[i].value); |
523 | 6.67k | } |
524 | 6.67k | } |
525 | 8.24k | opj_free(tls->key_val); |
526 | 8.24k | opj_free(tls); |
527 | 8.24k | } |
528 | | |
529 | | void* opj_tls_get(opj_tls_t* tls, int key) |
530 | 16.8M | { |
531 | 16.8M | int i; |
532 | 16.8M | for (i = 0; i < tls->key_val_count; i++) { |
533 | 16.8M | if (tls->key_val[i].key == key) { |
534 | 16.8M | return tls->key_val[i].value; |
535 | 16.8M | } |
536 | 16.8M | } |
537 | 6.67k | return NULL; |
538 | 16.8M | } |
539 | | |
540 | | OPJ_BOOL opj_tls_set(opj_tls_t* tls, int key, void* value, |
541 | | opj_tls_free_func opj_free_func) |
542 | 6.67k | { |
543 | 6.67k | opj_tls_key_val_t* new_key_val; |
544 | 6.67k | int i; |
545 | | |
546 | 6.67k | if (tls->key_val_count == INT_MAX) { |
547 | 0 | return OPJ_FALSE; |
548 | 0 | } |
549 | 6.67k | for (i = 0; i < tls->key_val_count; i++) { |
550 | 0 | if (tls->key_val[i].key == key) { |
551 | 0 | if (tls->key_val[i].opj_free_func) { |
552 | 0 | tls->key_val[i].opj_free_func(tls->key_val[i].value); |
553 | 0 | } |
554 | 0 | tls->key_val[i].value = value; |
555 | 0 | tls->key_val[i].opj_free_func = opj_free_func; |
556 | 0 | return OPJ_TRUE; |
557 | 0 | } |
558 | 0 | } |
559 | 6.67k | new_key_val = (opj_tls_key_val_t*) opj_realloc(tls->key_val, |
560 | 6.67k | ((size_t)tls->key_val_count + 1U) * sizeof(opj_tls_key_val_t)); |
561 | 6.67k | if (!new_key_val) { |
562 | 0 | return OPJ_FALSE; |
563 | 0 | } |
564 | 6.67k | tls->key_val = new_key_val; |
565 | 6.67k | new_key_val[tls->key_val_count].key = key; |
566 | 6.67k | new_key_val[tls->key_val_count].value = value; |
567 | 6.67k | new_key_val[tls->key_val_count].opj_free_func = opj_free_func; |
568 | 6.67k | tls->key_val_count ++; |
569 | 6.67k | return OPJ_TRUE; |
570 | 6.67k | } |
571 | | |
572 | | |
573 | | typedef struct { |
574 | | opj_job_fn job_fn; |
575 | | void *user_data; |
576 | | } opj_worker_thread_job_t; |
577 | | |
578 | | typedef struct { |
579 | | opj_thread_pool_t *tp; |
580 | | opj_thread_t *thread; |
581 | | int marked_as_waiting; |
582 | | |
583 | | opj_mutex_t *mutex; |
584 | | opj_cond_t *cond; |
585 | | } opj_worker_thread_t; |
586 | | |
587 | | typedef enum { |
588 | | OPJWTS_OK, |
589 | | OPJWTS_STOP, |
590 | | OPJWTS_ERROR |
591 | | } opj_worker_thread_state; |
592 | | |
593 | | struct opj_job_list_t { |
594 | | opj_worker_thread_job_t* job; |
595 | | struct opj_job_list_t* next; |
596 | | }; |
597 | | typedef struct opj_job_list_t opj_job_list_t; |
598 | | |
599 | | struct opj_worker_thread_list_t { |
600 | | opj_worker_thread_t* worker_thread; |
601 | | struct opj_worker_thread_list_t* next; |
602 | | }; |
603 | | typedef struct opj_worker_thread_list_t opj_worker_thread_list_t; |
604 | | |
605 | | struct opj_thread_pool_t { |
606 | | opj_worker_thread_t* worker_threads; |
607 | | int worker_threads_count; |
608 | | opj_cond_t* cond; |
609 | | opj_mutex_t* mutex; |
610 | | volatile opj_worker_thread_state state; |
611 | | opj_job_list_t* job_queue; |
612 | | volatile int pending_jobs_count; |
613 | | opj_worker_thread_list_t* waiting_worker_thread_list; |
614 | | int waiting_worker_thread_count; |
615 | | opj_tls_t* tls; |
616 | | int signaling_threshold; |
617 | | }; |
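 | | /* signaling_threshold bounds the backlog: submitters block while */
 | | /* pending_jobs_count exceeds it, and workers signal tp->cond once the */
 | | /* count drops back to it (see opj_thread_pool_get_next_job()). */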
618 | | |
619 | | static OPJ_BOOL opj_thread_pool_setup(opj_thread_pool_t* tp, int num_threads); |
620 | | static opj_worker_thread_job_t* opj_thread_pool_get_next_job( |
621 | | opj_thread_pool_t* tp, |
622 | | opj_worker_thread_t* worker_thread, |
623 | | OPJ_BOOL signal_job_finished); |
624 | | |
625 | | opj_thread_pool_t* opj_thread_pool_create(int num_threads) |
626 | 8.24k | { |
627 | 8.24k | opj_thread_pool_t* tp; |
628 | | |
629 | 8.24k | tp = (opj_thread_pool_t*) opj_calloc(1, sizeof(opj_thread_pool_t)); |
630 | 8.24k | if (!tp) { |
631 | 0 | return NULL; |
632 | 0 | } |
633 | 8.24k | tp->state = OPJWTS_OK; |
634 | | |
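 | | /* num_threads <= 0 selects the degenerate pool: no mutex and no worker */
 | | /* threads are created, and submitted jobs run synchronously in the */
 | | /* caller (see opj_thread_pool_submit_job()). */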
635 | 8.24k | if (num_threads <= 0) { |
636 | 8.24k | tp->tls = opj_tls_new(); |
637 | 8.24k | if (!tp->tls) { |
638 | 0 | opj_free(tp); |
639 | 0 | tp = NULL; |
640 | 0 | } |
641 | 8.24k | return tp; |
642 | 8.24k | } |
643 | | |
644 | 0 | tp->mutex = opj_mutex_create(); |
645 | 0 | if (!tp->mutex) { |
646 | 0 | opj_free(tp); |
647 | 0 | return NULL; |
648 | 0 | } |
649 | 0 | if (!opj_thread_pool_setup(tp, num_threads)) { |
650 | 0 | opj_thread_pool_destroy(tp); |
651 | 0 | return NULL; |
652 | 0 | } |
653 | 0 | return tp; |
654 | 0 | } |
655 | | |
656 | | static void opj_worker_thread_function(void* user_data) |
657 | 0 | { |
658 | 0 | opj_worker_thread_t* worker_thread; |
659 | 0 | opj_thread_pool_t* tp; |
660 | 0 | opj_tls_t* tls; |
661 | 0 | OPJ_BOOL job_finished = OPJ_FALSE; |
662 | |
663 | 0 | worker_thread = (opj_worker_thread_t*) user_data; |
664 | 0 | tp = worker_thread->tp; |
665 | 0 | tls = opj_tls_new(); |
666 | |
667 | 0 | while (OPJ_TRUE) { |
668 | 0 | opj_worker_thread_job_t* job = opj_thread_pool_get_next_job(tp, worker_thread, |
669 | 0 | job_finished); |
670 | 0 | if (job == NULL) { |
671 | 0 | break; |
672 | 0 | } |
673 | | |
674 | 0 | if (job->job_fn) { |
675 | 0 | job->job_fn(job->user_data, tls); |
676 | 0 | } |
677 | 0 | opj_free(job); |
678 | 0 | job_finished = OPJ_TRUE; |
679 | 0 | } |
680 | |
681 | 0 | opj_tls_destroy(tls); |
682 | 0 | } |
683 | | |
684 | | static OPJ_BOOL opj_thread_pool_setup(opj_thread_pool_t* tp, int num_threads) |
685 | 0 | { |
686 | 0 | int i; |
687 | 0 | OPJ_BOOL bRet = OPJ_TRUE; |
688 | |
689 | 0 | assert(num_threads > 0); |
690 | |
691 | 0 | tp->cond = opj_cond_create(); |
692 | 0 | if (tp->cond == NULL) { |
693 | 0 | return OPJ_FALSE; |
694 | 0 | } |
695 | | |
696 | 0 | tp->worker_threads = (opj_worker_thread_t*) opj_calloc((size_t)num_threads, |
697 | 0 | sizeof(opj_worker_thread_t)); |
698 | 0 | if (tp->worker_threads == NULL) { |
699 | 0 | return OPJ_FALSE; |
700 | 0 | } |
701 | 0 | tp->worker_threads_count = num_threads; |
702 | |
703 | 0 | for (i = 0; i < num_threads; i++) { |
704 | 0 | tp->worker_threads[i].tp = tp; |
705 | |
706 | 0 | tp->worker_threads[i].mutex = opj_mutex_create(); |
707 | 0 | if (tp->worker_threads[i].mutex == NULL) { |
708 | 0 | tp->worker_threads_count = i; |
709 | 0 | bRet = OPJ_FALSE; |
710 | 0 | break; |
711 | 0 | } |
712 | | |
713 | 0 | tp->worker_threads[i].cond = opj_cond_create(); |
714 | 0 | if (tp->worker_threads[i].cond == NULL) { |
715 | 0 | opj_mutex_destroy(tp->worker_threads[i].mutex); |
716 | 0 | tp->worker_threads_count = i; |
717 | 0 | bRet = OPJ_FALSE; |
718 | 0 | break; |
719 | 0 | } |
720 | | |
721 | 0 | tp->worker_threads[i].marked_as_waiting = OPJ_FALSE; |
722 | |
723 | 0 | tp->worker_threads[i].thread = opj_thread_create(opj_worker_thread_function, |
724 | 0 | &(tp->worker_threads[i])); |
725 | 0 | if (tp->worker_threads[i].thread == NULL) { |
726 | 0 | opj_mutex_destroy(tp->worker_threads[i].mutex); |
727 | 0 | opj_cond_destroy(tp->worker_threads[i].cond); |
728 | 0 | tp->worker_threads_count = i; |
729 | 0 | bRet = OPJ_FALSE; |
730 | 0 | break; |
731 | 0 | } |
732 | 0 | } |
733 | | |
734 | | /* Wait for all threads to be started */
735 | | /* printf("waiting for all threads to be started\n"); */ |
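 | | /* Each worker increments waiting_worker_thread_count and signals */
 | | /* tp->cond from opj_thread_pool_get_next_job() once it is parked. */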
736 | 0 | opj_mutex_lock(tp->mutex); |
737 | 0 | while (tp->waiting_worker_thread_count < tp->worker_threads_count) { |
738 | 0 | opj_cond_wait(tp->cond, tp->mutex); |
739 | 0 | } |
740 | 0 | opj_mutex_unlock(tp->mutex); |
741 | | /* printf("all threads started\n"); */ |
742 | |
743 | 0 | if (tp->state == OPJWTS_ERROR) { |
744 | 0 | bRet = OPJ_FALSE; |
745 | 0 | } |
746 | |
747 | 0 | return bRet; |
748 | 0 | } |
749 | | |
750 | | /* |
751 | | void opj_waiting() |
752 | | { |
753 | | printf("waiting!\n"); |
754 | | } |
755 | | */ |
756 | | |
757 | | static opj_worker_thread_job_t* opj_thread_pool_get_next_job( |
758 | | opj_thread_pool_t* tp, |
759 | | opj_worker_thread_t* worker_thread, |
760 | | OPJ_BOOL signal_job_finished) |
761 | 0 | { |
762 | 0 | while (OPJ_TRUE) { |
763 | 0 | opj_job_list_t* top_job_iter; |
764 | |
765 | 0 | opj_mutex_lock(tp->mutex); |
766 | |
767 | 0 | if (signal_job_finished) { |
768 | 0 | signal_job_finished = OPJ_FALSE; |
769 | 0 | tp->pending_jobs_count --; |
770 | | /*printf("tp=%p, remaining jobs: %d\n", tp, tp->pending_jobs_count);*/ |
771 | 0 | if (tp->pending_jobs_count <= tp->signaling_threshold) { |
772 | 0 | opj_cond_signal(tp->cond); |
773 | 0 | } |
774 | 0 | } |
775 | |
|
776 | 0 | if (tp->state == OPJWTS_STOP) { |
777 | 0 | opj_mutex_unlock(tp->mutex); |
778 | 0 | return NULL; |
779 | 0 | } |
780 | 0 | top_job_iter = tp->job_queue; |
781 | 0 | if (top_job_iter) { |
782 | 0 | opj_worker_thread_job_t* job; |
783 | 0 | tp->job_queue = top_job_iter->next; |
784 | |
785 | 0 | job = top_job_iter->job; |
786 | 0 | opj_mutex_unlock(tp->mutex); |
787 | 0 | opj_free(top_job_iter); |
788 | 0 | return job; |
789 | 0 | } |
790 | | |
791 | | /* opj_waiting(); */ |
792 | 0 | if (!worker_thread->marked_as_waiting) { |
793 | 0 | opj_worker_thread_list_t* item; |
794 | |
795 | 0 | worker_thread->marked_as_waiting = OPJ_TRUE; |
796 | 0 | tp->waiting_worker_thread_count ++; |
797 | 0 | assert(tp->waiting_worker_thread_count <= tp->worker_threads_count); |
798 | |
799 | 0 | item = (opj_worker_thread_list_t*) opj_malloc(sizeof(opj_worker_thread_list_t)); |
800 | 0 | if (item == NULL) { |
801 | 0 | tp->state = OPJWTS_ERROR; |
802 | 0 | opj_cond_signal(tp->cond); |
803 | |
804 | 0 | opj_mutex_unlock(tp->mutex); |
805 | 0 | return NULL; |
806 | 0 | } |
807 | | |
808 | 0 | item->worker_thread = worker_thread; |
809 | 0 | item->next = tp->waiting_worker_thread_list; |
810 | 0 | tp->waiting_worker_thread_list = item; |
811 | 0 | } |
812 | | |
813 | | /* printf("signaling that worker thread is ready\n"); */ |
814 | 0 | opj_cond_signal(tp->cond); |
815 | |
816 | 0 | opj_mutex_lock(worker_thread->mutex); |
817 | 0 | opj_mutex_unlock(tp->mutex); |
818 | | |
819 | | /* printf("waiting for job\n"); */ |
820 | 0 | opj_cond_wait(worker_thread->cond, worker_thread->mutex); |
821 | |
822 | 0 | opj_mutex_unlock(worker_thread->mutex); |
823 | | /* printf("got job\n"); */ |
824 | 0 | } |
825 | 0 | } |
826 | | |
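 | | /* Jobs are pushed and popped at the head of job_queue, i.e. in LIFO */
 | | /* order; callers must not rely on FIFO scheduling. */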
827 | | OPJ_BOOL opj_thread_pool_submit_job(opj_thread_pool_t* tp, |
828 | | opj_job_fn job_fn, |
829 | | void* user_data) |
830 | 16.8M | { |
831 | 16.8M | opj_worker_thread_job_t* job; |
832 | 16.8M | opj_job_list_t* item; |
833 | | |
834 | 16.8M | if (tp->mutex == NULL) { |
835 | 16.8M | job_fn(user_data, tp->tls); |
836 | 16.8M | return OPJ_TRUE; |
837 | 16.8M | } |
838 | | |
839 | 0 | job = (opj_worker_thread_job_t*)opj_malloc(sizeof(opj_worker_thread_job_t)); |
840 | 0 | if (job == NULL) { |
841 | 0 | return OPJ_FALSE; |
842 | 0 | } |
843 | 0 | job->job_fn = job_fn; |
844 | 0 | job->user_data = user_data; |
845 | |
846 | 0 | item = (opj_job_list_t*) opj_malloc(sizeof(opj_job_list_t)); |
847 | 0 | if (item == NULL) { |
848 | 0 | opj_free(job); |
849 | 0 | return OPJ_FALSE; |
850 | 0 | } |
851 | 0 | item->job = job; |
852 | |
853 | 0 | opj_mutex_lock(tp->mutex); |
854 | |
855 | 0 | tp->signaling_threshold = 100 * tp->worker_threads_count; |
856 | 0 | while (tp->pending_jobs_count > tp->signaling_threshold) { |
857 | | /* printf("%d jobs enqueued. Waiting\n", tp->pending_jobs_count); */ |
858 | 0 | opj_cond_wait(tp->cond, tp->mutex); |
859 | | /* printf("...%d jobs enqueued.\n", tp->pending_jobs_count); */ |
860 | 0 | } |
861 | |
862 | 0 | item->next = tp->job_queue; |
863 | 0 | tp->job_queue = item; |
864 | 0 | tp->pending_jobs_count ++; |
865 | |
866 | 0 | if (tp->waiting_worker_thread_list) { |
867 | 0 | opj_worker_thread_t* worker_thread; |
868 | 0 | opj_worker_thread_list_t* next; |
869 | 0 | opj_worker_thread_list_t* to_opj_free; |
870 | |
871 | 0 | worker_thread = tp->waiting_worker_thread_list->worker_thread; |
872 | |
873 | 0 | assert(worker_thread->marked_as_waiting); |
874 | 0 | worker_thread->marked_as_waiting = OPJ_FALSE; |
875 | |
876 | 0 | next = tp->waiting_worker_thread_list->next; |
877 | 0 | to_opj_free = tp->waiting_worker_thread_list; |
878 | 0 | tp->waiting_worker_thread_list = next; |
879 | 0 | tp->waiting_worker_thread_count --; |
880 | |
881 | 0 | opj_mutex_lock(worker_thread->mutex); |
882 | 0 | opj_mutex_unlock(tp->mutex); |
883 | 0 | opj_cond_signal(worker_thread->cond); |
884 | 0 | opj_mutex_unlock(worker_thread->mutex); |
885 | |
886 | 0 | opj_free(to_opj_free); |
887 | 0 | } else { |
888 | 0 | opj_mutex_unlock(tp->mutex); |
889 | 0 | } |
890 | |
891 | 0 | return OPJ_TRUE; |
892 | 0 | } |
893 | | |
894 | | void opj_thread_pool_wait_completion(opj_thread_pool_t* tp, |
895 | | int max_remaining_jobs) |
896 | 8.99k | { |
897 | 8.99k | if (tp->mutex == NULL) { |
898 | 8.99k | return; |
899 | 8.99k | } |
900 | | |
901 | 0 | if (max_remaining_jobs < 0) { |
902 | 0 | max_remaining_jobs = 0; |
903 | 0 | } |
904 | 0 | opj_mutex_lock(tp->mutex); |
905 | 0 | tp->signaling_threshold = max_remaining_jobs; |
906 | 0 | while (tp->pending_jobs_count > max_remaining_jobs) { |
907 | | /*printf("tp=%p, jobs before wait = %d, max_remaining_jobs = %d\n", tp, tp->pending_jobs_count, max_remaining_jobs);*/ |
908 | 0 | opj_cond_wait(tp->cond, tp->mutex); |
909 | | /*printf("tp=%p, jobs after wait = %d\n", tp, tp->pending_jobs_count);*/ |
910 | 0 | } |
911 | 0 | opj_mutex_unlock(tp->mutex); |
912 | 0 | } |
913 | | |
914 | | int opj_thread_pool_get_thread_count(opj_thread_pool_t* tp) |
915 | 16.8M | { |
916 | 16.8M | return tp->worker_threads_count; |
917 | 16.8M | } |
918 | | |
919 | | void opj_thread_pool_destroy(opj_thread_pool_t* tp) |
920 | 8.24k | { |
921 | 8.24k | if (!tp) { |
922 | 0 | return; |
923 | 0 | } |
924 | 8.24k | if (tp->cond) { |
925 | 0 | int i; |
926 | 0 | opj_thread_pool_wait_completion(tp, 0); |
927 | |
928 | 0 | opj_mutex_lock(tp->mutex); |
929 | 0 | tp->state = OPJWTS_STOP; |
930 | 0 | opj_mutex_unlock(tp->mutex); |
931 | |
932 | 0 | for (i = 0; i < tp->worker_threads_count; i++) { |
933 | 0 | opj_mutex_lock(tp->worker_threads[i].mutex); |
934 | 0 | opj_cond_signal(tp->worker_threads[i].cond); |
935 | 0 | opj_mutex_unlock(tp->worker_threads[i].mutex); |
936 | 0 | opj_thread_join(tp->worker_threads[i].thread); |
937 | 0 | opj_cond_destroy(tp->worker_threads[i].cond); |
938 | 0 | opj_mutex_destroy(tp->worker_threads[i].mutex); |
939 | 0 | } |
940 | |
941 | 0 | opj_free(tp->worker_threads); |
942 | |
943 | 0 | while (tp->waiting_worker_thread_list != NULL) { |
944 | 0 | opj_worker_thread_list_t* next = tp->waiting_worker_thread_list->next; |
945 | 0 | opj_free(tp->waiting_worker_thread_list); |
946 | 0 | tp->waiting_worker_thread_list = next; |
947 | 0 | } |
948 | |
949 | 0 | opj_cond_destroy(tp->cond); |
950 | 0 | } |
951 | 8.24k | opj_mutex_destroy(tp->mutex); |
952 | 8.24k | opj_tls_destroy(tp->tls); |
953 | 8.24k | opj_free(tp); |
954 | 8.24k | } |