Coverage Report

Created: 2025-06-24 07:01

/src/ghostpdl/base/gp_psync.c
Line    Count  Source
   1           /* Copyright (C) 2001-2023 Artifex Software, Inc.
   2              All Rights Reserved.
   3
   4              This software is provided AS-IS with no warranty, either express or
   5              implied.
   6
   7              This software is distributed under license and may not be copied,
   8              modified or distributed except as expressly authorized under the terms
   9              of the license contained in the file LICENSE in this distribution.
  10
  11              Refer to licensing information at http://www.artifex.com or contact
  12              Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
  13              CA 94129, USA, for further information.
  14           */
  15
  16
  17           /* POSIX pthreads threads / semaphore / monitor implementation */
  18           #include "std.h"
  19           #include "string_.h"
  20           #include "malloc_.h"
  21           #include "unistd_.h" /* for __USE_UNIX98 */
  22           #include <pthread.h>
  23           #include "gserrors.h"
  24           #include "gpsync.h"
  25           #include "assert_.h"
  26           #include "gp.h"
  27           #include "globals.h"
  28           /*
  29            * Thanks to Larry Jones <larry.jones@sdrc.com> for this revision of
  30            * Aladdin's original code into a form that depends only on POSIX APIs.
  31            */
  32
  33           /*
  34            * Some old versions of the pthreads library define
  35            * pthread_attr_setdetachstate as taking a Boolean rather than an enum.
  36            * Compensate for this here.
  37            */
  38           #ifndef PTHREAD_CREATE_DETACHED
  39           #  define PTHREAD_CREATE_DETACHED 1
  40           #endif
  41
  42           static struct
  43           {
  44               pthread_once_t once;
  45               pthread_mutex_t mutex;
  46               gs_globals globals;
  47           #ifdef DEBUG
  48               pthread_key_t tlsKey;
  49           #endif
  50           } GhostscriptGlobals = { PTHREAD_ONCE_INIT, PTHREAD_MUTEX_INITIALIZER };
  51
  52           static void init_globals(void)
  53       17  {
  54       17      if (pthread_mutex_init(&GhostscriptGlobals.mutex, NULL))
  55        0          exit(1);
  56           #ifdef DEBUG
  57               if (pthread_key_create(&GhostscriptGlobals.tlsKey, NULL))
  58                   exit(1);
  59           #endif
  60       17      gs_globals_init(&GhostscriptGlobals.globals);
  61       17  }
  62
  63           gs_globals *gp_get_globals(void)
  64     487k  {
  65     487k      if (pthread_once(&GhostscriptGlobals.once, init_globals))
  66        0          return NULL;
  67
  68     487k      return &GhostscriptGlobals.globals;
  69     487k  }
  70
  71           void gp_global_lock(gs_globals *globals)
  72        0  {
  73        0      if (globals == NULL)
  74        0          return;
  75        0      pthread_mutex_lock(&GhostscriptGlobals.mutex);
  76        0  }
  77
  78           void gp_global_unlock(gs_globals *globals)
  79        0  {
  80        0      if (globals == NULL)
  81        0          return;
  82        0      pthread_mutex_unlock(&GhostscriptGlobals.mutex);
  83        0  }
  84
  85           void gp_set_debug_mem_ptr(gs_memory_t *mem)
  86     812k  {
  87           #ifdef DEBUG
  88               pthread_setspecific(GhostscriptGlobals.tlsKey, mem);
  89           #endif
  90     812k  }
  91
  92           gs_memory_t *gp_get_debug_mem_ptr(void)
  93     116k  {
  94           #ifdef DEBUG
  95               return (gs_memory_t *)pthread_getspecific(GhostscriptGlobals.tlsKey);
  96           #else
  97     116k      return NULL;
  98     116k  #endif
  99     116k  }
 100
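
Note on the globals above: gp_get_globals() (lines 63-69) funnels every caller through pthread_once(), so init_globals() runs exactly once no matter how many threads race to fetch the globals. A minimal stand-alone sketch of that pattern, not taken from the Ghostscript sources (init_state/get_state are hypothetical names):

    #include <pthread.h>

    /* Hypothetical stand-alone example of the pthread_once pattern used by
     * gp_get_globals(): init_state() runs exactly once even if many threads
     * call get_state() concurrently. */
    static pthread_once_t state_once = PTHREAD_ONCE_INIT;
    static int state;

    static void init_state(void)
    {
        state = 42;             /* one-time setup goes here */
    }

    static int *get_state(void)
    {
        if (pthread_once(&state_once, init_state) != 0)
            return NULL;        /* pthread_once itself failed */
        return &state;
    }
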
 101           /* ------- Synchronization primitives -------- */
 102
 103           /* Semaphore supports wait/signal semantics */
 104
 105           typedef struct pt_semaphore_t {
 106               int count;
 107               pthread_mutex_t mutex;
 108               pthread_cond_t cond;
 109           } pt_semaphore_t;
 110
 111           uint
 112           gp_semaphore_sizeof(void)
 113    3.53M  {
 114    3.53M      return sizeof(pt_semaphore_t);
 115    3.53M  }
 116
 117           /*
 118            * This procedure should really check errno and return something
 119            * more informative....
 120            */
 121           #define SEM_ERROR_CODE(scode)\
 122    5.54G    (scode != 0 ? gs_note_error(gs_error_ioerror) : 0)
 123
 124           int
 125           gp_semaphore_open(gp_semaphore * sema)
 126    7.06M  {
 127    7.06M      pt_semaphore_t * const sem = (pt_semaphore_t *)sema;
 128    7.06M      int scode;
 129
 130           #ifdef MEMENTO
 131               if (Memento_squeezing()) {
 132                    /* If squeezing, we nobble all the locking functions to do nothing.
 133                     * We also ensure we never actually create threads (elsewhere),
 134                     * so this is still safe. */
 135                   memset(&sem->mutex, 0, sizeof(sem->mutex));
 136                   memset(&sem->cond, 0, sizeof(sem->cond));
 137                   return 0;
 138               }
 139           #endif
 140
 141    7.06M      if (!sema)
 142    3.53M          return -1;    /* semaphores are not movable */
 143    3.53M      sem->count = 0;
 144    3.53M      scode = pthread_mutex_init(&sem->mutex, NULL);
 145    3.53M      if (scode == 0)
 146    3.53M      {
 147    3.53M          scode = pthread_cond_init(&sem->cond, NULL);
 148    3.53M          if (scode)
 149        0              pthread_mutex_destroy(&sem->mutex);
 150    3.53M      }
 151    3.53M      if (scode)
 152        0          memset(sem, 0, sizeof(*sem));
 153    3.53M      return SEM_ERROR_CODE(scode);
 154    7.06M  }
 155
 156           int
 157           gp_semaphore_close(gp_semaphore * sema)
 158    3.53M  {
 159    3.53M      pt_semaphore_t * const sem = (pt_semaphore_t *)sema;
 160    3.53M      int scode, scode2;
 161
 162           #ifdef MEMENTO
 163               if (Memento_squeezing())
 164                   return 0;
 165           #endif
 166
 167    3.53M      scode = pthread_cond_destroy(&sem->cond);
 168    3.53M      scode2 = pthread_mutex_destroy(&sem->mutex);
 169    3.53M      if (scode == 0)
 170    3.53M          scode = scode2;
 171    3.53M      return SEM_ERROR_CODE(scode);
 172    3.53M  }
 173
 174           int
 175           gp_semaphore_wait(gp_semaphore * sema)
 176        0  {
 177        0      pt_semaphore_t * const sem = (pt_semaphore_t *)sema;
 178        0      int scode, scode2;
 179
 180           #ifdef MEMENTO
 181               if (Memento_squeezing()) {
 182                    /* If squeezing, we nobble all the locking functions to do nothing.
 183                     * We also ensure we never actually create threads (elsewhere),
 184                     * so this is still safe. */
 185                   return 0;
 186               }
 187           #endif
 188
 189        0      scode = pthread_mutex_lock(&sem->mutex);
 190        0      if (scode != 0)
 191        0          return SEM_ERROR_CODE(scode);
 192        0      while (sem->count == 0) {
 193        0          scode = pthread_cond_wait(&sem->cond, &sem->mutex);
 194        0          if (scode != 0)
 195        0              break;
 196        0      }
 197        0      if (scode == 0)
 198        0          --sem->count;
 199        0      scode2 = pthread_mutex_unlock(&sem->mutex);
 200        0      if (scode == 0)
 201        0          scode = scode2;
 202        0      return SEM_ERROR_CODE(scode);
 203        0  }
 204
 205           int
 206           gp_semaphore_signal(gp_semaphore * sema)
 207        0  {
 208        0      pt_semaphore_t * const sem = (pt_semaphore_t *)sema;
 209        0      int scode, scode2;
 210
 211           #ifdef MEMENTO
 212               if (Memento_squeezing())
 213                   return 0;
 214           #endif
 215
 216        0      scode = pthread_mutex_lock(&sem->mutex);
 217        0      if (scode != 0)
 218        0          return SEM_ERROR_CODE(scode);
 219        0      if (sem->count++ == 0)
 220        0          scode = pthread_cond_signal(&sem->cond);
 221        0      scode2 = pthread_mutex_unlock(&sem->mutex);
 222        0      if (scode == 0)
 223        0          scode = scode2;
 224        0      return SEM_ERROR_CODE(scode);
 225        0  }
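
The semaphore above is a counting semaphore built from a mutex and a condition variable. The gp_semaphore_sizeof()/gp_semaphore_open() pairing suggests the caller allocates the storage itself and open initializes it in place (passing NULL merely reports that semaphores are not movable). A minimal usage sketch under those assumptions, not taken from the Ghostscript sources:

    #include <stdlib.h>
    #include "gpsync.h"

    /* Hypothetical illustration of the gp_semaphore calling convention:
     * caller-provided storage, open in place, signal/wait across threads,
     * close and free when done. */
    static gp_semaphore *sema_create(void)
    {
        gp_semaphore *sema = (gp_semaphore *)malloc(gp_semaphore_sizeof());

        if (sema == NULL || gp_semaphore_open(sema) < 0) {
            free(sema);
            return NULL;
        }
        return sema;
    }

    static void sema_destroy(gp_semaphore *sema)
    {
        gp_semaphore_close(sema);
        free(sema);
    }

    /* Producer thread:  gp_semaphore_signal(sema);  increments the count.
     * Consumer thread:  gp_semaphore_wait(sema);    blocks until count > 0. */
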
 226
 227           /* Monitor supports enter/leave semantics */
 228
 229           /*
 230            * We need PTHREAD_MUTEX_RECURSIVE behavior, but this isn't
 231            * supported on all pthread platforms, so if it's available
 232            * we'll use it, otherwise we'll emulate it.
 233            * GS_RECURSIVE_MUTEXATTR is set by the configure script
 234            * on Unix-like machines to the attribute setting for
 235            * PTHREAD_MUTEX_RECURSIVE - on linux this is usually
 236            * PTHREAD_MUTEX_RECURSIVE_NP
 237            */
 238           typedef struct gp_pthread_recursive_s {
 239               pthread_mutex_t mutex;  /* actual mutex */
 240           #ifndef GS_RECURSIVE_MUTEXATTR
 241               pthread_t self_id;  /* owner */
 242               int lcount;
 243           #endif
 244           } gp_pthread_recursive_t;
 245
 246           uint
 247           gp_monitor_sizeof(void)
 248    22.1M  {
 249    22.1M      return sizeof(gp_pthread_recursive_t);
 250    22.1M  }
 251
 252           int
 253           gp_monitor_open(gp_monitor * mona)
 254    44.3M  {
 255    44.3M      pthread_mutex_t *mon;
 256    44.3M      int scode;
 257    44.3M      pthread_mutexattr_t attr;
 258    44.3M      pthread_mutexattr_t *attrp = NULL;
 259
 260    44.3M      if (!mona)
 261    22.1M          return -1;    /* monitors are not movable */
 262
 263           #ifdef MEMENTO
 264               if (Memento_squeezing()) {
 265                    memset(mona, 0, sizeof(*mona));
 266                    return 0;
 267               }
 268           #endif
 269
 270    22.1M  #ifdef GS_RECURSIVE_MUTEXATTR
 271    22.1M      attrp = &attr;
 272    22.1M      scode = pthread_mutexattr_init(attrp);
 273    22.1M      if (scode < 0) goto done;
 274
 275    22.1M      scode = pthread_mutexattr_settype(attrp, GS_RECURSIVE_MUTEXATTR);
 276    22.1M      if (scode < 0) {
 277        0          goto done;
 278        0      }
 279           #else
 280               ((gp_pthread_recursive_t *)mona)->self_id = 0;  /* Not valid unless mutex is locked */
 281               ((gp_pthread_recursive_t *)mona)->lcount = 0;
 282           #endif
 283
 284    22.1M      mon = &((gp_pthread_recursive_t *)mona)->mutex;
 285    22.1M      scode = pthread_mutex_init(mon, attrp);
 286    22.1M      if (attrp)
 287    22.1M          (void)pthread_mutexattr_destroy(attrp);
 288    22.1M  done:
 289    22.1M      return SEM_ERROR_CODE(scode);
 290    22.1M  }
 291
 292           int
 293           gp_monitor_close(gp_monitor * mona)
 294    22.1M  {
 295    22.1M      pthread_mutex_t * const mon = &((gp_pthread_recursive_t *)mona)->mutex;
 296    22.1M      int scode;
 297
 298           #ifdef MEMENTO
 299               if (Memento_squeezing())
 300                    return 0;
 301           #endif
 302
 303    22.1M      scode = pthread_mutex_destroy(mon);
 304    22.1M      return SEM_ERROR_CODE(scode);
 305    22.1M  }
 306
 307           int
 308           gp_monitor_enter(gp_monitor * mona)
 309    2.74G  {
 310    2.74G      pthread_mutex_t * const mon = (pthread_mutex_t *)mona;
 311    2.74G      int scode;
 312
 313           #ifdef MEMENTO
 314               if (Memento_squeezing()) {
 315                    return 0;
 316               }
 317           #endif
 318
 319    2.74G  #ifdef GS_RECURSIVE_MUTEXATTR
 320    2.74G      scode = pthread_mutex_lock(mon);
 321           #else
 322               assert(((gp_pthread_recursive_t *)mona)->lcount >= 0);
 323
 324               if ((scode = pthread_mutex_trylock(mon)) == 0) {
 325                   ((gp_pthread_recursive_t *)mona)->self_id = pthread_self();
 326                   ((gp_pthread_recursive_t *)mona)->lcount++;
 327               } else {
 328                   if (pthread_equal(pthread_self(),((gp_pthread_recursive_t *)mona)->self_id)) {
 329                       ((gp_pthread_recursive_t *)mona)->lcount++;
 330                       scode = 0;
 331                   }
 332                   else {
 333                       /* we were not the owner, wait */
 334                       scode = pthread_mutex_lock(mon);
 335                       ((gp_pthread_recursive_t *)mona)->self_id = pthread_self();
 336                       ((gp_pthread_recursive_t *)mona)->lcount++;
 337                   }
 338               }
 339           #endif
 340    2.74G      return SEM_ERROR_CODE(scode);
 341    2.74G  }
 342
 343           int
 344           gp_monitor_leave(gp_monitor * mona)
 345    2.74G  {
 346    2.74G      pthread_mutex_t * const mon = (pthread_mutex_t *)mona;
 347    2.74G      int scode = 0;
 348
 349           #ifdef MEMENTO
 350               if (Memento_squeezing())
 351                    return 0;
 352           #endif
 353
 354    2.74G  #ifdef GS_RECURSIVE_MUTEXATTR
 355    2.74G      scode = pthread_mutex_unlock(mon);
 356           #else
 357               assert(((gp_pthread_recursive_t *)mona)->lcount > 0 && ((gp_pthread_recursive_t *)mona)->self_id != 0);
 358
 359               if (pthread_equal(pthread_self(),((gp_pthread_recursive_t *)mona)->self_id)) {
 360                 if ((--((gp_pthread_recursive_t *)mona)->lcount) == 0) {
 361                     ((gp_pthread_recursive_t *)mona)->self_id = 0;  /* Not valid unless mutex is locked */
 362                     scode = pthread_mutex_unlock(mon);
 363
 364                 }
 365               }
 366               else {
 367                   scode = -1 /* should be EPERM */;
 368               }
 369           #endif
 370    2.74G      return SEM_ERROR_CODE(scode);
 371    2.74G  }
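
The monitor above is a recursive lock: when GS_RECURSIVE_MUTEXATTR is available the pthread mutex itself is made recursive, otherwise the self_id/lcount fields emulate recursion by letting the owning thread re-enter and only unlocking on the outermost leave. A minimal usage sketch of the calling convention, under the same caller-allocates assumption as the semaphore sketch and not taken from the Ghostscript sources:

    #include <stdlib.h>
    #include "gpsync.h"

    /* Hypothetical illustration: a monitor may be re-entered by the thread
     * that already holds it; the underlying mutex is only released by the
     * matching outermost gp_monitor_leave(). */
    static void nested_use(gp_monitor *mon)
    {
        gp_monitor_enter(mon);      /* outer acquisition */
        gp_monitor_enter(mon);      /* same thread: recursion, must not deadlock */
        gp_monitor_leave(mon);
        gp_monitor_leave(mon);      /* lock actually released here */
    }

    static gp_monitor *monitor_create(void)
    {
        gp_monitor *mon = (gp_monitor *)malloc(gp_monitor_sizeof());

        if (mon == NULL || gp_monitor_open(mon) < 0) {
            free(mon);
            return NULL;
        }
        return mon;
    }
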
 372
 373           /* --------- Thread primitives ---------- */
 374
 375           /*
 376            * In order to deal with the type mismatch between our thread API, where
 377            * the starting procedure returns void, and the API defined by pthreads,
 378            * where the procedure returns void *, we need to create a wrapper
 379            * closure.
 380            */
 381           typedef struct gp_thread_creation_closure_s {
 382               gp_thread_creation_callback_t proc;  /* actual start procedure */
 383               void *proc_data;      /* closure data for proc */
 384           #ifdef DEBUG
 385               gs_memory_t *mem;
 386           #endif
 387           } gp_thread_creation_closure_t;
 388
 389           /* Wrapper procedure called to start the new thread. */
 390           static void *
 391           gp_thread_begin_wrapper(void *thread_data /* gp_thread_creation_closure_t * */)
 392        0  {
 393        0      gp_thread_creation_closure_t closure;
 394
 395        0      closure = *(gp_thread_creation_closure_t *)thread_data;
 396        0      free(thread_data);
 397           #ifdef DEBUG
 398               pthread_setspecific(GhostscriptGlobals.tlsKey, closure.mem);
 399           #endif
 400        0      DISCARD(closure.proc(closure.proc_data));
 401        0      return NULL;    /* return value is ignored */
 402        0  }
 403
 404           int
 405           gp_create_thread(gp_thread_creation_callback_t proc, void *proc_data)
 406        0  {
 407        0      gp_thread_creation_closure_t *closure;
 408        0      pthread_t ignore_thread;
 409        0      pthread_attr_t attr;
 410        0      int code;
 411
 412           #ifdef MEMENTO
 413               if (Memento_squeezing()) {
 414                   eprintf("Can't create threads when memory squeezing with forks\n");
 415                   Memento_bt();
 416                   return_error(gs_error_VMerror);
 417               }
 418           #endif
 419
 420        0      closure = (gp_thread_creation_closure_t *)malloc(sizeof(*closure));
 421        0      if (!closure)
 422        0          return_error(gs_error_VMerror);
 423        0      closure->proc = proc;
 424        0      closure->proc_data = proc_data;
 425           #ifdef DEBUG
 426               closure->mem = pthread_getspecific(GhostscriptGlobals.tlsKey);
 427           #endif
 428        0      pthread_attr_init(&attr);
 429        0      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 430        0      code = pthread_create(&ignore_thread, &attr, gp_thread_begin_wrapper,
 431        0                            closure);
 432        0      if (code) {
 433        0          free(closure);
 434        0          return_error(gs_error_ioerror);
 435        0      }
 436        0      return 0;
 437        0  }
 438
 439           int
 440           gp_thread_start(gp_thread_creation_callback_t proc, void *proc_data,
 441                           gp_thread_id *thread)
 442        0  {
 443        0      gp_thread_creation_closure_t *closure =
 444        0          (gp_thread_creation_closure_t *)malloc(sizeof(*closure));
 445        0      pthread_t new_thread;
 446        0      pthread_attr_t attr;
 447        0      int code;
 448
 449        0      if (!closure)
 450        0          return_error(gs_error_VMerror);
 451        0      closure->proc = proc;
 452        0      closure->proc_data = proc_data;
 453           #ifdef DEBUG
 454               closure->mem = pthread_getspecific(GhostscriptGlobals.tlsKey);
 455           #endif
 456        0      pthread_attr_init(&attr);
 457        0      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
 458        0      code = pthread_create(&new_thread, &attr, gp_thread_begin_wrapper,
 459        0                            closure);
 460        0      if (code) {
 461        0          *thread = NULL;
 462        0          free(closure);
 463        0          return_error(gs_error_ioerror);
 464        0      }
 465        0      *thread = (gp_thread_id)new_thread;
 466        0      return 0;
 467        0  }
 468
 469           void gp_thread_finish(gp_thread_id thread)
 470        0  {
 471        0      if (thread == NULL)
 472        0          return;
 473        0      pthread_join((pthread_t)thread, NULL);
 474        0  }
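
The closure/wrapper above exists only to bridge the void-returning gp_thread_creation_callback_t onto pthread's void *(*)(void *) start routine; gp_create_thread() launches a detached thread, while gp_thread_start() plus gp_thread_finish() give a joinable thread and a join. A minimal usage sketch with a hypothetical worker(), not taken from the Ghostscript sources:

    #include "gpsync.h"

    /* Hypothetical illustration: start a joinable worker thread and wait
     * for it. The callback returns void, matching
     * gp_thread_creation_callback_t; the wrapper above adapts it for
     * pthread_create(). */
    static void worker(void *arg)
    {
        int *done = (int *)arg;

        *done = 1;                  /* stand-in for real work */
    }

    static int run_one_worker(void)
    {
        gp_thread_id tid;
        int done = 0;
        int code = gp_thread_start(worker, &done, &tid);

        if (code < 0)
            return code;            /* thread could not be created */
        gp_thread_finish(tid);      /* pthread_join under the hood */
        return done ? 0 : -1;
    }
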
 475
 476           void (gp_monitor_label)(gp_monitor * mona, const char *name)
 477        0  {
 478        0      pthread_mutex_t * const mon = &((gp_pthread_recursive_t *)mona)->mutex;
 479
 480        0      (void)mon;
 481        0      (void)name;
 482        0      Bobbin_label_mutex(mon, name);
 483        0  }
 484
 485           void (gp_semaphore_label)(gp_semaphore * sema, const char *name)
 486        0  {
 487        0      pt_semaphore_t * const sem = (pt_semaphore_t *)sema;
 488
 489        0      (void)sem;
 490        0      (void)name;
 491        0      Bobbin_label_mutex(&sem->mutex, name);
 492        0      Bobbin_label_cond(&sem->cond, name);
 493        0  }
 494
 495           void (gp_thread_label)(gp_thread_id thread, const char *name)
 496        0  {
 497        0      (void)thread;
 498        0      (void)name;
 499        0      Bobbin_label_thread((pthread_t)thread, name);
 500        0  }