Coverage Report

Created: 2025-06-24 07:01

/src/ghostpdl/base/gsmalloc.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2024 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
13
   CA 94129, USA, for further information.
14
*/
15
16
17
/* C heap allocator */
18
#include "malloc_.h"
19
#include "gdebug.h"
20
#include "gserrors.h"
21
#include "gstypes.h"
22
#include "gsmemory.h"
23
#include "gsmdebug.h"
24
#include "gsstruct.h"   /* for st_bytes */
25
#include "gsmalloc.h"
26
#include "gsmemret.h"   /* retrying wrapper */
27
#include "gp.h"
28
29
/* ------ Heap allocator ------ */
30
31
/*
32
 * An implementation of Ghostscript's memory manager interface
33
 * that works directly with the C heap.  We keep track of all allocated
34
 * blocks so we can free them at cleanup time.
35
 */
36
/* Raw memory procedures */
37
static gs_memory_proc_alloc_bytes(gs_heap_alloc_bytes);
38
static gs_memory_proc_resize_object(gs_heap_resize_object);
39
static gs_memory_proc_free_object(gs_heap_free_object);
40
static gs_memory_proc_stable(gs_heap_stable);
41
static gs_memory_proc_status(gs_heap_status);
42
static gs_memory_proc_free_all(gs_heap_free_all);
43
44
/* Object memory procedures */
45
static gs_memory_proc_alloc_struct(gs_heap_alloc_struct);
46
static gs_memory_proc_alloc_byte_array(gs_heap_alloc_byte_array);
47
static gs_memory_proc_alloc_struct_array(gs_heap_alloc_struct_array);
48
static gs_memory_proc_object_size(gs_heap_object_size);
49
static gs_memory_proc_object_type(gs_heap_object_type);
50
static gs_memory_proc_alloc_string(gs_heap_alloc_string);
51
static gs_memory_proc_resize_string(gs_heap_resize_string);
52
static gs_memory_proc_free_string(gs_heap_free_string);
53
static gs_memory_proc_register_root(gs_heap_register_root);
54
static gs_memory_proc_unregister_root(gs_heap_unregister_root);
55
static gs_memory_proc_enable_free(gs_heap_enable_free);
56
static gs_memory_proc_set_object_type(gs_heap_set_object_type);
57
static gs_memory_proc_defer_frees(gs_heap_defer_frees);
58
/*
 * Procedure table for the heap allocator.  Several abstract operations
 * map onto the same concrete routine: the movable and immovable variants
 * of each allocation entry point are identical here (presumably because
 * malloc'ed blocks never move -- confirm against gsmemory.h contract).
 */
static const gs_memory_procs_t gs_malloc_memory_procs =
{
    /* Raw memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_resize_object,
    gs_heap_free_object,
    gs_heap_stable,
    gs_heap_status,
    gs_heap_free_all,
    gs_ignore_consolidate_free,
    /* Object memory procedures */
    gs_heap_alloc_bytes,
    gs_heap_alloc_struct,
    gs_heap_alloc_struct,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_byte_array,
    gs_heap_alloc_struct_array,
    gs_heap_alloc_struct_array,
    gs_heap_object_size,
    gs_heap_object_type,
    gs_heap_alloc_string,
    gs_heap_alloc_string,
    gs_heap_resize_string,
    gs_heap_free_string,
    gs_heap_register_root,
    gs_heap_unregister_root,
    gs_heap_enable_free,
    gs_heap_set_object_type,
    gs_heap_defer_frees
};
88
89
/* We must make sure that malloc_blocks leave the block aligned. */
/*typedef struct gs_malloc_block_s gs_malloc_block_t; */
/*
 * Header prepended to every allocation: doubly-linked list pointers so
 * gs_heap_free_all can release everything, plus the client size, the
 * structure type (for finalization), and the allocating client's name.
 */
#define malloc_block_data\
        gs_malloc_block_t *next;\
        gs_malloc_block_t *prev;\
        size_t size;\
        gs_memory_type_ptr_t type;\
        client_name_t cname
struct malloc_block_data_s {
    malloc_block_data;
};
struct gs_malloc_block_s {
    malloc_block_data;
/* ANSI C does not allow zero-size arrays, so we need the following */
/* unnecessary and wasteful workaround: */
/* _pad rounds the header size up so the client data that follows the
 * header keeps ARCH_ALIGN_MEMORY_MOD alignment. */
#define _npad (-size_of(struct malloc_block_data_s) & (ARCH_ALIGN_MEMORY_MOD - 1))
    byte _pad[(_npad == 0 ? ARCH_ALIGN_MEMORY_MOD : _npad)];
#undef _npad
};
108
109
/* Initialize a malloc allocator. */
110
static long heap_available(void);
111
gs_malloc_memory_t *
112
gs_malloc_memory_init(void)
113
162k
{
114
162k
    gs_malloc_memory_t *mem =
115
162k
        (gs_malloc_memory_t *)Memento_label(malloc(sizeof(gs_malloc_memory_t)), "gs_malloc_memory_t");
116
117
162k
    if (mem == NULL)
118
0
        return NULL;
119
120
162k
    mem->stable_memory = 0; /* just for tidyness, never referenced */
121
162k
    mem->procs = gs_malloc_memory_procs;
122
162k
    mem->allocated = 0;
123
162k
    mem->limit = max_size_t;
124
162k
    mem->used = 0;
125
162k
    mem->max_used = 0;
126
162k
    mem->gs_lib_ctx = 0;
127
162k
    mem->non_gc_memory = (gs_memory_t *)mem;
128
162k
    mem->thread_safe_memory = (gs_memory_t *)mem; /* this allocator is thread safe */
129
    /* Allocate a monitor to serialize access to structures within */
130
162k
    mem->monitor = NULL;  /* prevent use during initial allocation */
131
162k
    mem->monitor = gx_monitor_label(gx_monitor_alloc((gs_memory_t *)mem), "heap");
132
162k
    if (mem->monitor == NULL) {
133
0
        free(mem);
134
0
        return NULL;
135
0
    }
136
137
162k
    return mem;
138
162k
}
139
/*
140
 * Estimate the amount of available memory by probing with mallocs.
141
 * We may under-estimate by a lot, but that's better than winding up with
142
 * a seriously inflated address space.  This is quite a hack!
143
 */
144
61.5M
#define max_malloc_probes 20
145
117M
#define malloc_probe_size 64000
146
static long
147
heap_available()
148
2.92M
{
149
2.92M
    long avail = 0;
150
2.92M
    void *probes[max_malloc_probes];
151
2.92M
    uint n;
152
153
61.5M
    for (n = 0; n < max_malloc_probes; n++) {
154
58.5M
        if ((probes[n] = malloc(malloc_probe_size)) == 0)
155
0
            break;
156
58.5M
        if_debug2('a', "[a]heap_available probe[%d]="PRI_INTPTR"\n",
157
58.5M
                  n, (intptr_t) probes[n]);
158
58.5M
        avail += malloc_probe_size;
159
58.5M
    }
160
61.5M
    while (n)
161
58.5M
        free(probes[--n]);
162
2.92M
    return avail;
163
2.92M
}
164
165
/* Allocate various kinds of blocks. */
166
/*
 * Allocate 'size' bytes for 'cname', prefixing each block with a
 * gs_malloc_block_t header and linking it onto mmem->allocated.
 * Returns a pointer to the client data (just past the header), or 0 if
 * the request exceeds the allocator's limit or malloc fails.
 */
static byte *
gs_heap_alloc_bytes(gs_memory_t * mem, size_t size, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    byte *ptr = 0;

#ifdef DEBUG
    /* In debug builds, remember the outcome so it can be traced below. */
    const char *msg;
    static const char *const ok_msg = "OK";

#  define set_msg(str) (msg = (str))
#else
#  define set_msg(str) DO_NOTHING
#endif

        /* Exclusive access so our decisions and changes are 'atomic' */
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);
    if (size > mmem->limit - sizeof(gs_malloc_block_t)) {
        /* Definitely too large to allocate; also avoids overflow. */
        set_msg("exceeded limit");
    } else {
        /* Total bytes requested from malloc: header + client data. */
        size_t added = size + sizeof(gs_malloc_block_t);

        /* added <= size catches size_t wrap-around of the addition. */
        if (added <= size || added > mmem->limit || mmem->limit - added < mmem->used)
            set_msg("exceeded limit");
        else if ((ptr = (byte *) Memento_label(malloc(added), cname)) == 0)
            set_msg("failed");
        else {
            gs_malloc_block_t *bp = (gs_malloc_block_t *) ptr;

            /*
             * We would like to check that malloc aligns blocks at least as
             * strictly as the compiler (as defined by ARCH_ALIGN_MEMORY_MOD).
             * However, Microsoft VC 6 does not satisfy this requirement.
             * See gsmemory.h for more explanation.
             */
            set_msg(ok_msg);
            /* Push the new block onto the head of the allocated list. */
            if (mmem->allocated)
                mmem->allocated->prev = bp;
            bp->next = mmem->allocated;
            bp->prev = 0;
            bp->size = size;
            bp->type = &st_bytes;       /* plain bytes until retagged */
            bp->cname = cname;
            mmem->allocated = bp;
            ptr = (byte *) (bp + 1);    /* client data follows the header */
            mmem->used += size + sizeof(gs_malloc_block_t);
            if (mmem->used > mmem->max_used)
                mmem->max_used = mmem->used;
        }
    }
    if (mmem->monitor)
        gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
    /* We don't want to 'fill' under mutex to keep the window smaller */
    if (ptr)
        gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#ifdef DEBUG
    if (gs_debug_c('a') || msg != ok_msg)
        dmlprintf6(mem, "[a+]gs_malloc(%s)(%"PRIuSIZE") = "PRI_INTPTR": %s, used=%"PRIuSIZE", max=%"PRIuSIZE"\n",
                   client_name_string(cname), size, (intptr_t)ptr, msg, mmem->used, mmem->max_used);
#endif
    return ptr;
#undef set_msg
}
231
static void *
232
gs_heap_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
233
                     client_name_t cname)
234
2.81M
{
235
2.81M
    void *ptr =
236
2.81M
    gs_heap_alloc_bytes(mem, gs_struct_type_size(pstype), cname);
237
238
2.81M
    if (ptr == 0)
239
0
        return 0;
240
2.81M
    ((gs_malloc_block_t *) ptr)[-1].type = pstype;
241
2.81M
    return ptr;
242
2.81M
}
243
static byte *
244
gs_heap_alloc_byte_array(gs_memory_t * mem, size_t num_elements, size_t elt_size,
245
                         client_name_t cname)
246
1.11M
{
247
1.11M
    size_t lsize = (size_t) num_elements * elt_size;
248
249
1.11M
    if (elt_size != 0 && lsize/elt_size != num_elements)
250
0
        return 0;
251
1.11M
    return gs_heap_alloc_bytes(mem, (size_t) lsize, cname);
252
1.11M
}
253
static void *
254
gs_heap_alloc_struct_array(gs_memory_t * mem, size_t num_elements,
255
                           gs_memory_type_ptr_t pstype, client_name_t cname)
256
1.25k
{
257
1.25k
    void *ptr =
258
1.25k
    gs_heap_alloc_byte_array(mem, num_elements,
259
1.25k
                             gs_struct_type_size(pstype), cname);
260
261
1.25k
    if (ptr == 0)
262
0
        return 0;
263
1.25k
    ((gs_malloc_block_t *) ptr)[-1].type = pstype;
264
1.25k
    return ptr;
265
1.25k
}
266
/*
 * Resize an object in place via gs_realloc.  The element size comes from
 * the type recorded in the block header; sizes here include the header.
 * Returns the (possibly moved) client pointer, or 0 if the new size
 * exceeds the limit or realloc fails (the old object is left intact).
 */
static void *
gs_heap_resize_object(gs_memory_t * mem, void *obj, size_t new_num_elements,
                      client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *ptr = (gs_malloc_block_t *) obj - 1;  /* block header */
    gs_memory_type_ptr_t pstype = ptr->type;
    size_t old_size = gs_object_size(mem, obj) + sizeof(gs_malloc_block_t);
    size_t new_size =
        gs_struct_type_size(pstype) * new_num_elements +
        sizeof(gs_malloc_block_t);
    gs_malloc_block_t *new_ptr;

    /* Same size: nothing to do. */
    if (new_size == old_size)
        return obj;
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);  /* Exclusive access */
    if (new_size > mmem->limit - sizeof(gs_malloc_block_t)) {
        /* too large to allocate; also avoids overflow. */
        if (mmem->monitor)
            gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
        return 0;
    }
    new_ptr = (gs_malloc_block_t *) gs_realloc(ptr, old_size, new_size);
    if (new_ptr == 0) {
        if (mmem->monitor)
            gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
        return 0;
    }
    /* The block may have moved: re-link its list neighbors to it. */
    if (new_ptr->prev)
        new_ptr->prev->next = new_ptr;
    else
        mmem->allocated = new_ptr;      /* was the list head */
    if (new_ptr->next)
        new_ptr->next->prev = new_ptr;
    new_ptr->size = new_size - sizeof(gs_malloc_block_t);
    /* Re-account: remove the old total, add the new one. */
    mmem->used -= old_size;
    mmem->used += new_size;
    if (mmem->monitor)
        gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
    /* Fill newly grown bytes outside the lock, as in alloc_bytes. */
    if (new_size > old_size)
        gs_alloc_fill((byte *) new_ptr + old_size,
                      gs_alloc_fill_alloc, new_size - old_size);
    return new_ptr + 1;
}
311
static size_t
312
gs_heap_object_size(gs_memory_t * mem, const void *ptr)
313
809k
{
314
809k
    return ((const gs_malloc_block_t *)ptr)[-1].size;
315
809k
}
316
static gs_memory_type_ptr_t
317
gs_heap_object_type(const gs_memory_t * mem, const void *ptr)
318
0
{
319
0
    return ((const gs_malloc_block_t *)ptr)[-1].type;
320
0
}
321
/*
 * Free an object: run its type's finalize procedure (if any), unlink the
 * block from mmem->allocated under the monitor, adjust accounting, and
 * free() the underlying block.  Freeing a null pointer is a no-op.
 * NOTE: finalize runs before the monitor is taken, so a finalizer may
 * itself call back into this allocator.
 */
static void
gs_heap_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    gs_malloc_block_t *bp;
    gs_memory_type_ptr_t pstype;
    struct_proc_finalize((*finalize));

    if_debug3m('a', mem, "[a-]gs_free(%s) "PRI_INTPTR"(%"PRIuSIZE")\n",
               client_name_string(cname), (intptr_t)ptr,
               (ptr == 0 ? 0 : ((gs_malloc_block_t *) ptr)[-1].size));
    if (ptr == 0)
        return;
    pstype = ((gs_malloc_block_t *) ptr)[-1].type;
    finalize = pstype->finalize;
    if (finalize != 0) {
        if_debug3m('u', mem, "[u]finalizing %s "PRI_INTPTR" (%s)\n",
                   struct_type_name_string(pstype),
                   (intptr_t)ptr, client_name_string(cname));
        (*finalize) (mem, ptr);
    }
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);  /* Exclusive access */
    /* Previously, we used to search through every allocated block to find
     * the block we are freeing. This gives us safety in that an attempt to
     * free an unallocated block will not go wrong. This does radically
     * slow down frees though, so we replace it with this simpler code; we
     * now assume that the block is valid, and hence avoid the search.
     */
#if 1
    /* Fast path: trust the header and unlink in O(1). */
    bp = &((gs_malloc_block_t *)ptr)[-1];
    if (bp->prev)
        bp->prev->next = bp->next;
    if (bp->next)
        bp->next->prev = bp->prev;
    if (bp == mmem->allocated) {
        mmem->allocated = bp->next;
        if (mmem->allocated)
            mmem->allocated->prev = NULL;
    }
    mmem->used -= bp->size + sizeof(gs_malloc_block_t);
    if (mmem->monitor)
        gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
    /* Scribble over the freed block (header included) before releasing. */
    gs_alloc_fill(bp, gs_alloc_fill_free,
                  bp->size + sizeof(gs_malloc_block_t));
    free(bp);
#else
    /* Old safe path: linear search of the allocated list (kept for
     * reference; disabled by the #if 1 above). */
    bp = mmem->allocated; /* If 'finalize' releases a memory,
                             this function could be called recursively and
                             change mmem->allocated. */
    if (ptr == bp + 1) {
        mmem->allocated = bp->next;
        mmem->used -= bp->size + sizeof(gs_malloc_block_t);

        if (mmem->allocated)
            mmem->allocated->prev = 0;
        if (mmem->monitor)
            gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
        gs_alloc_fill(bp, gs_alloc_fill_free,
                      bp->size + sizeof(gs_malloc_block_t));
        free(bp);
    } else {
        gs_malloc_block_t *np;

        /*
         * bp == 0 at this point is an error, but we'd rather have an
         * error message than an invalid access.
         */
        if (bp) {
            for (; (np = bp->next) != 0; bp = np) {
                if (ptr == np + 1) {
                    bp->next = np->next;
                    if (np->next)
                        np->next->prev = bp;
                    mmem->used -= np->size + sizeof(gs_malloc_block_t);
                    if (mmem->monitor)
                        gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
                    gs_alloc_fill(np, gs_alloc_fill_free,
                                  np->size + sizeof(gs_malloc_block_t));
                    free(np);
                    return;
                }
            }
        }
        if (mmem->monitor)
            gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
        lprintf2("%s: free "PRI_INTPTR" not found!\n",
                 client_name_string(cname), (intptr_t) ptr);
        free((char *)((gs_malloc_block_t *) ptr - 1));
    }
#endif
}
413
/* Allocate a string.  For this allocator strings are handled exactly
 * like plain byte allocations. */
static byte *
gs_heap_alloc_string(gs_memory_t * mem, size_t nbytes, client_name_t cname)
{
    return gs_heap_alloc_bytes(mem, nbytes, cname);
}
418
static byte *
419
gs_heap_resize_string(gs_memory_t * mem, byte * data, size_t old_num, size_t new_num,
420
                      client_name_t cname)
421
0
{
422
0
    if (gs_heap_object_type(mem, data) != &st_bytes)
423
0
        if_debug2m('a', mem, "%s: resizing non-string "PRI_INTPTR"!\n",
424
0
                 client_name_string(cname), (intptr_t)data);
425
0
    return gs_heap_resize_object(mem, data, new_num, cname);
426
0
}
427
/* Free a string; delegates to the ordinary object free.  'nbytes' is
 * not checked against the recorded size. */
static void
gs_heap_free_string(gs_memory_t * mem, byte * data, size_t nbytes,
                    client_name_t cname)
{
    /****** SHOULD CHECK SIZE IF DEBUGGING ******/
    gs_heap_free_object(mem, data, cname);
}
434
/* Register a GC root: a no-op for this non-garbage-collected allocator;
 * always reports success. */
static int
gs_heap_register_root(gs_memory_t * mem, gs_gc_root_t ** rp,
                      gs_ptr_type_t ptype, void **up, client_name_t cname)
{
    return 0;
}
440
/* Unregister a GC root: a no-op, matching gs_heap_register_root. */
static void
gs_heap_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp,
                        client_name_t cname)
{
}
445
/* Return the stable allocator: this allocator is its own stable memory. */
static gs_memory_t *
gs_heap_stable(gs_memory_t *mem)
{
    return mem;     /* heap memory is stable */
}
450
451
/*
452
 * NB: In a multi-threaded application, this is only a 'snapshot'
453
 *     since other threads may change the heap_status. The heap_available()
454
 *     probe is just an approximation anyway. To pacify helgrind, we lock
455
 *     around the modifications to the gs_memory_status that is returned.
456
 */
457
static void
458
gs_heap_status(gs_memory_t * mem, gs_memory_status_t * pstat)
459
2.92M
{
460
2.92M
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
461
2.92M
    long avail_snapshot = heap_available();
462
463
2.92M
    if (mmem->monitor)
464
2.92M
        gx_monitor_enter(mmem->monitor);
465
2.92M
    pstat->allocated = mmem->used + avail_snapshot;
466
2.92M
    pstat->used = mmem->used;
467
2.92M
    pstat->max_used = mmem->max_used;
468
2.92M
    pstat->limit = mmem->limit;
469
2.92M
    pstat->is_thread_safe = true; /* this allocator has a mutex (monitor) and IS thread safe */
470
2.92M
    if (mmem->monitor)
471
2.92M
        gx_monitor_leave(mmem->monitor);  /* Done with exclusive access */
472
2.92M
}
473
static void
474
gs_heap_enable_free(gs_memory_t * mem, bool enable)
475
0
{
476
0
    if (enable)
477
0
        mem->procs.free_object = gs_heap_free_object,
478
0
            mem->procs.free_string = gs_heap_free_string;
479
0
    else
480
0
        mem->procs.free_object = gs_ignore_free_object,
481
0
            mem->procs.free_string = gs_ignore_free_string;
482
0
}
483
484
/* Re-tag an allocated block with a new structure type; NULL is ignored. */
static void gs_heap_set_object_type(gs_memory_t *mem, void *ptr, gs_memory_type_ptr_t type)
{
    if (ptr != 0) {
        gs_malloc_block_t *hdr = (gs_malloc_block_t *) ptr - 1;

        hdr->type = type;
    }
}
492
493
/* Defer (or stop deferring) frees: not supported here, so a no-op. */
static void gs_heap_defer_frees(gs_memory_t *mem, int defer)
{
}
496
497
/* Release all memory acquired by this allocator. */
/* Frees the monitor, then (per free_mask) every outstanding block and
 * finally the allocator object itself.  No finalizers are run here. */
static void
gs_heap_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_malloc_memory_t *const mmem = (gs_malloc_memory_t *) mem;
    gx_monitor_t *mon = mmem->monitor;

    /*
     * We don't perform locking during this process since the 'monitor'
     * is contained in this allocator, and will get freed along the way.
     * It is only called at exit, and there better not be any threads
     * accessing this allocator.
     */
    mmem->monitor = NULL;   /* delete reference to this monitor */
    gx_monitor_free(mon); /* free the monitor */
#ifndef MEMENTO
    /* Normally gs calls this on closedown, and it frees every block that
     * has ever been allocated. This is not helpful for leak checking. */
    if (free_mask & FREE_ALL_DATA) {
        gs_malloc_block_t *bp = mmem->allocated;
        gs_malloc_block_t *np;

        /* Walk the allocated list, freeing each block in turn. */
        for (; bp != 0; bp = np) {
            np = bp->next;
            if_debug3m('a', mem, "[a]gs_heap_free_all(%s) "PRI_INTPTR"(%"PRIuSIZE")\n",
                       client_name_string(bp->cname), (intptr_t)(bp + 1),
                       bp->size);
            gs_alloc_fill(bp + 1, gs_alloc_fill_free, bp->size);
            free(bp);
        }
    }
#endif
    if (free_mask & FREE_ALL_ALLOCATOR)
        free(mem);
}
532
533
/* ------ Wrapping ------ */
534
535
/* Create the retrying and the locked wrapper for the heap allocator. */
536
int
537
gs_malloc_wrap(gs_memory_t **wrapped, gs_malloc_memory_t *contents)
538
0
{
539
#  ifdef USE_RETRY_MEMORY_WRAPPER
540
    /*
541
     * This is deprecated since 'retry' for clist reversion/cycling
542
     * will ONLY work for monochrome, simple PS or PCL, not for a
543
     * color device and not for PDF or XPS with transparency
544
     */
545
    {
546
        gs_memory_retrying_t *rmem;
547
        rmem = (gs_memory_retrying_t *)
548
            gs_alloc_bytes_immovable((gs_memory_t *)lmem,
549
                                     sizeof(gs_memory_retrying_t),
550
                                     "gs_malloc_wrap(retrying)");
551
        if (rmem == 0) {
552
            gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
553
            return_error(gs_error_VMerror);
554
        }
555
        code = gs_memory_retrying_init(rmem, (gs_memory_t *)lmem);
556
        if (code < 0) {
557
            gs_free_object((gs_memory_t *)lmem, rmem, "gs_malloc_wrap(retrying)");
558
            gs_free_object(cmem, lmem, "gs_malloc_wrap(locked)");
559
            return code;
560
        }
561
562
        *wrapped = (gs_memory_t *)rmem;
563
    }
564
#  endif /* retrying */
565
0
    return 0;
566
0
}
567
568
/* Get the wrapped contents. */
/* Without the retry wrapper the 'wrapped' pointer IS the contents. */
gs_malloc_memory_t *
gs_malloc_wrapped_contents(gs_memory_t *wrapped)
{
#ifdef USE_RETRY_MEMORY_WRAPPER
    gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;

    return (gs_malloc_memory_t *)gs_memory_retrying_target(rmem);
#else /* retrying */
    return (gs_malloc_memory_t *)wrapped;
#endif /* retrying */
}
580
581
/* Free the wrapper, and return the wrapped contents. */
582
gs_malloc_memory_t *
583
gs_malloc_unwrap(gs_memory_t *wrapped)
584
0
{
585
#ifdef USE_RETRY_MEMORY_WRAPPER
586
    gs_memory_retrying_t *rmem = (gs_memory_retrying_t *)wrapped;
587
    gs_memory_t *contents = gs_memory_retrying_target(rmem);
588
589
    gs_free_object(wrapped rmem, "gs_malloc_unwrap(retrying)");
590
    return (gs_malloc_memory_t *)contents;
591
#else
592
0
    return (gs_malloc_memory_t *)wrapped;
593
0
#endif
594
0
}
595
596
/* Create the default allocator, and return it. */
/* Convenience entry: same as gs_malloc_init_with_context with no context. */
gs_memory_t *
gs_malloc_init(void)
{
    return gs_malloc_init_with_context(NULL);
}
602
603
/*
 * Create the default allocator with an (optional) library context.
 * Builds the heap allocator, initializes the library context with it,
 * optionally wraps it in the retry wrapper, and marks the result as its
 * own stable memory.  Returns NULL on any failure (allocator released).
 */
gs_memory_t *
gs_malloc_init_with_context(gs_lib_ctx_t *ctx)
{
    gs_malloc_memory_t *malloc_memory_default = gs_malloc_memory_init();
    gs_memory_t *memory_t_default;

    if (malloc_memory_default == NULL)
        return NULL;

    if (gs_lib_ctx_init(ctx, (gs_memory_t *)malloc_memory_default) != 0) {
        gs_malloc_release((gs_memory_t *)malloc_memory_default);
        return NULL;
    }

#if defined(USE_RETRY_MEMORY_WRAPPER)
    gs_malloc_wrap(&memory_t_default, malloc_memory_default);
#else
    memory_t_default = (gs_memory_t *)malloc_memory_default;
#endif
    memory_t_default->stable_memory = memory_t_default;
    return memory_t_default;
}
625
626
/* Release the default allocator. */
/* Unwraps the allocator if wrapped, finalizes the library context, then
 * releases the underlying malloc allocator.  NULL is ignored. */
void
gs_malloc_release(gs_memory_t *mem)
{
    gs_malloc_memory_t * malloc_memory_default;

    if (mem == NULL)
        return;

#ifdef USE_RETRY_MEMORY_WRAPPER
    malloc_memory_default = gs_malloc_unwrap(mem);
#else
    malloc_memory_default = (gs_malloc_memory_t *)mem;
#endif
    gs_lib_ctx_fin((gs_memory_t *)malloc_memory_default);

    gs_malloc_memory_release(malloc_memory_default);
}