Coverage Report

Created: 2025-06-10 07:06

/src/ghostpdl/base/gsalloc.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2023 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
13
   CA 94129, USA, for further information.
14
*/
15
16
17
/* Standard memory allocator */
18
#include "gx.h"
19
#include "memory_.h"
20
#include "gserrors.h"
21
#include "gsexit.h"
22
#include "gsmdebug.h"
23
#include "gsstruct.h"
24
#include "gxalloc.h"
25
#include "stream.h"   /* for clearing stream list */
26
#include "malloc_.h" /* For MEMENTO */
27
28
/*
29
 * Define whether to try consolidating space before adding a new clump.
30
 * The default is not to do this, because it is computationally
31
 * expensive and doesn't seem to help much.  However, this is done for
32
 * "controlled" spaces whether or not the #define is in effect.
33
 */
34
/*#define CONSOLIDATE_BEFORE_ADDING_CLUMP */
35
36
/*
37
 * This allocator produces tracing messages of the form
38
 *      [aNMOTS]...
39
 * where
40
 *   N is the VM space number, +1 if we are allocating from stable memory.
41
 *   M is : for movable objects, | for immovable,
42
 *   O is {alloc = +, free = -, grow = >, shrink = <},
43
 *   T is {bytes = b, object = <, ref = $, string = >}, and
44
 *   S is {small freelist = f, large freelist = F, LIFO = space,
45
 *      own clump = L, lost = #, lost own clump = ~, other = .}.
46
 */
47
#ifdef DEBUG
48
static int
49
alloc_trace_space(const gs_ref_memory_t *imem)
50
{
51
    return (int)(imem->space + (imem->stable_memory == (const gs_memory_t *)imem));
52
}
53
static void
54
alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname,
55
            gs_memory_type_ptr_t stype, uint size, const void *ptr)
56
{
57
    if_debug7m('A', (const gs_memory_t *)imem, "[a%d%s]%s %s(%u) %s"PRI_INTPTR"\n",
58
               alloc_trace_space(imem), chars, client_name_string(cname),
59
               (ptr == 0 || stype == 0 ? "" :
60
                struct_type_name_string(stype)),
61
               size, (chars[1] == '+' ? "= " : ""), (intptr_t)ptr);
62
}
63
static bool
64
alloc_size_is_ok(gs_memory_type_ptr_t stype)
65
{
66
    return (stype->ssize > 0 && stype->ssize < 0x200000);
67
}
68
#  define ALLOC_CHECK_SIZE(mem,stype)\
69
    BEGIN\
70
      if (!alloc_size_is_ok(stype)) {\
71
        mlprintf2(mem,"size of struct type "PRI_INTPTR" is 0x%lx!\n",\
72
                  (intptr_t)(stype), (ulong)((stype)->ssize));\
73
        return 0;\
74
      }\
75
    END
76
#else
77
154M
#  define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING
78
72.2M
#  define ALLOC_CHECK_SIZE(mem,stype) DO_NOTHING
79
#endif
80
81
/*
82
 * The structure descriptor for allocators.  Even though allocators
83
 * are allocated outside GC space, they reference objects within it.
84
 */
85
public_st_ref_memory();
86
static
87
731k
ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0;
88
121k
ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes);
89
121k
ENUM_PTR(3, gs_ref_memory_t, saved);
90
731k
ENUM_PTR(4, gs_ref_memory_t, scan_limit);
91
731k
ENUM_PTRS_END
92
119k
static RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr)
93
119k
{
94
119k
    RELOC_PTR(gs_ref_memory_t, streams);
95
119k
    RELOC_PTR(gs_ref_memory_t, names_array);
96
119k
    RELOC_PTR(gs_ref_memory_t, changes);
97
119k
    RELOC_PTR(gs_ref_memory_t, scan_limit);
98
    /* Don't relocate the saved pointer now -- see igc.c for details. */
99
119k
    mptr->reloc_saved = RELOC_OBJ(mptr->saved);
100
119k
}
101
119k
RELOC_PTRS_END
102
103
/*
104
 * Define the flags for alloc_obj, which implements all but the fastest
105
 * case of allocation.
106
 */
107
typedef enum {
108
    ALLOC_IMMOVABLE = 1,
109
    ALLOC_DIRECT = 2    /* called directly, without fast-case checks */
110
} alloc_flags_t;
111
112
/* Forward references */
113
static void remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top);
114
static obj_header_t *large_freelist_alloc(gs_ref_memory_t *mem, obj_size_t size);
115
static obj_header_t *scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size);
116
static size_t compute_free_objects(gs_ref_memory_t *);
117
static obj_header_t *alloc_obj(gs_ref_memory_t *, obj_size_t, gs_memory_type_ptr_t, alloc_flags_t, client_name_t);
118
static void consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem);
119
static void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, obj_size_t size, clump_t *cp);
120
static clump_t *alloc_acquire_clump(gs_ref_memory_t *, size_t, bool, client_name_t);
121
static clump_t *alloc_add_clump(gs_ref_memory_t *, size_t, client_name_t);
122
void alloc_close_clump(gs_ref_memory_t *);
123
124
/*
125
 * Define the standard implementation (with garbage collection)
126
 * of Ghostscript's memory manager interface.
127
 */
128
/* Raw memory procedures */
129
static gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable);
130
static gs_memory_proc_resize_object(i_resize_object);
131
static gs_memory_proc_free_object(i_free_object);
132
static gs_memory_proc_stable(i_stable);
133
static gs_memory_proc_status(i_status);
134
static gs_memory_proc_free_all(i_free_all);
135
static gs_memory_proc_consolidate_free(i_consolidate_free);
136
137
/* Object memory procedures */
138
static gs_memory_proc_alloc_bytes(i_alloc_bytes);
139
static gs_memory_proc_alloc_struct(i_alloc_struct);
140
static gs_memory_proc_alloc_struct(i_alloc_struct_immovable);
141
static gs_memory_proc_alloc_byte_array(i_alloc_byte_array);
142
static gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable);
143
static gs_memory_proc_alloc_struct_array(i_alloc_struct_array);
144
static gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable);
145
static gs_memory_proc_object_size(i_object_size);
146
static gs_memory_proc_object_type(i_object_type);
147
static gs_memory_proc_alloc_string(i_alloc_string);
148
static gs_memory_proc_alloc_string(i_alloc_string_immovable);
149
static gs_memory_proc_resize_string(i_resize_string);
150
static gs_memory_proc_free_string(i_free_string);
151
static gs_memory_proc_register_root(i_register_root);
152
static gs_memory_proc_unregister_root(i_unregister_root);
153
static gs_memory_proc_enable_free(i_enable_free);
154
static gs_memory_proc_set_object_type(i_set_object_type);
155
static gs_memory_proc_defer_frees(i_defer_frees);
156
157
/* We export the procedures for subclasses. */
158
const gs_memory_procs_t gs_ref_memory_procs =
159
{
160
    /* Raw memory procedures */
161
    i_alloc_bytes_immovable,
162
    i_resize_object,
163
    i_free_object,
164
    i_stable,
165
    i_status,
166
    i_free_all,
167
    i_consolidate_free,
168
    /* Object memory procedures */
169
    i_alloc_bytes,
170
    i_alloc_struct,
171
    i_alloc_struct_immovable,
172
    i_alloc_byte_array,
173
    i_alloc_byte_array_immovable,
174
    i_alloc_struct_array,
175
    i_alloc_struct_array_immovable,
176
    i_object_size,
177
    i_object_type,
178
    i_alloc_string,
179
    i_alloc_string_immovable,
180
    i_resize_string,
181
    i_free_string,
182
    i_register_root,
183
    i_unregister_root,
184
    i_enable_free,
185
    i_set_object_type,
186
    i_defer_frees
187
};
188
189
/*
190
 * Previous versions of this code used a simple linked list of
191
 * clumps. We change here to use a splay tree of clumps.
192
 * Splay Trees can be found documented in "Algorithms and Data
193
 * Structures" by Jeffrey H Kingston.
194
 *
195
 * Essentially they are binary trees, ordered by address of the
196
 * 'cbase' pointer. The 'cunning' feature with them is that
197
 * when a node in the tree is accessed, we do a 'move to root'
198
 * operation. This involves performing various 'rotations' as
199
 * we move up the tree, the net effect of which tends to
200
 * lead to more balanced trees (see Kingston for analysis).
201
 * It also leads to better locality of reference in that
202
 * recently accessed nodes stay near the root.
203
 */
204
205
/* #define DEBUG_CLUMPS */
206
#ifdef DEBUG_CLUMPS
207
#define SANITY_CHECK(cp) sanity_check(cp)
208
#define SANITY_CHECK_MID(cp) sanity_check_mid(cp)
209
210
static void
211
broken_splay()
212
{
213
    dlprintf("Broken splay tree!\n");
214
}
215
216
void sanity_check_rec(clump_t *cp)
217
{
218
    splay_dir_t from = SPLAY_FROM_ABOVE;
219
220
    while (cp)
221
    {
222
        if (from == SPLAY_FROM_ABOVE)
223
        {
224
            /* We have arrived from above. Step left. */
225
            if (cp->left)
226
            {
227
                if (cp->left->cbase > cp->cbase || cp->left->parent != cp)
228
                    broken_splay();
229
                cp = cp->left;
230
                from = SPLAY_FROM_ABOVE;
231
                continue;
232
            }
233
            /* No left to step to, so imagine we have just arrived from there */
234
            from = SPLAY_FROM_LEFT;
235
        }
236
        if (from == SPLAY_FROM_LEFT)
237
        {
238
            /* We have arrived from the left. Step right. */
239
            if (cp->right)
240
            {
241
                if (cp->right->cbase < cp->cbase || cp->right->parent != cp)
242
                    broken_splay();
243
                cp = cp->right;
244
                from = SPLAY_FROM_ABOVE;
245
                continue;
246
            }
247
            /* No right to step to, so imagine we have just arrived from there. */
248
            from = SPLAY_FROM_RIGHT;
249
        }
250
        if (from == SPLAY_FROM_RIGHT)
251
        {
252
            /* We have arrived from the right. Step up. */
253
            if (cp->parent == NULL)
254
                break;
255
            if (cp->parent->left != cp && cp->parent->right != cp)
256
                broken_splay();
257
            from = (cp->parent->left == cp ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
258
            cp = cp->parent;
259
        }
260
    }
261
}
262
263
void sanity_check(clump_t *cp)
264
{
265
    sanity_check_rec(cp);
266
}
267
268
void sanity_check_mid(clump_t *cp)
269
{
270
    clump_t *parent;
271
272
    while ((parent = cp->parent) != NULL)
273
    {
274
        if (parent->left == cp)
275
        {
276
            if (parent->right == cp)
277
                broken_splay();
278
        }
279
        else if (parent->right != cp)
280
            broken_splay();
281
        cp = parent;
282
    }
283
284
    sanity_check_rec(cp);
285
}
286
287
#else
288
25.7M
#define SANITY_CHECK(cp) while (0) {}
289
113M
#define SANITY_CHECK_MID(cp) while (0) {}
290
#endif
291
292
/* When initing with the root, we want to pass the smallest inorder one
293
 * back immediately, and set it up so that we step right for the next
294
 * one. */
295
clump_t *
296
clump_splay_walk_init(clump_splay_walker *sw, const gs_ref_memory_t *mem)
297
888k
{
298
888k
    clump_t *cp = mem->root;
299
300
888k
    if (cp)
301
888k
    {
302
888k
        SANITY_CHECK(cp);
303
304
888k
        sw->from = SPLAY_FROM_LEFT;
305
5.79M
        while (cp->left)
306
4.90M
        {
307
4.90M
            cp = cp->left;
308
4.90M
        }
309
888k
    }
310
888k
    sw->cp = cp;
311
888k
    sw->end = NULL;
312
888k
    return cp;
313
888k
}
314
315
clump_t *
316
clump_splay_walk_bwd_init(clump_splay_walker *sw, const gs_ref_memory_t *mem)
317
71.8k
{
318
71.8k
    clump_t *cp = mem->root;
319
320
71.8k
    if (cp)
321
71.8k
    {
322
71.8k
        SANITY_CHECK(cp);
323
324
71.8k
        sw->from = SPLAY_FROM_RIGHT;
325
218k
        while (cp->right)
326
146k
        {
327
146k
            cp = cp->right;
328
146k
        }
329
71.8k
    }
330
71.8k
    sw->cp = cp;
331
71.8k
    sw->end = NULL;
332
71.8k
    return cp;
333
71.8k
}
334
335
/* When initing 'mid walk' (i.e. with a non-root node), we want to
336
 * return the node we are given as the first one, and continue
337
 * onwards in an in order fashion.
338
 */
339
clump_t *
340
clump_splay_walk_init_mid(clump_splay_walker *sw, clump_t *cp)
341
101M
{
342
101M
    sw->from = SPLAY_FROM_LEFT;
343
101M
    sw->cp = cp;
344
101M
    sw->end = cp;
345
101M
    if (cp)
346
101M
    {
347
101M
        SANITY_CHECK_MID(cp);
348
101M
    }
349
101M
    return cp;
350
101M
}
351
352
clump_t *
353
clump_splay_walk_fwd(clump_splay_walker *sw)
354
74.7M
{
355
74.7M
    clump_t *cp = sw->cp;
356
74.7M
    int from = sw->from;
357
358
74.7M
    if (cp == NULL)
359
8
        return NULL;
360
361
    /* We step through the tree, and stop when we arrive
362
     * at sw->end in an in order manner (i.e. by moving from
363
     * the left). */
364
175M
    while (1)
365
175M
    {
366
175M
        if (from == SPLAY_FROM_ABOVE)
367
70.3M
        {
368
            /* We have arrived from above. Step left. */
369
70.3M
            if (cp->left)
370
37.9M
            {
371
37.9M
                cp = cp->left;
372
37.9M
                from = SPLAY_FROM_ABOVE;
373
37.9M
                continue;
374
37.9M
            }
375
            /* No left to step to, so imagine we have just arrived from there */
376
32.4M
            from = SPLAY_FROM_LEFT;
377
            /* Have we reached the stopping point? */
378
32.4M
            if (cp == sw->end)
379
141k
                cp = NULL;
380
            /* We want to stop here, for inorder operation. So break out of the loop. */
381
32.4M
            break;
382
70.3M
        }
383
105M
        if (from == SPLAY_FROM_LEFT)
384
74.7M
        {
385
            /* We have arrived from the left. Step right. */
386
74.7M
            if (cp->right)
387
31.3M
            {
388
31.3M
                cp = cp->right;
389
31.3M
                from = SPLAY_FROM_ABOVE;
390
31.3M
                continue;
391
31.3M
            }
392
            /* No right to step to, so imagine we have just arrived from there. */
393
43.3M
            from = SPLAY_FROM_RIGHT;
394
43.3M
        }
395
73.8M
        if (from == SPLAY_FROM_RIGHT)
396
73.8M
        {
397
            /* We have arrived from the right. Step up. */
398
73.8M
            clump_t *old = cp;
399
73.8M
            cp = cp->parent;
400
73.8M
            if (cp == NULL)
401
1.95M
            {
402
                /* We've reached the root of the tree. Is this our stopping point? */
403
1.95M
                if (sw->end == NULL)
404
887k
                    break;
405
                /* If not, step on. */
406
1.07M
                cp = old;
407
1.07M
                from = SPLAY_FROM_ABOVE;
408
1.07M
            }
409
71.8M
            else
410
71.8M
            {
411
71.8M
                from = (cp->left == old ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
412
71.8M
                if (from == SPLAY_FROM_LEFT)
413
41.3M
                {
414
                    /* Have we reached the stopping point? */
415
41.3M
                    if (cp == sw->end)
416
687k
                        cp = NULL;
417
41.3M
                    break;
418
41.3M
                }
419
71.8M
            }
420
73.8M
        }
421
73.8M
    }
422
74.7M
    sw->cp = cp;
423
74.7M
    sw->from = from;
424
74.7M
    return cp;
425
74.7M
}
426
427
clump_t *
428
clump_splay_walk_bwd(clump_splay_walker *sw)
429
1.67M
{
430
1.67M
    clump_t *cp = sw->cp;
431
1.67M
    int from = sw->from;
432
433
1.67M
    if (cp == NULL)
434
0
        return NULL;
435
436
    /* We step backwards through the tree, and stop when we arrive
437
     * at sw->end in a reverse in order manner (i.e. by moving from
438
     * the right). */
439
4.02M
    while (1)
440
4.02M
    {
441
4.02M
        if (from == SPLAY_FROM_ABOVE)
442
1.45M
        {
443
            /* We have arrived from above. Step right. */
444
1.45M
            if (cp->right)
445
551k
            {
446
551k
                cp = cp->right;
447
551k
                from = SPLAY_FROM_ABOVE;
448
551k
                continue;
449
551k
            }
450
            /* No right to step to, so imagine we have just arrived from there. */
451
902k
            from = SPLAY_FROM_RIGHT;
452
            /* Have we reached our end? */
453
902k
            if (cp == sw->end)
454
0
                cp = NULL;
455
            /* Stop to run inorder operation */
456
902k
            break;
457
1.45M
        }
458
2.57M
        if (from == SPLAY_FROM_RIGHT)
459
1.67M
        {
460
            /* We have arrived from the right. Step left. */
461
1.67M
            if (cp->left)
462
902k
            {
463
902k
                cp = cp->left;
464
902k
                from = SPLAY_FROM_ABOVE;
465
902k
                continue;
466
902k
            }
467
            /* No left to step to, so imagine we have just arrived from there. */
468
769k
            from = SPLAY_FROM_LEFT;
469
769k
        }
470
1.67M
        if (from == SPLAY_FROM_LEFT)
471
1.67M
        {
472
            /* We have arrived from the left. Step up. */
473
1.67M
            clump_t *old = cp;
474
1.67M
            cp = cp->parent;
475
1.67M
            from = (cp == NULL || cp->left != old ? SPLAY_FROM_RIGHT : SPLAY_FROM_LEFT);
476
1.67M
            if (from == SPLAY_FROM_RIGHT)
477
769k
            {
478
769k
                if (cp == sw->end)
479
71.8k
                    cp = NULL;
480
769k
                break;
481
769k
            }
482
1.67M
        }
483
1.67M
    }
484
1.67M
    sw->cp = cp;
485
1.67M
    sw->from = from;
486
1.67M
    return cp;
487
1.67M
}
488
489
static clump_t *
490
clump_splay_remove(clump_t *cp, gs_ref_memory_t *imem)
491
21.7M
{
492
21.7M
    clump_t *replacement;
493
494
21.7M
    if (cp->left == NULL)
495
3.27M
    {
496
        /* At most one child - easy */
497
3.27M
        replacement = cp->right;
498
3.27M
    }
499
18.4M
    else if (cp->right == NULL)
500
9.20M
    {
501
        /* Strictly one child - easy */
502
9.20M
        replacement = cp->left;
503
9.20M
    }
504
9.26M
    else
505
9.26M
    {
506
        /* 2 Children - tricky */
507
        /* Find in-order predecessor to f */
508
9.26M
        replacement = cp->left;
509
10.3M
        while (replacement->right)
510
1.10M
            replacement = replacement->right;
511
        /* Remove replacement - easy as just one child */
512
9.26M
        (void)clump_splay_remove(replacement, imem);
513
        /* Replace cp with replacement */
514
9.26M
        if (cp->left)
515
8.63M
            cp->left->parent = replacement;
516
9.26M
        cp->right->parent = replacement;
517
9.26M
        replacement->left = cp->left;
518
9.26M
        replacement->right = cp->right;
519
9.26M
    }
520
21.7M
    if (cp->parent)
521
10.9M
    {
522
10.9M
        if (cp->parent->left == cp)
523
9.47M
            cp->parent->left = replacement;
524
1.51M
        else
525
1.51M
            cp->parent->right = replacement;
526
10.9M
    }
527
10.7M
    else
528
10.7M
        imem->root = replacement;
529
21.7M
    if (replacement)
530
19.2M
        replacement->parent = cp->parent;
531
21.7M
    return replacement;
532
21.7M
}
533
534
/* Here we apply a function to all the nodes in a tree in
535
 * depth first order. This means that the given function
536
 * can safely alter: 1) the clump, 2) its children,
537
 * 3) its parent's child pointer that points to it
538
 * without fear of corruption. Specifically this means
539
 * that the function can free (and unlink) the node
540
 * if it wants.
541
 */
542
clump_t *
543
clump_splay_app(clump_t *root, gs_ref_memory_t *imem, splay_app_result_t (*fn)(clump_t *, void *), void *arg)
544
279k
{
545
279k
    clump_t *step_to;
546
279k
    clump_t *cp = root;
547
279k
    int from = SPLAY_FROM_ABOVE;
548
279k
    splay_app_result_t res;
549
550
279k
    SANITY_CHECK(cp);
551
552
9.76M
    while (cp)
553
9.53M
    {
554
9.53M
        if (from == SPLAY_FROM_ABOVE)
555
4.90M
        {
556
            /* We have arrived from above. Step left. */
557
4.90M
            step_to = cp->left;
558
4.90M
            if (step_to)
559
2.56M
            {
560
2.56M
                from = SPLAY_FROM_ABOVE;
561
2.56M
                cp = step_to;
562
2.56M
            }
563
2.34M
            else
564
2.34M
            {
565
                /* No left to step to, so imagine we have just arrived from the left */
566
2.34M
                from = SPLAY_FROM_LEFT;
567
2.34M
            }
568
4.90M
        }
569
9.53M
        if (from == SPLAY_FROM_LEFT)
570
4.90M
        {
571
            /* We have arrived from the left. Step right. */
572
4.90M
            step_to = cp->right;
573
4.90M
            if (step_to)
574
2.06M
            {
575
2.06M
                from = SPLAY_FROM_ABOVE;
576
2.06M
                cp = step_to;
577
2.06M
            }
578
2.84M
            else
579
2.84M
            {
580
                /* No right to step to, so imagine we have just arrived from the right. */
581
2.84M
                from = SPLAY_FROM_RIGHT;
582
2.84M
            }
583
4.90M
        }
584
9.53M
        if (from == SPLAY_FROM_RIGHT)
585
4.90M
        {
586
            /* We have arrived from the right. Step up. */
587
4.90M
            step_to = cp->parent;
588
4.90M
            if (step_to)
589
4.62M
            {
590
4.62M
                from = (step_to->left == cp ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
591
4.62M
            }
592
4.90M
            res = fn(cp, arg);
593
4.90M
            if (res & SPLAY_APP_STOP)
594
46.0k
                return cp;
595
4.86M
            cp = step_to;
596
4.86M
        }
597
9.53M
    }
598
233k
    return cp;
599
279k
}
600
601
/* Move the given node to the root of the tree, by
602
 * performing a series of the following rotations.
603
 * The key observation here is that all these
604
 * rotations preserve the ordering of the tree, and
605
 * result in 'x' getting higher.
606
 *
607
 * Case 1:   z          x           Case 1b:   z                   x
608
 *          # #        # #                    # #                 # #
609
 *         y   D      A   y                  A   y               y   D
610
 *        # #     =>     # #                    # #     =>      # #
611
 *       x   C          B   z                  B   x           z   C
612
 *      # #                # #                    # #         # #
613
 *     A   B              C   D                  C   D       A   B
614
 *
615
 * Case 2:   z             x        Case 2b:   z                  x
616
 *          # #          ## ##                # #               ## ##
617
 *         y   D        y     z              A   y             z     y
618
 *        # #     =>   # #   # #                # #     =>    # #   # #
619
 *       A   x        A   B C   D              x   D         A   B C   D
620
 *          # #                               # #
621
 *         B   C                             B   C
622
 *
623
 * Case 3:   y          x           Case 3b:  y                  x
624
 *          # #        # #                   # #                # #
625
 *         x   C  =>  A   y                 A   x       =>     y   C
626
 *        # #            # #                   # #            # #
627
 *       A   B          B   C                 B   C          A   B
628
 */
629
static void
630
splay_move_to_root(clump_t *x, gs_ref_memory_t *mem)
631
70.3M
{
632
70.3M
    clump_t *y, *z;
633
634
70.3M
    if (x == NULL)
635
0
        return;
636
637
144M
    while ((y = x->parent) != NULL) {
638
74.0M
        if ((z = y->parent) != NULL) {
639
38.9M
            x->parent = z->parent;
640
38.9M
            if (x->parent) {
641
13.8M
                if (x->parent->left == z)
642
7.11M
                    x->parent->left = x;
643
6.76M
                else
644
6.76M
                    x->parent->right  = x;
645
13.8M
            }
646
38.9M
            y->parent = x;
647
            /* Case 1, 1b, 2 or 2b */
648
38.9M
            if (y->left == x) {
649
                /* Case 1 or 2b */
650
24.8M
                if (z->left == y) {
651
                    /* Case 1 */
652
12.3M
                    y->left = x->right;
653
12.3M
                    if (y->left)
654
9.43M
                        y->left->parent = y;
655
12.3M
                    z->left = y->right;
656
12.3M
                    if (z->left)
657
8.96M
                        z->left->parent = z;
658
12.3M
                    y->right = z;
659
12.3M
                    z->parent = y;
660
12.4M
                } else {
661
                    /* Case 2b */
662
12.4M
                    z->right = x->left;
663
12.4M
                    if (z->right)
664
2.30M
                        z->right->parent = z;
665
12.4M
                    y->left = x->right;
666
12.4M
                    if (y->left)
667
2.69M
                        y->left->parent = y;
668
12.4M
                    x->left = z;
669
12.4M
                    z->parent = x;
670
12.4M
                }
671
24.8M
                x->right      = y;
672
24.8M
            } else {
673
                /* Case 2 or 1b */
674
14.0M
                if (z->left == y) {
675
                    /* Case 2 */
676
3.68M
                    y->right = x->left;
677
3.68M
                    if (y->right)
678
2.35M
                        y->right->parent = y;
679
3.68M
                    z->left = x->right;
680
3.68M
                    if (z->left)
681
2.37M
                        z->left->parent = z;
682
3.68M
                    x->right = z;
683
3.68M
                    z->parent = x;
684
10.3M
                } else {
685
                    /* Case 1b */
686
10.3M
                    z->right = y->left;
687
10.3M
                    if (z->right)
688
8.00M
                        z->right->parent = z;
689
10.3M
                    y->right = x->left;
690
10.3M
                    if (y->right)
691
7.98M
                        y->right->parent = y;
692
10.3M
                    y->left = z;
693
10.3M
                    z->parent = y;
694
10.3M
                }
695
14.0M
                x->left = y;
696
14.0M
            }
697
38.9M
        } else {
698
            /* Case 3 or 3b */
699
35.1M
            x->parent = NULL;
700
35.1M
            y->parent = x;
701
35.1M
            if (y->left == x) {
702
                /* Case 3 */
703
16.6M
                y->left = x->right;
704
16.6M
                if (y->left)
705
13.4M
                    y->left->parent = y;
706
16.6M
                x->right = y;
707
18.5M
            } else {
708
                /* Case 3b */
709
18.5M
                y->right = x->left;
710
18.5M
                if (y->right)
711
14.0M
                    y->right->parent = y;
712
18.5M
                x->left = y;
713
18.5M
            }
714
35.1M
        }
715
74.0M
    }
716
70.3M
    mem->root = x;
717
70.3M
}
718
719
static void
720
splay_insert(clump_t *cp, gs_ref_memory_t *mem)
721
12.4M
{
722
12.4M
    clump_t *node = NULL;
723
12.4M
    clump_t **root = &mem->root;
724
725
41.6M
    while (*root) {
726
29.2M
        node = *root;
727
29.2M
        if (PTR_LT(cp->cbase, node->cbase)) {
728
14.7M
            root = &node->left;
729
14.7M
        } else {
730
14.4M
            root = &node->right;
731
14.4M
        }
732
29.2M
    }
733
12.4M
    *root = cp;
734
12.4M
    cp->left = NULL;
735
12.4M
    cp->right = NULL;
736
12.4M
    cp->parent = node;
737
12.4M
    splay_move_to_root(cp, mem);
738
12.4M
}
739
740
/*
741
 * Allocate and mostly initialize the state of an allocator (system, global,
742
 * or local).  Does not initialize global or space.
743
 */
744
static void *ialloc_solo(gs_memory_t *, gs_memory_type_ptr_t,
745
                          clump_t **);
746
gs_ref_memory_t *
747
ialloc_alloc_state(gs_memory_t * parent, uint clump_size)
748
46.0k
{
749
46.0k
    clump_t *cp;
750
46.0k
    gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);
751
752
46.0k
    if (iimem == 0)
753
0
        return 0;
754
46.0k
    iimem->stable_memory = (gs_memory_t *)iimem;
755
46.0k
    iimem->procs = gs_ref_memory_procs;
756
46.0k
    iimem->gs_lib_ctx = parent->gs_lib_ctx;
757
46.0k
    iimem->non_gc_memory = parent;
758
46.0k
    iimem->thread_safe_memory = parent->thread_safe_memory;
759
46.0k
    iimem->clump_size = clump_size;
760
#if defined(MEMENTO) || defined(SINGLE_OBJECT_MEMORY_BLOCKS_ONLY)
761
    iimem->large_size = 1;
762
#else
763
46.0k
    iimem->large_size = ((clump_size / 4) & -obj_align_mod) + 1;
764
46.0k
#endif
765
46.0k
    iimem->is_controlled = false;
766
46.0k
    iimem->gc_status.vm_threshold = clump_size * 3L;
767
46.0k
    iimem->gc_status.max_vm = MAX_MAX_VM;
768
46.0k
    iimem->gc_status.signal_value = 0;
769
46.0k
    iimem->gc_status.enabled = false;
770
46.0k
    iimem->gc_status.requested = 0;
771
46.0k
    iimem->gc_allocated = 0;
772
46.0k
    iimem->previous_status.allocated = 0;
773
46.0k
    iimem->previous_status.used = 0;
774
46.0k
    ialloc_reset(iimem);
775
46.0k
    iimem->root = cp;
776
46.0k
    ialloc_set_limit(iimem);
777
46.0k
    iimem->cc = NULL;
778
46.0k
    iimem->save_level = 0;
779
46.0k
    iimem->new_mask = 0;
780
46.0k
    iimem->test_mask = ~0;
781
46.0k
    iimem->streams = 0;
782
46.0k
    iimem->names_array = 0;
783
46.0k
    iimem->roots = 0;
784
46.0k
    iimem->num_contexts = 0;
785
46.0k
    iimem->saved = 0;
786
46.0k
    return iimem;
787
46.0k
}
788
789
/* Allocate a 'solo' object with its own clump. */
790
static void *
791
ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype,
792
            clump_t ** pcp)
793
46.0k
{ /*
794
         * We can't assume that the parent uses the same object header
795
         * that we do, but the GC requires that allocators have
796
         * such a header.  Therefore, we prepend one explicitly.
797
         */
798
46.0k
    clump_t *cp =
799
46.0k
        gs_raw_alloc_struct_immovable(parent, &st_clump,
800
46.0k
                                      "ialloc_solo(clump)");
801
46.0k
    uint csize =
802
46.0k
        ROUND_UP(sizeof(clump_head_t) + sizeof(obj_header_t) +
803
46.0k
                 pstype->ssize,
804
46.0k
                 obj_align_mod);
805
46.0k
    byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
806
46.0k
    obj_header_t *obj = (obj_header_t *) (cdata + sizeof(clump_head_t));
807
808
46.0k
    if (cp == 0 || cdata == 0) {
809
0
        gs_free_object(parent, cp, "ialloc_solo(allocation failure)");
810
0
        gs_free_object(parent, cdata, "ialloc_solo(allocation failure)");
811
0
        return 0;
812
0
    }
813
46.0k
    alloc_init_clump(cp, cdata, cdata + csize, false, (clump_t *) NULL);
814
46.0k
    cp->cbot = cp->ctop;
815
46.0k
    cp->parent = cp->left = cp->right = 0;
816
46.0k
    cp->c_alone = true;
817
    /* Construct the object header "by hand". */
818
46.0k
    obj->o_pad = 0;
819
46.0k
    obj->o_alone = 1;
820
46.0k
    obj->o_size = pstype->ssize;
821
46.0k
    obj->o_type = pstype;
822
46.0k
    *pcp = cp;
823
46.0k
    return (void *)(obj + 1);
824
46.0k
}
825
826
void
827
ialloc_free_state(gs_ref_memory_t *iimem)
828
0
{
829
0
    clump_t *cp;
830
0
    gs_memory_t *mem;
831
0
    if (iimem == NULL)
832
0
        return;
833
0
    cp = iimem->root;
834
0
    mem = iimem->non_gc_memory;
835
0
    if (cp == NULL)
836
0
        return;
837
0
    gs_free_object(mem, cp->chead, "ialloc_solo(allocation failure)");
838
0
    gs_free_object(mem, cp, "ialloc_solo(allocation failure)");
839
0
}
840
841
/*
 * Add a clump to an externally controlled allocator.  Such allocators
 * allocate all objects as immovable, are not garbage-collected, and
 * don't attempt to acquire additional memory on their own.
 * Returns 0 on success, or gs_error_VMerror if the clump could not be
 * acquired.  Note the ordering: acquisition is enabled only for the
 * duration of the single alloc_add_clump call below, then disabled.
 */
int
ialloc_add_clump(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
    clump_t *cp;

    /* Temporarily allow acquisition of this one clump. */
    imem->is_controlled = false;
    imem->large_size = imem->clump_size;
    imem->limit = imem->gc_status.max_vm = MAX_MAX_VM;

    /* Acquire the clump. */
    cp = alloc_add_clump(imem, space, cname);

    /*
     * Make all allocations immovable.  Since the "movable" allocators
     * allocate within existing clumps, whereas the "immovable" ones
     * allocate in new clumps, we equate the latter to the former, even
     * though this seems backwards.
     */
    imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
    imem->procs.alloc_struct_immovable = imem->procs.alloc_struct;
    imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array;
    imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
    imem->procs.alloc_string_immovable = imem->procs.alloc_string;

    /* Disable acquisition of additional clumps. */
    imem->is_controlled = true;
    imem->limit = 0;

    return (cp ? 0 : gs_note_error(gs_error_VMerror));
}
877
878
/* Prepare for a GC by clearing the stream list. */
879
/* This probably belongs somewhere else.... */
880
void
881
ialloc_gc_prepare(gs_ref_memory_t * mem)
882
115k
{ /*
883
         * We have to unlink every stream from its neighbors,
884
         * so that referenced streams don't keep all streams around.
885
         */
886
115k
    while (mem->streams != 0) {
887
0
        stream *s = mem->streams;
888
889
0
        mem->streams = s->next;
890
0
        s->prev = s->next = 0;
891
0
    }
892
115k
}
893
894
/* Initialize after a save. */
void
ialloc_reset(gs_ref_memory_t * mem)
{
    mem->root = 0;              /* empty the clump splay tree; caller reinstates clumps */
    mem->cc = NULL;             /* no clump currently open for LIFO allocation */
    mem->allocated = 0;
    mem->changes = 0;
    mem->scan_limit = 0;
    mem->total_scanned = 0;
    mem->total_scanned_after_compacting = 0;
    ialloc_reset_free(mem);     /* also clear freelists and 'lost' counters */
}
907
908
/* Initialize after a save or GC. */
909
void
910
ialloc_reset_free(gs_ref_memory_t * mem)
911
187k
{
912
187k
    int i;
913
187k
    obj_header_t **p;
914
915
187k
    mem->lost.objects = 0;
916
187k
    mem->lost.refs = 0;
917
187k
    mem->lost.strings = 0;
918
187k
    mem->cfreed.cp = 0;
919
19.3M
    for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++)
920
19.1M
        *p = 0;
921
187k
    mem->largest_free_size = 0;
922
187k
}
923
924
/*
925
 * Set an arbitrary limit so that the amount of allocated VM does not grow
926
 * indefinitely even when GC is disabled.  Benchmarks have shown that
927
 * the resulting GC's are infrequent enough not to degrade performance
928
 * significantly.
929
 */
930
#define FORCE_GC_LIMIT 8000000
931
932
/* Set the allocation limit after a change in one or more of */
933
/* vm_threshold, max_vm, or enabled, or after a GC. */
934
void
ialloc_set_limit(register gs_ref_memory_t * mem)
{ /*
         * The following code is intended to set the limit so that
         * we stop allocating when allocated + previous_status.allocated
         * exceeds the lesser of max_vm or (if GC is enabled)
         * gc_allocated + vm_threshold.
         */
    /* Headroom left under max_vm after accounting for memory already
     * attributed to outer save levels; 0 if max_vm is already exceeded. */
    size_t max_allocated =
    (mem->gc_status.max_vm > mem->previous_status.allocated ?
     mem->gc_status.max_vm - mem->previous_status.allocated :
     0);

    if (mem->gc_status.enabled) {
        /* Next GC should trigger vm_threshold bytes past the last GC. */
        size_t limit = mem->gc_allocated + mem->gc_status.vm_threshold;

        if (limit < mem->previous_status.allocated)
            mem->limit = 0;
        else {
            limit -= mem->previous_status.allocated;
            mem->limit = min(limit, max_allocated);
        }
    } else
        /* GC disabled: still cap growth (see FORCE_GC_LIMIT above) so VM
         * does not grow indefinitely. */
        mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT);
    if_debug7m('0', (const gs_memory_t *)mem,
               "[0]space=%d, max_vm=%"PRIdSIZE", prev.alloc=%"PRIdSIZE", enabled=%d, "
               "gc_alloc=%"PRIdSIZE", threshold=%"PRIdSIZE" => limit=%"PRIdSIZE"\n",
               mem->space, mem->gc_status.max_vm,
               mem->previous_status.allocated,
               mem->gc_status.enabled, mem->gc_allocated,
               mem->gc_status.vm_threshold, mem->limit);
}
966
967
/* Context passed to the clump splay-walk callbacks used by i_free_all. */
struct free_data
{
    gs_ref_memory_t *imem;      /* allocator whose clumps are being freed */
    clump_t         *allocator; /* clump containing imem itself, once found */
};
972
973
static splay_app_result_t
974
free_all_not_allocator(clump_t *cp, void *arg)
975
1.71M
{
976
1.71M
    struct free_data *fd = (struct free_data *)arg;
977
978
1.71M
    if (cp->cbase + sizeof(obj_header_t) != (byte *)fd->imem)
979
1.62M
        alloc_free_clump(cp, fd->imem);
980
92.1k
    else
981
92.1k
        fd->allocator = cp;
982
983
1.71M
    return SPLAY_APP_CONTINUE;
984
1.71M
}
985
986
static splay_app_result_t
987
free_all_allocator(clump_t *cp, void *arg)
988
46.0k
{
989
46.0k
    struct free_data *fd = (struct free_data *)arg;
990
991
46.0k
    if (cp->cbase + sizeof(obj_header_t) != (byte *)fd->imem)
992
0
        return SPLAY_APP_CONTINUE;
993
994
46.0k
    fd->allocator = cp;
995
46.0k
    alloc_free_clump(cp, fd->imem);
996
46.0k
    return SPLAY_APP_STOP;
997
46.0k
}
998
999
/*
1000
 * Free all the memory owned by the allocator, except the allocator itself.
1001
 * Note that this only frees memory at the current save level: the client
1002
 * is responsible for restoring to the outermost level if desired.
1003
 */
1004
static void
1005
i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
1006
117k
{
1007
117k
    gs_ref_memory_t * imem = (gs_ref_memory_t *)mem;
1008
117k
    struct free_data fd;
1009
1010
117k
    fd.imem = imem;
1011
117k
    fd.allocator = NULL;
1012
1013
117k
    if (free_mask & FREE_ALL_DATA && imem->root != NULL) {
1014
        /* Free every clump except the allocator */
1015
117k
        clump_splay_app(imem->root, imem, free_all_not_allocator, &fd);
1016
1017
        /* Reinstate the allocator as the sole clump */
1018
117k
        imem->root = fd.allocator;
1019
117k
        if (fd.allocator)
1020
92.1k
            fd.allocator->parent = fd.allocator->left = fd.allocator->right = NULL;
1021
117k
    }
1022
117k
    if (free_mask & FREE_ALL_ALLOCATOR) {
1023
        /* Walk the tree to find the allocator. */
1024
46.0k
        clump_splay_app(imem->root, imem, free_all_allocator, &fd);
1025
46.0k
    }
1026
117k
}
1027
1028
/* ================ Accessors ================ */
1029
1030
/* Get the size of an object from the header. */
1031
static size_t
1032
i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
1033
1.33M
{
1034
1.33M
    return pre_obj_contents_size((const obj_header_t *)obj - 1);
1035
1.33M
}
1036
1037
/* Get the type of a structure from the header. */
1038
static gs_memory_type_ptr_t
1039
i_object_type(const gs_memory_t * mem, const void /*obj_header_t */ *obj)
1040
1.18M
{
1041
1.18M
    return ((const obj_header_t *)obj - 1)->o_type;
1042
1.18M
}
1043
1044
/* Get the GC status of a memory. */
void
gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat)
{
    *pstat = mem->gc_status;    /* struct copy */
}
1050
1051
/* Set the GC status of a memory. */
void
gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat)
{
    mem->gc_status = *pstat;
    /* The allocation limit depends on vm_threshold/max_vm/enabled, so it
     * must be recomputed whenever the status changes. */
    ialloc_set_limit(mem);
}
1058
1059
/* Set VM threshold. Value passed as int64_t since it is signed */
1060
void
1061
gs_memory_set_vm_threshold(gs_ref_memory_t * mem, int64_t val)
1062
140k
{
1063
140k
    gs_memory_gc_status_t stat;
1064
140k
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;
1065
1066
140k
    if (val < MIN_VM_THRESHOLD)
1067
0
        val = MIN_VM_THRESHOLD;
1068
140k
    else if (val > MAX_VM_THRESHOLD)
1069
0
        val = MAX_VM_THRESHOLD;
1070
140k
    gs_memory_gc_status(mem, &stat);
1071
140k
    stat.vm_threshold = val;
1072
140k
    gs_memory_set_gc_status(mem, &stat);
1073
140k
    gs_memory_gc_status(stable, &stat);
1074
140k
    stat.vm_threshold = val;
1075
140k
    gs_memory_set_gc_status(stable, &stat);
1076
140k
}
1077
1078
/* Set VM reclaim. */
1079
void
1080
gs_memory_set_vm_reclaim(gs_ref_memory_t * mem, bool enabled)
1081
140k
{
1082
140k
    gs_memory_gc_status_t stat;
1083
140k
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;
1084
1085
140k
    gs_memory_gc_status(mem, &stat);
1086
140k
    stat.enabled = enabled;
1087
140k
    gs_memory_set_gc_status(mem, &stat);
1088
140k
    gs_memory_gc_status(stable, &stat);
1089
140k
    stat.enabled = enabled;
1090
140k
    gs_memory_set_gc_status(stable, &stat);
1091
140k
}
1092
1093
/* ================ Objects ================ */
1094
1095
/* Allocate a small object quickly if possible. */
/* The size must be substantially less than max_uint. */
/* ptr must be declared as obj_header_t *. */
/* pfl must be declared as obj_header_t **. */
/*
 * These four macros deliberately expand to the pieces of a single
 * if / else-if chain with braces left open across macro boundaries;
 * they must be used together, in order, ending with ELSE_ALLOC.
 */
/* Case 1: pop a block from the exact-size small freelist. */
#define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\
        if ( size <= max_freelist_size &&\
             *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\
           )\
        { ptr = *pfl;\
                *pfl = *(obj_header_t **)ptr;\
                ptr[-1].o_size = (obj_size_t)size;\
                ptr[-1].o_type = pstype;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Case 2: carve a block from the large-object freelist. */
#define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\
        }\
        else if (size > max_freelist_size &&\
                 (ptr = large_freelist_alloc(imem, size)) != 0)\
        { ptr[-1].o_type = pstype;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Case 3: LIFO-allocate at the bottom of the currently open clump,
 * constructing the object header in place. */
#define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\
        }\
        else if ( imem->cc && !imem->cc->c_alone && \
                (imem->cc->ctop - (byte *)(ptr = (obj_header_t *)imem->cc->cbot))\
                >= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\
             size < imem->large_size\
           )\
        { imem->cc->cbot = (byte *)ptr + obj_size_round(size);\
                ptr->o_pad = 0;\
                ptr->o_alone = 0;\
                ptr->o_size = (obj_size_t)size;\
                ptr->o_type = pstype;\
                ptr++;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Case 4: fall through to the caller's general allocation path. */
#define ELSE_ALLOC\
        }\
        else
1137
1138
/* Allocate a movable byte object.  Returns NULL on failure. */
static byte *
i_alloc_bytes(gs_memory_t * mem, size_t ssize, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    obj_header_t **pfl;
    obj_size_t size = (obj_size_t)ssize;

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    /* Fail if the requested size does not fit in obj_size_t. */
    if ((size_t)size != ssize)
        return NULL;

    /* Allocation strategy, in order: exact-size freelist, large
     * freelist, LIFO space in the open clump, then the general path. */
    IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl)
        alloc_trace(":+bf", imem, cname, NULL, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+bF", imem, cname, NULL, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, (uint)size, &st_bytes)
        alloc_trace(":+b ", imem, cname, NULL, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, &st_bytes, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+b.", imem, cname, NULL, size, obj);
    }
#if IGC_PTR_STABILITY_CHECK
        obj[-1].d.o.space_id = imem->space_id;
#endif
    return (byte *)Memento_label(obj, cname);
}
1172
static byte *
1173
i_alloc_bytes_immovable(gs_memory_t * mem, size_t ssize, client_name_t cname)
1174
484k
{
1175
484k
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1176
484k
    obj_header_t *obj;
1177
484k
    obj_size_t size = (obj_size_t)ssize;
1178
1179
#ifdef MEMENTO
1180
    if (Memento_failThisEvent())
1181
        return NULL;
1182
#endif
1183
1184
484k
    if ((size_t)size != ssize)
1185
0
        return NULL;
1186
1187
484k
    obj = alloc_obj(imem, size, &st_bytes,
1188
484k
                    ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
1189
484k
    if (obj == 0)
1190
0
        return 0;
1191
484k
    alloc_trace("|+b.", imem, cname, NULL, size, obj);
1192
484k
    return (byte *)Memento_label(obj, cname);
1193
484k
}
1194
/* Allocate a movable structure of type pstype.  Returns NULL on failure. */
static void *
i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
               client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_size_t size = pstype->ssize;
    obj_header_t *obj;
    obj_header_t **pfl;

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    ALLOC_CHECK_SIZE(mem,pstype);
    /* Allocation strategy, in order: exact-size freelist, large
     * freelist, LIFO space in the open clump, then the general path. */
    IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl)
        alloc_trace(":+<f", imem, cname, pstype, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+<F", imem, cname, pstype, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+< ", imem, cname, pstype, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, pstype, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+<.", imem, cname, pstype, size, obj);
    }
#if IGC_PTR_STABILITY_CHECK
        obj[-1].d.o.space_id = imem->space_id;
#endif
    return Memento_label(obj, cname);
}
1227
static void *
1228
i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
1229
                         client_name_t cname)
1230
8.66M
{
1231
8.66M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1232
8.66M
    obj_size_t size = pstype->ssize;
1233
8.66M
    obj_header_t *obj;
1234
1235
#ifdef MEMENTO
1236
    if (Memento_failThisEvent())
1237
        return NULL;
1238
#endif
1239
1240
8.66M
    ALLOC_CHECK_SIZE(mem,pstype);
1241
8.66M
    obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
1242
8.66M
    alloc_trace("|+<.", imem, cname, pstype, size, obj);
1243
8.66M
    return Memento_label(obj, cname);
1244
8.66M
}
1245
1246
/*
 * Compute num_elements * elt_size into *lsize, returning false if the
 * product could overflow size_t (conservatively).  Fixes an off-by-one
 * in the previous bit-width test: an m-bit value times an n-bit value
 * can require up to m+n bits (not m+n-1), so products needing exactly
 * one bit more than size_t slipped through and wrapped.
 */
static inline bool
alloc_array_check_size(size_t num_elements, size_t elt_size, size_t *lsize)
{
    int shift0, shift1;
    size_t m, n;

    /* Avoid the loops in the overwhelming number of cases. */
    if ((num_elements | elt_size) >= 65536) {
        /* Slightly conservative, but it'll work for our purposes. */
        /* m is the maximum unsigned value representable in shift0 bits */
        for (m=0, shift0 = 0; m < num_elements; m = (m<<1)+1, shift0++);
        /* n is the maximum unsigned value representable in shift1 bits */
        for (n=0, shift1 = 0; n < elt_size; n = (n<<1)+1, shift1++);
        /* A shift0-bit unsigned number multiplied by a shift1-bit
         * unsigned number may need up to shift0+shift1 bits; reject
         * anything that might not fit in size_t. */
        if (shift0 + shift1 > 8*sizeof(size_t))
            return false; /* Overflow */
    }

    *lsize = num_elements * elt_size;
    return true;
}
1268
1269
static byte *
1270
i_alloc_byte_array(gs_memory_t * mem, size_t num_elements, size_t elt_size,
1271
                   client_name_t cname)
1272
1.15M
{
1273
1.15M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1274
1.15M
    obj_header_t *obj;
1275
1.15M
    size_t slsize;
1276
1.15M
    obj_size_t lsize;
1277
#ifdef MEMENTO
1278
    if (Memento_failThisEvent())
1279
        return NULL;
1280
#endif
1281
1.15M
    if (alloc_array_check_size(num_elements, elt_size, &slsize) == false)
1282
0
        return NULL;
1283
1.15M
    lsize = (obj_size_t)slsize;
1284
1.15M
    if ((size_t)lsize != slsize)
1285
0
        return NULL;
1286
1.15M
    obj = alloc_obj(imem, lsize,
1287
1.15M
                    &st_bytes, ALLOC_DIRECT, cname);
1288
1289
1.15M
    if_debug6m('A', mem, "[a%d:+b.]%s -bytes-*(%"PRIuSIZE"=%"PRIuSIZE"*%"PRIuSIZE") = "PRI_INTPTR"\n",
1290
1.15M
               alloc_trace_space(imem), client_name_string(cname),
1291
1.15M
               num_elements * elt_size,
1292
1.15M
               num_elements, elt_size, (intptr_t)obj);
1293
1.15M
    return (byte *)Memento_label(obj, cname);
1294
1.15M
}
1295
static byte *
1296
i_alloc_byte_array_immovable(gs_memory_t * mem, size_t num_elements,
1297
                             size_t elt_size, client_name_t cname)
1298
0
{
1299
0
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1300
0
    obj_header_t *obj;
1301
0
    size_t slsize;
1302
0
    obj_size_t lsize;
1303
#ifdef MEMENTO
1304
    if (Memento_failThisEvent())
1305
        return NULL;
1306
#endif
1307
0
    if (alloc_array_check_size(num_elements, elt_size, &slsize) == false)
1308
0
        return NULL;
1309
0
    lsize = (obj_size_t)slsize;
1310
0
    if ((size_t)lsize != slsize)
1311
0
        return NULL;
1312
0
    obj = alloc_obj(imem, lsize,
1313
0
                    &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT,
1314
0
                    cname);
1315
1316
0
    if_debug6m('A', mem, "[a%d|+b.]%s -bytes-*(%"PRIuSIZE"=%"PRIuSIZE"*%"PRIuSIZE") = "PRI_INTPTR"\n",
1317
0
               alloc_trace_space(imem), client_name_string(cname),
1318
0
               num_elements * elt_size,
1319
0
               num_elements, elt_size, (intptr_t)obj);
1320
0
    return (byte *)Memento_label(obj, cname);
1321
0
}
1322
static void *
1323
i_alloc_struct_array(gs_memory_t * mem, size_t num_elements,
1324
                     gs_memory_type_ptr_t pstype, client_name_t cname)
1325
13.4M
{
1326
13.4M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1327
13.4M
    obj_header_t *obj;
1328
13.4M
    size_t slsize;
1329
13.4M
    obj_size_t lsize;
1330
#ifdef MEMENTO
1331
    if (Memento_failThisEvent())
1332
        return NULL;
1333
#endif
1334
1335
13.4M
    ALLOC_CHECK_SIZE(mem,pstype);
1336
#ifdef DEBUG
1337
    if (pstype->enum_ptrs == basic_enum_ptrs) {
1338
        dmprintf2(mem, "  i_alloc_struct_array: called with incorrect structure type (not element), struct='%s', client='%s'\n",
1339
                pstype->sname, cname);
1340
        return NULL;    /* fail */
1341
    }
1342
#endif
1343
13.4M
    if (alloc_array_check_size(num_elements, pstype->ssize, &slsize) == false)
1344
0
        return NULL;
1345
13.4M
    lsize = (obj_size_t)slsize;
1346
13.4M
    if ((size_t)lsize != slsize)
1347
3
        return NULL;
1348
13.4M
    obj = alloc_obj(imem, lsize, pstype, ALLOC_DIRECT, cname);
1349
13.4M
    if_debug7m('A', mem, "[a%d:+<.]%s %s*(%"PRIuSIZE"=%"PRIuSIZE"*%u) = "PRI_INTPTR"\n",
1350
13.4M
               alloc_trace_space(imem), client_name_string(cname),
1351
13.4M
               struct_type_name_string(pstype),
1352
13.4M
               num_elements * pstype->ssize,
1353
13.4M
               num_elements, pstype->ssize, (intptr_t)obj);
1354
13.4M
    return (char *)Memento_label(obj, cname);
1355
13.4M
}
1356
static void *
1357
i_alloc_struct_array_immovable(gs_memory_t * mem, size_t num_elements,
1358
                               gs_memory_type_ptr_t pstype, client_name_t cname)
1359
9.21k
{
1360
9.21k
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1361
9.21k
    obj_header_t *obj;
1362
9.21k
    size_t slsize;
1363
9.21k
    obj_size_t lsize;
1364
#ifdef MEMENTO
1365
    if (Memento_failThisEvent())
1366
        return NULL;
1367
#endif
1368
1369
9.21k
    ALLOC_CHECK_SIZE(mem,pstype);
1370
9.21k
    if (alloc_array_check_size(num_elements, pstype->ssize, &slsize) == false)
1371
0
        return NULL;
1372
9.21k
    lsize = (obj_size_t)slsize;
1373
9.21k
    if ((size_t)lsize != slsize)
1374
0
        return NULL;
1375
9.21k
    obj = alloc_obj(imem, lsize, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
1376
9.21k
    if_debug7m('A', mem, "[a%d|+<.]%s %s*(%"PRIuSIZE"=%"PRIuSIZE"*%u) = "PRI_INTPTR"\n",
1377
9.21k
               alloc_trace_space(imem), client_name_string(cname),
1378
9.21k
               struct_type_name_string(pstype),
1379
9.21k
               num_elements * pstype->ssize,
1380
9.21k
               num_elements, pstype->ssize, (intptr_t)obj);
1381
9.21k
    return (char *)Memento_label(obj, cname);
1382
9.21k
}
1383
/* Resize an object in place when possible, otherwise reallocate and
 * copy.  Returns the (possibly moved) object, or NULL on failure. */
static void *
i_resize_object(gs_memory_t * mem, void *obj, size_t new_num_elements,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp = (obj_header_t *) obj - 1;
    gs_memory_type_ptr_t pstype = pp->o_type;
    size_t old_size = pre_obj_contents_size(pp);
    size_t new_size = pstype->ssize * new_num_elements;
    size_t old_size_rounded = obj_align_round(old_size);
    size_t new_size_rounded = obj_align_round(new_size);
    void *new_obj = NULL;

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    /* Fail if the rounded new size does not fit in obj_size_t. */
    if (new_size_rounded != (obj_size_t)new_size_rounded)
        return NULL;

    /* Case 1: same rounded size — just update the recorded size. */
    if (old_size_rounded == new_size_rounded) {
        pp->o_size = (obj_size_t)new_size;
        new_obj = obj;
    } else
        /* Case 2: object is the last allocation in the open clump and
         * there is room — grow or shrink by moving cbot. */
        if (imem->cc && (byte *)obj + old_size_rounded == imem->cc->cbot &&
            imem->cc->ctop - (byte *)obj >= new_size_rounded ) {
            imem->cc->cbot = (byte *)obj + new_size_rounded;
            pp->o_size = (obj_size_t)new_size;
            new_obj = obj;
        } else /* try and trim the object -- but only if room for a dummy header */
            if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
                trim_obj(imem, obj, (obj_size_t)new_size, (clump_t *)0);
                new_obj = obj;
            }
    if (new_obj) {
        if_debug8m('A', mem, "[a%d:%c%c ]%s %s(%"PRIuSIZE"=>%"PRIuSIZE") "PRI_INTPTR"\n",
                   alloc_trace_space(imem),
                   (new_size > old_size ? '>' : '<'),
                   (pstype == &st_bytes ? 'b' : '<'),
                   client_name_string(cname),
                   struct_type_name_string(pstype),
                   old_size, new_size, (intptr_t)obj);
        return Memento_label(new_obj, cname);
    }
    /* Punt: allocate a fresh array, copy the overlap, free the old one. */
    new_obj = gs_alloc_struct_array(mem, new_num_elements, void,
                                    pstype, cname);
    if (new_obj == 0)
        return 0;
    memcpy(new_obj, obj, min(old_size, new_size));
    gs_free_object(mem, obj, cname);
    return Memento_label(new_obj, cname);
}
1437
static void
1438
i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
1439
72.6M
{
1440
72.6M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1441
72.6M
    obj_header_t *pp;
1442
72.6M
    gs_memory_type_ptr_t pstype;
1443
72.6M
    gs_memory_struct_type_t saved_stype;
1444
1445
72.6M
    struct_proc_finalize((*finalize));
1446
72.6M
    size_t size, rounded_size;
1447
1448
72.6M
    if (ptr == 0)
1449
8.07M
        return;
1450
64.5M
    pp = (obj_header_t *) ptr - 1;
1451
64.5M
    pstype = pp->o_type;
1452
#ifdef DEBUG
1453
    if (gs_debug_c('?')) {
1454
        clump_locator_t cld;
1455
1456
        if (pstype == &st_free) {
1457
            mlprintf2(mem, "%s: object "PRI_INTPTR" already free!\n",
1458
                      client_name_string(cname), (intptr_t)ptr);
1459
            return;   /*gs_abort(); */
1460
        }
1461
        /* Check that this allocator owns the object being freed. */
1462
        cld.memory = imem;
1463
        while ((cld.cp = cld.memory->root),
1464
               !clump_locate_ptr(ptr, &cld)
1465
            ) {
1466
            if (!cld.memory->saved) {
1467
                mlprintf3(mem, "%s: freeing "PRI_INTPTR", not owned by memory "PRI_INTPTR"!\n",
1468
                          client_name_string(cname), (intptr_t)ptr,
1469
                          (intptr_t)mem);
1470
                return;   /*gs_abort(); */
1471
            }
1472
                  /****** HACK: we know the saved state is the first ******
1473
                   ****** member of an alloc_save_t. ******/
1474
            cld.memory = (gs_ref_memory_t *) cld.memory->saved;
1475
        }
1476
        /* Check that the object is in the allocated region. */
1477
        if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
1478
                          cld.cp->cbot))
1479
            ) {
1480
            mlprintf5(mem, "%s: freeing "PRI_INTPTR",\n\toutside clump "PRI_INTPTR" cbase="PRI_INTPTR", cbot="PRI_INTPTR"!\n",
1481
                      client_name_string(cname), (intptr_t) ptr,
1482
                      (intptr_t) cld.cp, (intptr_t) cld.cp->cbase,
1483
                      (intptr_t) cld.cp->cbot);
1484
            return;   /*gs_abort(); */
1485
        }
1486
    }
1487
#endif
1488
64.5M
    size = pre_obj_contents_size(pp);
1489
64.5M
    rounded_size = obj_align_round(size);
1490
64.5M
    finalize = pstype->finalize;
1491
64.5M
    if (finalize != 0) {
1492
1493
        /* unfortunately device finalize procedures will clobber the
1494
           stype which is used for later debugging with "A" debug
1495
           tracing, so we save stype it in a local. */
1496
13.8M
        if (gs_debug['a'] || gs_debug['A'])
1497
0
            saved_stype = *pstype;
1498
1499
13.8M
        if_debug3m('u', mem, "[u]finalizing %s "PRI_INTPTR" (%s)\n",
1500
13.8M
                   struct_type_name_string(pstype),
1501
13.8M
                   (intptr_t)ptr, client_name_string(cname));
1502
13.8M
        (*finalize) (mem, ptr);
1503
1504
13.8M
        if (gs_debug['a'] || gs_debug['A'])
1505
0
            pstype = &saved_stype;
1506
13.8M
    }
1507
64.5M
    if (imem->cc && (byte *) ptr + rounded_size == imem->cc->cbot) {
1508
22.1M
        alloc_trace(":-o ", imem, cname, pstype, size, ptr);
1509
22.1M
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
1510
22.1M
        imem->cc->cbot = (byte *) pp;
1511
        /* IFF this object is adjacent to (or below) the byte after the
1512
         * highest free object, do the consolidation within this clump. */
1513
22.1M
        if ((byte *)pp <= imem->cc->int_freed_top) {
1514
3.01M
            consolidate_clump_free(imem->cc, imem);
1515
3.01M
        }
1516
22.1M
        return;
1517
22.1M
    }
1518
42.3M
    if (pp->o_alone) {
1519
                /*
1520
                 * We gave this object its own clump.  Free the entire clump,
1521
                 * unless it belongs to an older save level, in which case
1522
                 * we mustn't overwrite it.
1523
                 */
1524
10.3M
        clump_locator_t cl;
1525
1526
#ifdef DEBUG
1527
        {
1528
            clump_locator_t cld;
1529
1530
            cld.memory = imem;
1531
            cld.cp = 0;
1532
            if (gs_debug_c('a'))
1533
                alloc_trace(
1534
                            (clump_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
1535
                               imem, cname, pstype, size, ptr);
1536
        }
1537
#endif
1538
10.3M
        cl.memory = imem;
1539
10.3M
        cl.cp = 0;
1540
10.3M
        if (clump_locate_ptr(ptr, &cl)) {
1541
10.3M
            if (!imem->is_controlled)
1542
10.3M
                alloc_free_clump(cl.cp, imem);
1543
10.3M
            return;
1544
10.3M
        }
1545
        /* Don't overwrite even if gs_alloc_debug is set. */
1546
10.3M
    }
1547
32.0M
    if (rounded_size >= sizeof(obj_header_t *)) {
1548
        /*
1549
         * Put the object on a freelist, unless it belongs to
1550
         * an older save level, in which case we mustn't
1551
         * overwrite it.
1552
         */
1553
32.0M
        imem->cfreed.memory = imem;
1554
32.0M
        if (clump_locate(ptr, &imem->cfreed)) {
1555
32.0M
            obj_header_t **pfl;
1556
1557
32.0M
            if (size > max_freelist_size) {
1558
2.79M
                pfl = &imem->freelists[LARGE_FREELIST_INDEX];
1559
2.79M
                if (rounded_size > imem->largest_free_size)
1560
412k
                    imem->largest_free_size = rounded_size;
1561
29.2M
            } else {
1562
29.2M
                pfl = &imem->freelists[(size + obj_align_mask) >>
1563
29.2M
                                      log2_obj_align_mod];
1564
29.2M
            }
1565
            /* keep track of highest object on a freelist */
1566
            /* If we're dealing with a block in the currently open clump
1567
               (in imem->cc) update that, otherwise, update the clump in
1568
               the clump list (in imem->cfreed.cp)
1569
             */
1570
32.0M
            if (imem->cc && imem->cfreed.cp->chead == imem->cc->chead) {
1571
5.34M
                if ((byte *)pp >= imem->cc->int_freed_top) {
1572
2.20M
                    imem->cc->int_freed_top = (byte *)ptr + rounded_size;
1573
2.20M
                }
1574
5.34M
            }
1575
26.6M
            else {
1576
26.6M
                if ((byte *)pp >= imem->cfreed.cp->int_freed_top) {
1577
238k
                    imem->cfreed.cp->int_freed_top = (byte *)ptr + rounded_size;
1578
238k
                }
1579
26.6M
            }
1580
32.0M
            pp->o_type = &st_free;  /* don't confuse GC */
1581
32.0M
            o_set_unmarked(pp);
1582
32.0M
            gs_alloc_fill(ptr, gs_alloc_fill_free, size);
1583
32.0M
            *(obj_header_t **) ptr = *pfl;
1584
32.0M
            *pfl = (obj_header_t *) ptr;
1585
32.0M
            alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"),
1586
32.0M
                        imem, cname, pstype, size, ptr);
1587
32.0M
            return;
1588
32.0M
        }
1589
        /* Don't overwrite even if gs_alloc_debug is set. */
1590
32.0M
    } else {
1591
0
        pp->o_type = &st_free;  /* don't confuse GC */
1592
0
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
1593
0
    }
1594
32.0M
    alloc_trace(":-o#", imem, cname, pstype, size, ptr);
1595
5.37k
    imem->lost.objects += obj_size_round(size);
1596
5.37k
}
1597
/* Allocate a movable string of nbytes.  Strings grow downward from ctop
   within a clump; this routine cycles through the clumps at the current
   save level looking for one with enough ctop-cbot slack, adding a new
   clump (or delegating to the immovable allocator for large requests)
   if none has room.  Returns NULL on failure. */
static byte *
i_alloc_string(gs_memory_t * mem, size_t nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    clump_splay_walker sw;

    /*
     * Cycle through the clumps at the current save level, starting
     * with the currently open one.
     */
    clump_t *cp = clump_splay_walk_init_mid(&sw, imem->cc);

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif
    if (cp == 0) {
        /* Open an arbitrary clump. */
        imem->cc = clump_splay_walk_init(&sw, imem);
        alloc_open_clump(imem);
    }
top:
    /* c_alone clumps belong to a single object and never hold strings. */
    if (imem->cc && !imem->cc->c_alone && imem->cc->ctop - imem->cc->cbot > nbytes) {
        if_debug4m('A', mem, "[a%d:+> ]%s(%"PRIuSIZE") = "PRI_INTPTR"\n",
                   alloc_trace_space(imem), client_name_string(cname), nbytes,
                   (intptr_t)(imem->cc->ctop - nbytes));
        str = imem->cc->ctop -= nbytes;   /* carve from the top, growing down */
        gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
        return str;
    }
    /* Try the next clump. */
    cp = clump_splay_walk_fwd(&sw);

    if (cp != NULL)
    {
        alloc_close_clump(imem);
        imem->cc = cp;
        alloc_open_clump(imem);
        goto top;
    }
    if (nbytes > string_space_quanta(SIZE_MAX - sizeof(clump_head_t)) *
        string_data_quantum
        ) {     /* Can't represent the size in a uint! */
        return 0;
    }
    if (nbytes >= imem->large_size) { /* Give it a clump all its own. */
        return i_alloc_string_immovable(mem, nbytes, cname);
    } else {     /* Add another clump. */
        cp = alloc_acquire_clump(imem, (ulong) imem->clump_size, true, "clump");

        if (cp == 0)
            return 0;
        alloc_close_clump(imem);
        imem->cc = clump_splay_walk_init_mid(&sw, cp);
        gs_alloc_fill(imem->cc->cbase, gs_alloc_fill_free,
                      imem->cc->climit - imem->cc->cbase);
        goto top;   /* the fresh clump is guaranteed to have room */
    }
}
1657
/* Allocate an immovable string: give the string a clump all its own,
   marked c_alone so the GC/compactor will not move its contents.
   Returns NULL on allocation failure. */
static byte *
i_alloc_string_immovable(gs_memory_t * mem, size_t nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    size_t asize;
    clump_t *cp;

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif
    /* Give it a clump all its own. */
    asize = string_clump_space(nbytes) + sizeof(clump_head_t);
    cp = alloc_acquire_clump(imem, asize, true, "large string clump");

    if (cp == 0)
        return 0;
    cp->c_alone = true;

    /* The string occupies the top of the clump's data area. */
    str = cp->ctop = cp->climit - nbytes;
    if_debug4m('a', mem, "[a%d|+>L]%s(%"PRIuSIZE") = "PRI_INTPTR"\n",
               alloc_trace_space(imem), client_name_string(cname), nbytes,
               (intptr_t)str);
    gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);

    return Memento_label(str, cname);
}
1685
1686
/* Resize a string to new_num bytes, preserving min(old_num, new_num)
   bytes of data.  If the string is the bottom-most one in the open
   clump (data == ctop) it is resized in place by sliding it, since
   strings grow downward; otherwise a shrink leaves a "lost" hole and
   a grow falls back to allocate-copy-free.  Returns the (possibly
   moved) string, or NULL on failure. */
static byte *
i_resize_string(gs_memory_t * mem, byte * data, size_t old_num, size_t new_num,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *ptr;

    if (old_num == new_num)  /* same size returns the same string */
        return data;

    if ( imem->cc && data == imem->cc->ctop && /* bottom-most string */
        (new_num < old_num ||
         imem->cc->ctop - imem->cc->cbot > new_num - old_num)
        ) {     /* Resize in place. */
        /* Strings grow downward: the new start is below (grow) or
           above (shrink) the old one. */
        ptr = data + old_num - new_num;
        if_debug6m('A', mem, "[a%d:%c> ]%s(%"PRIuSIZE"->%"PRIuSIZE") "PRI_INTPTR"\n",
                   alloc_trace_space(imem),
                   (new_num > old_num ? '>' : '<'),
                   client_name_string(cname), old_num, new_num,
                   (intptr_t)ptr);
        imem->cc->ctop = ptr;
        /* Regions may overlap, so memmove rather than memcpy. */
        memmove(ptr, data, min(old_num, new_num));
#ifdef DEBUG
        if (new_num > old_num)
            gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc,
                          new_num - old_num);
        else
            gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num);
#endif
    } else
        if (new_num < old_num) {
            /* trim the string and create a free space hole */
            ptr = data;
            imem->lost.strings += old_num - new_num;
            gs_alloc_fill(data + new_num, gs_alloc_fill_free,
                          old_num - new_num);
            if_debug5m('A', mem, "[a%d:<> ]%s(%"PRIuSIZE"->%"PRIuSIZE") "PRI_INTPTR"\n",
                       alloc_trace_space(imem), client_name_string(cname),
                       old_num, new_num, (intptr_t)ptr);
        } else {     /* Punt. */
            ptr = gs_alloc_string(mem, new_num, cname);
            if (ptr == 0)
                return 0;
            memcpy(ptr, data, min(old_num, new_num));
            gs_free_string(mem, data, old_num, cname);
        }

    return ptr;
}
1735
1736
static void
1737
i_free_string(gs_memory_t * mem, byte * data, size_t nbytes,
1738
              client_name_t cname)
1739
1.55M
{
1740
1.55M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1741
1742
1.55M
    if (data) {
1743
1.55M
        if (imem->cc && data == imem->cc->ctop) {
1744
1.48M
            if_debug4m('A', mem, "[a%d:-> ]%s(%"PRIuSIZE") "PRI_INTPTR"\n",
1745
1.48M
                       alloc_trace_space(imem), client_name_string(cname), nbytes,
1746
1.48M
                       (intptr_t)data);
1747
1.48M
            imem->cc->ctop += nbytes;
1748
1.48M
        } else {
1749
64.1k
            if_debug4m('A', mem, "[a%d:->#]%s(%"PRIuSIZE") "PRI_INTPTR"\n",
1750
64.1k
                       alloc_trace_space(imem), client_name_string(cname), nbytes,
1751
64.1k
                       (intptr_t)data);
1752
64.1k
            imem->lost.strings += nbytes;
1753
64.1k
        }
1754
1.55M
        gs_alloc_fill(data, gs_alloc_fill_free, nbytes);
1755
1.55M
    }
1756
1.55M
}
1757
1758
/* Return this allocator's stable-memory counterpart (the
   stable_memory member of the memory object). */
static gs_memory_t *
i_stable(gs_memory_t *mem)
{
    gs_memory_t *stable = mem->stable_memory;

    return stable;
}
1763
1764
/* Fill in *pstat with this allocator's status: bytes allocated and
   bytes actually in use (allocated minus free clump space, freelist
   space and lost bytes), folding in the totals saved from previous
   save levels in previous_status. */
static void
i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    size_t unused = imem->lost.refs + imem->lost.strings;
    size_t inner = 0;
    clump_splay_walker sw;
    clump_t *cp;

    /* Close the open clump so its cbot/ctop are up to date for the walk. */
    alloc_close_clump(imem);
    /* Add up unallocated space within each clump. */
    /* Also keep track of space allocated to inner clumps, */
    /* which are included in previous_status.allocated. */
    for (cp = clump_splay_walk_init(&sw, imem); cp != NULL; cp = clump_splay_walk_fwd(&sw))
    {
        unused += cp->ctop - cp->cbot;
        if (cp->outer)
            inner += cp->cend - (byte *) cp->chead;
    }
    unused += compute_free_objects(imem);
    pstat->used = imem->allocated + inner - unused +
        imem->previous_status.used;
    pstat->allocated = imem->allocated +
        imem->previous_status.allocated;
    pstat->max_used = 0;    /* unknown for this allocator */
    pstat->limit = imem->limit;
    pstat->is_thread_safe = false; /* this allocator is not thread safe */
}
1792
1793
static void
1794
i_enable_free(gs_memory_t * mem, bool enable)
1795
331k
{
1796
331k
    if (enable)
1797
165k
        mem->procs.free_object = i_free_object,
1798
165k
            mem->procs.free_string = i_free_string;
1799
165k
    else
1800
165k
        mem->procs.free_object = gs_ignore_free_object,
1801
165k
            mem->procs.free_string = gs_ignore_free_string;
1802
331k
}
1803
1804
/* Overwrite the structure type of an allocated object.  The object
   header sits immediately before the client pointer.  A NULL pointer
   is ignored. */
static void i_set_object_type(gs_memory_t *mem, void *ptr, gs_memory_type_ptr_t type)
{
    obj_header_t *hdr;

    if (ptr != 0) {
        hdr = (obj_header_t *) ptr - 1;
        hdr->o_type = type;
    }
}
1813
1814
/* Deferred-free request: intentionally a no-op for this allocator
   (the procedure exists only to satisfy the gs_memory_t interface). */
static void i_defer_frees(gs_memory_t *mem, int defer)
{
}
1817
1818
/* ------ Internal procedures ------ */
1819
1820
/* Compute the amount of free object space by scanning free lists. */
1821
static size_t
1822
compute_free_objects(gs_ref_memory_t * mem)
1823
204k
{
1824
204k
    size_t unused = mem->lost.objects;
1825
204k
    int i;
1826
1827
    /* Add up space on free lists. */
1828
21.0M
    for (i = 0; i < num_freelists; i++) {
1829
20.8M
        const obj_header_t *pfree;
1830
1831
20.9M
        for (pfree = mem->freelists[i]; pfree != 0;
1832
20.8M
             pfree = *(const obj_header_t * const *)pfree
1833
20.8M
            )
1834
107k
            unused += obj_align_round(pfree[-1].o_size);
1835
20.8M
    }
1836
204k
    return unused;
1837
204k
}
1838
1839
/* Allocate an object from the large-block freelist. */
1840
/* Allocate an object from the large-block freelist. */
static obj_header_t * /* rets obj if allocated, else 0 */
large_freelist_alloc(gs_ref_memory_t *mem, obj_size_t size)
{
    /* Scan large object freelist. We'll grab an object up to 1/8 bigger */
    /* right away, else use best fit of entire scan. */
    obj_size_t aligned_size = obj_align_round(size);
    size_t aligned_min_size = aligned_size + sizeof(obj_header_t);
    size_t aligned_max_size =
        aligned_min_size + obj_align_round(aligned_min_size / 8);
    obj_header_t *best_fit = 0;
    obj_header_t **best_fit_prev = NULL; /* Initialize against indeterminism. */
    obj_size_t best_fit_size = (obj_size_t)SIZE_MAX;
    obj_header_t *pfree;
    obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX];
    size_t largest_size = 0;

    if (aligned_size > mem->largest_free_size)
        return 0;    /* definitely no block large enough */

    /* ppfprev tracks the link slot pointing at pfree, so the chosen
       block can be unlinked in O(1) below. */
    while ((pfree = *ppfprev) != 0) {
        obj_size_t free_size = obj_align_round(pfree[-1].o_size);

        /* An exact fit is taken immediately; otherwise a block must
           leave room for a trailing dummy header (aligned_min_size)
           and beat the best fit seen so far. */
        if (free_size == aligned_size ||
            (free_size >= aligned_min_size && free_size < best_fit_size)
            ) {
            best_fit = pfree;
            best_fit_prev = ppfprev;
            best_fit_size = pfree[-1].o_size;
            if (best_fit_size <= aligned_max_size)
                break; /* good enough fit to spare scan of entire list */
        }
        ppfprev = (obj_header_t **) pfree;
        if (free_size > largest_size)
            largest_size = free_size;
    }
    if (best_fit == 0) {
        /*
         * No single free clump is large enough, but since we scanned the
         * entire list, we now have an accurate updated value for
         * largest_free_size.
         */
        mem->largest_free_size = largest_size;
        return 0;
    }

    /* Remove from freelist & return excess memory to free */
    *best_fit_prev = *(obj_header_t **)best_fit;
    trim_obj(mem, best_fit, aligned_size, (clump_t *)0);

    /* Pre-init block header; o_alone & o_type are already init'd */
    best_fit[-1].o_size = size;

    return best_fit;
}
1894
1895
/* Allocate an object.  This handles all but the fastest, simplest case. */
1896
/* Allocate an object.  This handles all but the fastest, simplest case:
   large/immovable objects get their own clump; otherwise we try the
   large-block freelist, then each clump at the current save level, then
   a brand-new clump, and finally (controlled memory only) scavenging.
   Returns a pointer just past the object header, or 0 on failure. */
static obj_header_t *
alloc_obj(gs_ref_memory_t *mem, obj_size_t lsize, gs_memory_type_ptr_t pstype,
          alloc_flags_t flags, client_name_t cname)
{
    obj_header_t *ptr;

    if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
        /*
         * Give the object a clump all its own.  Note that this case does
         * not occur if is_controlled is true.
         */
        obj_size_t asize =
            ((lsize + obj_align_mask) & -obj_align_mod) +
            sizeof(obj_header_t);
        clump_t *cp =
            alloc_acquire_clump(mem, asize + sizeof(clump_head_t), false,
                                "large object clump");

        /* asize < lsize means the rounded size computation overflowed. */
        if (asize < lsize)
            return 0;
        if (cp == 0)
            return 0;
        cp->c_alone = true;
        ptr = (obj_header_t *) cp->cbot;
        cp->cbot += asize;
        ptr->o_pad = 0;
        ptr->o_alone = 1;
        ptr->o_size = (obj_size_t)lsize;
    } else {
        /*
         * Cycle through the clumps at the current save level, starting
         * with the currently open one.
         */
        clump_splay_walker sw;
        clump_t *cp = clump_splay_walk_init_mid(&sw, mem->cc);
        obj_size_t asize = obj_size_round(lsize);
        bool allocate_success = false;

        if (lsize > max_freelist_size && (flags & ALLOC_DIRECT)) {
            /* We haven't checked the large block freelist yet. */
            if ((ptr = large_freelist_alloc(mem, lsize)) != 0) {
                --ptr;      /* must point to header */
                goto done;
            }
        }

        if (cp == 0) {
            /* Open an arbitrary clump. */
            mem->cc = clump_splay_walk_init(&sw, mem);
            alloc_open_clump(mem);
        }

/* NOTE: this macro has a side effect - it sets ptr to the clump's cbot
   when it evaluates true, so a successful test leaves ptr pointing at
   the allocation spot. */
#define CAN_ALLOC_AT_END(cp)\
  ((cp) && !((cp)->c_alone) && (cp)->ctop - (byte *) (ptr = (obj_header_t *) (cp)->cbot)\
   > asize + sizeof(obj_header_t))

        do {
            if (CAN_ALLOC_AT_END(mem->cc)) {
                allocate_success = true;
                break;
            } else if (mem->is_controlled) {
                /* Try consolidating free space. */
                gs_consolidate_free((gs_memory_t *)mem);
                if (CAN_ALLOC_AT_END(mem->cc)) {
                    allocate_success = true;
                    break;
                }
            }
            /* No luck, go on to the next clump. */
            cp = clump_splay_walk_fwd(&sw);
            if (cp == NULL)
                break;

            alloc_close_clump(mem);
            mem->cc = cp;
            alloc_open_clump(mem);
        }
        while (1);

#ifdef CONSOLIDATE_BEFORE_ADDING_CLUMP
        if (!allocate_success) {
            /*
             * Try consolidating free space before giving up.
             * It's not clear this is a good idea, since it requires quite
             * a lot of computation and doesn't seem to improve things much.
             */
            if (!mem->is_controlled) { /* already did this if controlled */
                clump_t *cp;

                alloc_close_clump(mem);
                for (cp = clump_splay_walk_init_mid(&sw, cp_orig); cp != NULL; cp = clump_splay_walk_fwd(&sw))
                {
                    consolidate_clump_free(cp, mem);
                    if (CAN_ALLOC_AT_END(cp)) {
                        mem->cc = cp;
                        alloc_open_clump(mem);
                        allocate_success = true;
                        break;
                    }
                }
            }
        }
#endif

#undef CAN_ALLOC_AT_END

        if (!allocate_success) {
            /* Add another clump. */
            clump_t *cp =
                alloc_add_clump(mem, mem->clump_size, "clump");

            if (cp) {
                /* mem->cc == cp */
                ptr = (obj_header_t *)cp->cbot;
                allocate_success = true;
            }
        }

        /*
         * If no success, try to scavenge from low free memory. This is
         * only enabled for controlled memory (currently only async
         * renderer) because it's too much work to prevent it from
         * examining outer save levels in the general case.
         */
        if (allocate_success)
            mem->cc->cbot = (byte *) ptr + asize;
        else if (!mem->is_controlled ||
                 (ptr = scavenge_low_free(mem, lsize)) == 0)
            return 0; /* allocation failed */
        ptr->o_pad = 0;
        ptr->o_alone = 0;
        ptr->o_size = lsize;
    }
done:
    ptr->o_type = pstype;
#   if IGC_PTR_STABILITY_CHECK
        ptr->d.o.space_id = mem->space_id;
#   endif
    ptr++;   /* advance past the header to the client-visible object */
    gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize);
    return Memento_label(ptr, cname);
}
2038
2039
/*
2040
 * Consolidate free objects contiguous to free space at cbot onto the cbot
2041
 * area. Also keep track of end of highest internal free object
2042
 * (int_freed_top).
2043
 */
2044
/*
 * Consolidate free objects contiguous to free space at cbot onto the cbot
 * area. Also keep track of end of highest internal free object
 * (int_freed_top).
 */
static void
consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem)
{
    obj_header_t *begin_free = 0;

    cp->int_freed_top = cp->cbase;  /* below all objects in clump */
    /* SCAN_CLUMP_OBJECTS binds each object's header as 'pre'. */
    SCAN_CLUMP_OBJECTS(cp)
    DO_ALL
        if (pre->o_type == &st_free) {
            /* Remember the start of a contiguous run of free objects. */
            if (begin_free == 0)
                begin_free = pre;
        } else {
            if (begin_free)
                cp->int_freed_top = (byte *)pre; /* first byte following internal free */
            begin_free = 0;
        }
    END_OBJECTS_SCAN
    if (begin_free) {
        /* We found free objects at the top of the object area. */
        /* Remove the free objects from the freelists. */
        remove_range_from_freelist(mem, begin_free, cp->cbot);
        if_debug4m('a', (const gs_memory_t *)mem,
                   "[a]resetting clump "PRI_INTPTR" cbot from "PRI_INTPTR" to "PRI_INTPTR" (%lu free)\n",
                   (intptr_t)cp, (intptr_t)cp->cbot, (intptr_t)begin_free,
                   (intptr_t)((byte *)cp->cbot - (byte *)begin_free));
        cp->cbot = (byte *) begin_free;
    }
}
2072
2073
/* Splay-tree visitor: consolidate one clump's free objects, and free
   the clump entirely if it turns out to be empty (uncontrolled memory
   only), clearing mem->cc if it was the current clump. */
static splay_app_result_t
consolidate(clump_t *cp, void *arg)
{
    gs_ref_memory_t *mem = (gs_ref_memory_t *)arg;

    consolidate_clump_free(cp, mem);
    if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
        /* The entire clump is free. */
        if (!mem->is_controlled) {
            alloc_free_clump(cp, mem);
            if (mem->cc == cp)
                mem->cc = NULL;
        }
    }

    return SPLAY_APP_CONTINUE;
}
2090
2091
/* Consolidate free objects. */
2092
/* Consolidate free objects across all clumps of the allocator.
   The current clump is closed before the walk and reopened after;
   if consolidation freed the current clump, fall back to the root. */
void
ialloc_consolidate_free(gs_ref_memory_t *mem)
{
    alloc_close_clump(mem);

    /* We used to visit clumps in reverse order to encourage LIFO behavior,
     * but with binary trees this is not possible (unless you want to
     * either change the tree during the process, recurse, or otherwise
     * hold the state). */
    clump_splay_app(mem->root, mem, consolidate, mem);

    /* NOTE: Previously, if we freed the current clump, we'd move to whatever the
     * bigger of it's children was. We now just move to the root. */
    if (mem->cc == NULL)
        mem->cc = mem->root;

    alloc_open_clump(mem);
}
2110
static void
2111
i_consolidate_free(gs_memory_t *mem)
2112
0
{
2113
0
    ialloc_consolidate_free((gs_ref_memory_t *)mem);
2114
0
}
2115
2116
/* State carried through the scavenge() splay-tree visitor. */
typedef struct
{
    uint need_free;             /* contiguous free bytes required (incl. dummy header) */
    obj_header_t *found_pre;    /* header of reclaimed block, NULL if none found */
    gs_ref_memory_t *mem;       /* allocator being scavenged */
    obj_size_t request_size;    /* caller's original (untrimmed) request */
} scavenge_data;
2123
2124
/* Splay-tree visitor: look in one clump for a contiguous run of free
   objects totalling at least sd->need_free bytes.  On success the run
   is pulled off the freelists, relabelled as a single free block in
   sd->found_pre, trimmed to the requested size, and the walk stops. */
static splay_app_result_t
scavenge(clump_t *cp, void *arg)
{
    scavenge_data *sd = (scavenge_data *)arg;
    obj_header_t *begin_free = NULL;
    obj_size_t found_free = 0;

    sd->found_pre = NULL;

    /* SCAN_CLUMP_OBJECTS binds each object's header as 'pre'. */
    SCAN_CLUMP_OBJECTS(cp)
    DO_ALL
        if (pre->o_type == &st_free) {
            if (begin_free == 0) {
                found_free = 0;
                begin_free = pre;
            }
            found_free += pre_obj_rounded_size(pre);
            if (begin_free != 0 && found_free >= sd->need_free)
                break;
        } else
            begin_free = 0;  /* run broken by a live object */
    END_OBJECTS_SCAN_NO_ABORT

    if (begin_free != 0 && found_free >= sd->need_free) {
        /* Fish found pieces out of various freelists */
        remove_range_from_freelist(sd->mem, (char*)begin_free,
                                   (char*)begin_free + found_free);

        /* Prepare found object */
        sd->found_pre = begin_free;
        sd->found_pre->o_type = &st_free;  /* don't confuse GC if gets lost */
        sd->found_pre->o_size = found_free - sizeof(obj_header_t);

        /* Chop off excess tail piece & toss it back into free pool */
        trim_obj(sd->mem, sd->found_pre + 1, sd->request_size, cp);
        return SPLAY_APP_STOP;
    }

    return SPLAY_APP_CONTINUE;
}
2164
2165
/* try to free-up given amount of space from freespace below clump base */
2166
/* try to free-up given amount of space from freespace below clump base */
static obj_header_t * /* returns uninitialized object hdr, NULL if none found */
scavenge_low_free(gs_ref_memory_t *mem, obj_size_t request_size)
{
    /* find 1st range of memory that can be glued back together to fill request */
    scavenge_data sd;
    obj_size_t request_size_rounded = obj_size_round(request_size);

    sd.found_pre = 0;
    sd.need_free = request_size_rounded + sizeof(obj_header_t);    /* room for GC's dummy hdr */
    sd.mem = mem;
    sd.request_size = request_size;

    /* Walk every clump; scavenge() stops the walk on first success. */
    clump_splay_app(mem->root, mem, scavenge, &sd);
    return sd.found_pre;
}
2181
2182
/* Remove range of memory from a mem's freelists */
2183
/* Remove range of memory from a mem's freelists.  [bottom, top) must
   contain only free objects.  First pass counts how many objects of
   each freelist index lie in the range; second pass unlinks exactly
   that many from each affected list.  lost.objects is decreased by
   the bytes not recovered via freelist removal. */
static void
remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top)
{
    int num_free[num_freelists];
    int smallest = num_freelists, largest = -1;  /* active index window */
    const obj_header_t *cur;
    obj_size_t size;
    int i;
    obj_size_t removed = 0;

    /*
     * Scan from bottom to top, a range containing only free objects,
     * counting the number of objects of each size.
     */

    for (cur = bottom; cur != top;
         cur = (const obj_header_t *)
             ((const byte *)cur + obj_size_round(size))
        ) {
        size = cur->o_size;
        i = (size > max_freelist_size ? LARGE_FREELIST_INDEX :
             (size + obj_align_mask) >> log2_obj_align_mod);
        if (i < smallest) {
            /*
             * 0-length free blocks aren't kept on any list, because
             * they don't have room for a pointer.
             */
            if (i == 0)
                continue;
            /* Zero the newly exposed slots below the old smallest. */
            if (smallest < num_freelists)
                memset(&num_free[i], 0, (smallest - i) * sizeof(int));
            else
                num_free[i] = 0;
            smallest = i;
        }
        if (i > largest) {
            /* Zero the newly exposed slots above the old largest. */
            if (largest >= 0)
                memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int));
            largest = i;
        }
        num_free[i]++;
    }

    /*
     * Remove free objects from the freelists, adjusting lost.objects by
     * subtracting the size of the region being processed minus the amount
     * of space reclaimed.
     */

    for (i = smallest; i <= largest; i++) {
        int count = num_free[i];
        obj_header_t *pfree;
        obj_header_t **ppfprev;

        if (!count)
            continue;
        ppfprev = &mem->freelists[i];
        /* Walk list i, unlinking entries in [bottom, top) until all
           'count' counted objects have been removed. */
        for (;;) {
            pfree = *ppfprev;
            if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) {
                /* We're removing an object. */
                *ppfprev = *(obj_header_t **) pfree;
                removed += obj_align_round(pfree[-1].o_size);
                if (!--count)
                    break;
            } else
                ppfprev = (obj_header_t **) pfree;
        }
    }
    mem->lost.objects -= (char*)top - (char*)bottom - removed;
}
2254
2255
/* Trim a memory object down to a given size */
2256
/* Trim a memory object down to a given size.  The excess tail either
   shrinks the clump (for a stand-alone object), is linked onto a
   freelist (if big enough to hold a link pointer), or is accounted
   as lost. */
static void
trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, obj_size_t size, clump_t *cp)
/* Obj must have rounded size == req'd size, or have enough room for */
/* trailing dummy obj_header */
{
    obj_size_t rounded_size = obj_align_round(size);
    obj_header_t *pre_obj = obj - 1;
    /* Header of the excess piece, placed right after the trimmed object. */
    obj_header_t *excess_pre = (obj_header_t*)((char*)obj + rounded_size);
    obj_size_t old_rounded_size = obj_align_round(pre_obj->o_size);
    obj_size_t excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t);

    /* trim object's size to desired */
    pre_obj->o_size = size;
    if (old_rounded_size == rounded_size)
        return;  /* nothing more to do here */
    /*
     * If the object is alone in its clump, move cbot to point to the end
     * of the object.
     */
    if (pre_obj->o_alone) {
        if (!cp) {
            /* Caller didn't supply the clump; look it up. */
            mem->cfreed.memory = mem;
            if (clump_locate(obj, &mem->cfreed)) {
                cp = mem->cfreed.cp;
            }
        }
        if (cp) {
#ifdef DEBUG
            if (cp->cbot != (byte *)obj + old_rounded_size) {
                lprintf3("resizing "PRI_INTPTR", old size %u, new size %u, cbot wrong!\n",
                         (intptr_t)obj, old_rounded_size, size);
                /* gs_abort */
            } else
#endif
                {
                    cp->cbot = (byte *)excess_pre;
                    return;
                }
        }
        /*
         * Something very weird is going on.  This probably shouldn't
         * ever happen, but if it does....
         */
        pre_obj->o_pad = 0;
        pre_obj->o_alone = 0;
    }
    /* make excess into free obj */
    excess_pre->o_type = &st_free;  /* don't confuse GC */
    excess_pre->o_size = excess_size;
    excess_pre->o_pad = 0;
    excess_pre->o_alone = 0;
    if (excess_size >= obj_align_mod) {
        /* Put excess object on a freelist */
        obj_header_t **pfl;

        if (mem->cc && (byte *)excess_pre >= mem->cc->int_freed_top)
            mem->cc->int_freed_top = (byte *)excess_pre + excess_size;
        if (excess_size <= max_freelist_size)
            pfl = &mem->freelists[(excess_size + obj_align_mask) >>
                                 log2_obj_align_mod];
        else {
            uint rounded_size = obj_align_round(excess_size);

            pfl = &mem->freelists[LARGE_FREELIST_INDEX];
            if (rounded_size > mem->largest_free_size)
                mem->largest_free_size = rounded_size;
        }
        /* Link the excess block (client area holds the next pointer). */
        *(obj_header_t **) (excess_pre + 1) = *pfl;
        *pfl = excess_pre + 1;
        mem->cfreed.memory = mem;
    } else {
        /* excess piece will be "lost" memory */
        mem->lost.objects += excess_size + sizeof(obj_header_t);
    }
}
2331
2332
/* ================ Roots ================ */
2333
2334
/* Register a root. */
2335
/*
 * Register a GC root.  If rpp is NULL or *rpp is NULL, a root structure
 * is allocated from non-GC memory and flagged so that unregistering it
 * frees it again; otherwise the caller-supplied structure is reused.
 * The root is pushed onto the head of imem->roots.
 * Returns 0 on success or gs_error_VMerror if allocation fails.
 */
static int
i_register_root(gs_memory_t * mem, gs_gc_root_t ** rpp, gs_ptr_type_t ptype,
                void **up, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    gs_gc_root_t *rp;

    if (rpp == NULL || *rpp == NULL) {
        /* We own the root structure; mark it to be freed on unregister. */
        rp = gs_raw_alloc_struct_immovable(imem->non_gc_memory, &st_gc_root_t,
                                           "i_register_root");
        if (rp == 0)
            return_error(gs_error_VMerror);
        rp->free_on_unregister = true;
        if (rpp && *rpp == NULL)
            *rpp = rp;
    } else {
        /* Caller supplied the structure and retains ownership. */
        rp = *rpp;
        rp->free_on_unregister = false;
    }
    if_debug3m('8', mem, "[8]register root(%s) "PRI_INTPTR" -> "PRI_INTPTR"\n",
               client_name_string(cname), (intptr_t)rp, (intptr_t)up);
    rp->ptype = ptype;
    rp->p = up;
    /* Push onto the head of the allocator's singly-linked root list. */
    rp->next = imem->roots;
    imem->roots = rp;
    return 0;
}
2362
2363
/* Unregister a root. */
2364
static void
2365
i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
2366
310k
{
2367
310k
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
2368
310k
    gs_gc_root_t **rpp = &imem->roots;
2369
2370
310k
    if_debug2m('8', mem, "[8]unregister root(%s) "PRI_INTPTR"\n",
2371
310k
               client_name_string(cname), (intptr_t)rp);
2372
310k
    while (*rpp != rp)
2373
0
        rpp = &(*rpp)->next;
2374
310k
    *rpp = (*rpp)->next;
2375
310k
    if (rp->free_on_unregister)
2376
0
        gs_free_object(imem->non_gc_memory, rp, "i_unregister_root");
2377
310k
}
2378
2379
/* ================ clumps ================ */
2380
2381
/* Define the public structure descriptor st_clump, used when */
/* allocating clump_t structures (see alloc_acquire_clump). */
public_st_clump();
2382
2383
/* Insert a clump in the chain.  This is exported for the GC and for */
2384
/* the forget_save operation. */
2385
void
alloc_link_clump(clump_t * cp, gs_ref_memory_t * imem)
{
    /* Insert the clump into the allocator's splay tree and sanity-check it. */
    splay_insert(cp, imem);
    SANITY_CHECK(cp);
}
2391
2392
/* Add a clump for ordinary allocation. */
2393
static clump_t *
2394
alloc_add_clump(gs_ref_memory_t * mem, size_t csize, client_name_t cname)
2395
759k
{
2396
759k
    clump_t *cp = alloc_acquire_clump(mem, csize, true, cname);
2397
2398
759k
    if (cp) {
2399
759k
        alloc_close_clump(mem);
2400
759k
        mem->cc = cp;
2401
759k
        gs_alloc_fill(mem->cc->cbase, gs_alloc_fill_free,
2402
759k
                      mem->cc->climit - mem->cc->cbase);
2403
759k
    }
2404
759k
    return cp;
2405
759k
}
2406
2407
/* Acquire a clump.  If we would exceed MaxLocalVM (if relevant), */
2408
/* or if we would exceed the VMThreshold and psignal is NULL, */
2409
/* return 0; if we would exceed the VMThreshold but psignal is valid, */
2410
/* just set the signal and return successfully. */
2411
static clump_t *
alloc_acquire_clump(gs_ref_memory_t * mem, size_t csize, bool has_strings,
                    client_name_t cname)
{
    gs_memory_t *parent = mem->non_gc_memory;
    clump_t *cp;
    byte *cdata;

#if ARCH_SIZEOF_LONG > ARCH_SIZEOF_INT
    /* If csize is larger than max_uint, punt. */
    if (csize != (uint) csize)
        return 0;
#endif
    /* Allocate the clump structure itself from the parent allocator. */
    cp = gs_raw_alloc_struct_immovable(parent, &st_clump, cname);

    /* gc_status.signal_value is initialised to zero when the
     * allocator is created, only the Postscript interpreter
     * (which implement garbage collection) takes the action to set
     * it to anything other than zero
     */
    if( mem->gc_status.signal_value != 0) {
        /* we have a garbage collector */
        if (mem->allocated >= mem->limit) {
            /* Over the VM threshold: record the request and signal a GC. */
            mem->gc_status.requested += csize;
            if (mem->limit >= mem->gc_status.max_vm) {
                /* Hard MaxVM limit reached: fail the acquisition. */
                gs_free_object(parent, cp, cname);
                return 0;
            }
            if_debug4m('0', (const gs_memory_t *)mem,
                       "[0]signaling space=%d, allocated=%"PRIdSIZE", limit=%"PRIdSIZE", requested=%"PRIdSIZE"\n",
                       mem->space, mem->allocated,
                       mem->limit, mem->gc_status.requested);
            mem->gs_lib_ctx->gcsignal = mem->gc_status.signal_value;
        }
    }
    /* Allocate the clump's data area. */
    cdata = gs_alloc_bytes_immovable(parent, csize, cname);
    if (cp == 0 || cdata == 0) {
        /* Either allocation failed: release both (gs_free_object on a
         * possibly-NULL pointer is harmless) and report the request. */
        gs_free_object(parent, cdata, cname);
        gs_free_object(parent, cp, cname);
        mem->gc_status.requested = csize;
        return 0;
    }
    alloc_init_clump(cp, cdata, cdata + csize, has_strings, (clump_t *) 0);
    alloc_link_clump(cp, mem);
    /* Account for both the clump structure and its data. */
    mem->allocated += st_clump.ssize + csize;
    SANITY_CHECK(cp);
    return cp;
}
2459
2460
/* Initialize the pointers in a clump.  This is exported for save/restore. */
2461
/* The bottom pointer must be aligned, but the top pointer need not */
2462
/* be aligned. */
2463
void
alloc_init_clump(clump_t * cp, byte * bot, byte * top, bool has_strings,
                 clump_t * outer)
{
    byte *cdata = bot;

    /* An inner clump counts against its enclosing (outer) clump. */
    if (outer != 0)
        outer->inner_count++;
    /* The clump head occupies the first bytes of the data area. */
    cp->chead = (clump_head_t *) cdata;
    cdata += sizeof(clump_head_t);
    cp->cbot = cp->cbase = cp->int_freed_top = cdata;
    cp->cend = top;
    cp->rcur = 0;
    cp->rtop = 0;
    cp->outer = outer;
    cp->inner_count = 0;
    cp->has_refs = false;
    cp->sbase = cdata;
    cp->c_alone = false; /* should be set correctly by caller */
    if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
        /*
         * We allocate a large enough string marking and reloc table
         * to cover the entire clump.
         */
        uint nquanta = string_space_quanta(top - cdata);

        cp->climit = cdata + nquanta * string_data_quantum;
        cp->smark = cp->climit;
        cp->smark_size = string_quanta_mark_size(nquanta);
        cp->sreloc =
            (string_reloc_offset *) (cp->smark + cp->smark_size);
        cp->sfree1 = (uint *) cp->sreloc;
    } else {
        /* No strings, don't need the string GC tables. */
        cp->climit = cp->cend;
        cp->sfree1 = 0;
        cp->smark = 0;
        cp->smark_size = 0;
        cp->sreloc = 0;
    }
    /* Strings grow downward from climit toward ctop. */
    cp->ctop = cp->climit;
    alloc_init_free_strings(cp);
}
2506
2507
/* Initialize the string freelists in a clump. */
2508
void
alloc_init_free_strings(clump_t * cp)
{
    /* Zero the string freelist table, if this clump has one. */
    if (cp->sfree1)
        memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
    cp->sfree = 0;
}
2515
2516
/* Close up the current clump. */
2517
/* This is exported for save/restore and the GC. */
2518
void
alloc_close_clump(gs_ref_memory_t * mem)
{
    /* In non-DEBUG builds this is a no-op; with the 'A' debug flag */
    /* set it traces the clump being closed. */
#ifdef DEBUG
    if (gs_debug_c('A')) {
        dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem));
        dmprintf_clump((const gs_memory_t *)mem, "closing clump", mem->cc);
    }
#endif
}
2528
2529
/* Reopen the current clump after a GC or restore. */
2530
void
alloc_open_clump(gs_ref_memory_t * mem)
{
    /* In non-DEBUG builds this is a no-op; with the 'A' debug flag */
    /* set it traces the clump being reopened. */
#ifdef DEBUG
    if (gs_debug_c('A')) {
        dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem));
        dmprintf_clump((const gs_memory_t *)mem, "opening clump", mem->cc);
    }
#endif
}
2540
2541
#ifdef DEBUG
2542
static splay_app_result_t
2543
check_in_clump(clump_t *cp, void *arg)
2544
{
2545
    clump_t **cpp = (clump_t **)arg;
2546
2547
    if (*cpp != cp)
2548
        return SPLAY_APP_CONTINUE;
2549
    *cpp = NULL;
2550
2551
    return SPLAY_APP_STOP;
2552
}
2553
#endif
2554
2555
/* Remove a clump from the chain.  This is exported for the GC. */
2556
void
alloc_unlink_clump(clump_t * cp, gs_ref_memory_t * mem)
{
    SANITY_CHECK_MID(cp);
#ifdef DEBUG
    if (gs_alloc_debug) { /* Check to make sure this clump belongs to this allocator. */
        clump_t *found = cp;
        clump_splay_app(mem->root, mem, check_in_clump, &found);

        /* check_in_clump NULLs 'found' when the clump is in the tree. */
        if (found != NULL) {
            mlprintf2((const gs_memory_t *)mem, "unlink_clump "PRI_INTPTR" not owned by memory "PRI_INTPTR"!\n",
                      (intptr_t)cp, (intptr_t)mem);
            return;   /*gs_abort(); */
        }
    }
#endif
    (void)clump_splay_remove(cp, mem);
    /* If this was the current clump, clear the cached pointer. */
    if (mem->cc == cp) {
        mem->cc = NULL;
    }
}
2577
2578
/*
2579
 * Free a clump.  This is exported for the GC.  Since we eventually use
2580
 * this to free the clump containing the allocator itself, we must be
2581
 * careful not to reference anything in the allocator after freeing the
2582
 * clump data.
2583
 */
2584
void
alloc_free_clump(clump_t * cp, gs_ref_memory_t * mem)
{
    gs_memory_t *parent = mem->non_gc_memory;
    byte *cdata = (byte *)cp->chead;
    ulong csize = (byte *)cp->cend - cdata;

    alloc_unlink_clump(cp, mem);
    mem->allocated -= st_clump.ssize;
    /* Invalidate the clump-locator cache if it points at this clump. */
    if (mem->cfreed.cp == cp)
        mem->cfreed.cp = 0;
    if (cp->outer == 0) {
        /* Top-level clump: its data was allocated separately; free it. */
        mem->allocated -= csize;
        gs_free_object(parent, cdata, "alloc_free_clump(data)");
    } else {
        /* Inner clump: data belongs to the outer clump; just debug-fill. */
        cp->outer->inner_count--;
        gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
    }
    gs_free_object(parent, cp, "alloc_free_clump(clump struct)");
}
2604
2605
/* Find the clump for a pointer. */
2606
/* Note that this only searches the current save level. */
2607
/* Since a given save level can't contain both a clump and an inner clump */
2608
/* of that clump, we can stop when is_within_clump succeeds, and just test */
2609
/* is_in_inner_clump then. */
2610
bool
2611
clump_locate_ptr(const void *ptr, clump_locator_t * clp)
2612
60.0M
{
2613
60.0M
    clump_t *cp = clp->memory->root;
2614
2615
155M
    while (cp)
2616
153M
    {
2617
153M
        if (PTR_LT(ptr, cp->cbase))
2618
50.4M
        {
2619
50.4M
            cp = cp->left;
2620
50.4M
            continue;
2621
50.4M
        }
2622
103M
        if (PTR_GE(ptr, cp->cend))
2623
45.3M
        {
2624
45.3M
            cp = cp->right;
2625
45.3M
            continue;
2626
45.3M
        }
2627
        /* Found it! */
2628
57.9M
        splay_move_to_root(cp, clp->memory);
2629
57.9M
        clp->cp = cp;
2630
57.9M
        return !ptr_is_in_inner_clump(ptr, cp);
2631
103M
    }
2632
2.10M
    return false;
2633
60.0M
}
2634
2635
/* Report whether ptr falls inside any clump of this allocator, */
/* splaying the containing clump to the root when found. */
bool ptr_is_within_mem_clumps(const void *ptr, gs_ref_memory_t *mem)
{
    clump_t *cp;

    /* Descend the splay tree by address until we land inside a clump
     * or fall off the tree. */
    for (cp = mem->root; cp != NULL;) {
        if (PTR_LT(ptr, cp->cbase))
            cp = cp->left;
        else if (PTR_GE(ptr, cp->cend))
            cp = cp->right;
        else {
            /* ptr is inside this clump's [cbase, cend) range. */
            splay_move_to_root(cp, mem);
            return true;
        }
    }
    return false;
}
2657
2658
/* ------ Debugging ------ */
2659
2660
#ifdef DEBUG
2661
2662
#include "string_.h"
2663
2664
static inline bool
2665
obj_in_control_region(const void *obot, const void *otop,
2666
                      const dump_control_t *pdc)
2667
{
2668
    return
2669
        ((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) &&
2670
         (pdc->top == NULL || PTR_LT(obot, pdc->top)));
2671
}
2672
2673
/* Default dump options: flag set dump_do_default, no address window. */
const dump_control_t dump_control_default =
{
    dump_do_default, NULL, NULL
};
/* Dump everything: strings, type addresses, pointers, pointed-to */
/* strings, and raw object contents. */
const dump_control_t dump_control_all =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings | dump_do_contents, NULL, NULL
};

/* Like dump_control_all but omitting raw object contents. */
const dump_control_t dump_control_no_contents =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings, NULL, NULL
};
2688
2689
/*
2690
 * Internal procedure to dump a block of memory, in hex and optionally
2691
 * also as characters.
2692
 */
2693
static void
2694
debug_indent(const gs_memory_t *mem, int indent)
2695
{
2696
    int i;
2697
2698
    for (i = indent; i > 0; --i)
2699
        dmputc(mem, ' ');
2700
}
2701
static void
debug_dump_contents(const gs_memory_t *mem, const byte * bot,
                    const byte * top, int indent, bool as_chars)
{
    const byte *block;

#define block_size 16

    if (bot >= top)
        return;
    /* Start from the 16-byte-aligned address at or below bot. */
    for (block = bot - ((bot - (byte *) 0) & (block_size - 1));
         block < top; block += block_size
        ) {
        int i;
        char label[12];

        /* Check for repeated blocks. */
        if (block >= bot + block_size &&
            block <= top - (block_size * 2) &&
            !memcmp(block, block - block_size, block_size) &&
            !memcmp(block, block + block_size, block_size)
            ) {
            /* Print the "..." elision marker only for the first
             * repeated block of a run. */
            if (block < bot + block_size * 2 ||
                memcmp(block, block - block_size * 2, block_size)
                ) {
                debug_indent(mem, indent);
                dmputs(mem, "  ...\n");
            }
            continue;
        }
        gs_snprintf(label, sizeof(label), PRI_INTPTR":", (intptr_t)block);
        debug_indent(mem, indent);
        dmputs(mem, label);
        /* Hex row: bytes outside [bot, top) print as blanks. */
        for (i = 0; i < block_size; ++i) {
            const char *sepr = ((i & 3) == 0 && i != 0 ? "  " : " ");

            dmputs(mem, sepr);
            if (block + i >= bot && block + i < top)
                dmprintf1(mem, "%02x", block[i]);
            else
                dmputs(mem, "  ");
        }
        dmputc(mem, '\n');
        if (as_chars) {
            /* Character row, aligned under the hex row. */
            debug_indent(mem, indent + strlen(label));
            for (i = 0; i < block_size; ++i) {
                byte ch;

                if ((i & 3) == 0 && i != 0)
                    dmputc(mem, ' ');
                /* Only printable ASCII (32..126) is shown as a char. */
                if (block + i >= bot && block + i < top &&
                    (ch = block[i]) >= 32 && ch <= 126
                    )
                    dmprintf1(mem, "  %c", ch);
                else
                    dmputs(mem, "   ");
            }
            dmputc(mem, '\n');
        }
    }
#undef block_size
}
2763
2764
/* Print one object with the given options. */
2765
/* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
2766
/* contents. */
2767
void
debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control)
{
    const obj_header_t *pre = ((const obj_header_t *)obj) - 1;
    ulong size = pre_obj_contents_size(pre);
    const gs_memory_struct_type_t *type = pre->o_type;
    dump_options_t options = control->options;

    dmprintf3(mem, "  pre="PRI_INTPTR"(obj="PRI_INTPTR") size=%lu",
              (intptr_t) pre, (intptr_t) obj, size);
    /* How to display the type depends on the two type-display flags. */
    switch (options & (dump_do_type_addresses | dump_do_no_types)) {
    case dump_do_type_addresses + dump_do_no_types: /* addresses only */
        dmprintf1(mem, " type="PRI_INTPTR"", (intptr_t) type);
        break;
    case dump_do_type_addresses:  /* addresses & names */
        dmprintf2(mem, " type=%s("PRI_INTPTR")", struct_type_name_string(type),
                 (intptr_t)type);
        break;
    case 0:   /* names only */
        dmprintf1(mem, " type=%s", struct_type_name_string(type));
        /* fall through */
    case dump_do_no_types:  /* nothing */
        ;
    }
    if (options & dump_do_marks) {
        dmprintf2(mem, " smark/back=%u (0x%x)", pre->o_smark, pre->o_smark);
    }
    dmputc(mem, '\n');
    /* Free objects have no meaningful pointers or contents. */
    if (type == &st_free)
        return;
    if (options & dump_do_pointers) {
        struct_proc_enum_ptrs((*proc)) = type->enum_ptrs;
        uint index = 0;
        enum_ptr_t eptr;
        gs_ptr_type_t ptype;

        if (proc != gs_no_struct_enum_ptrs) {
            if (proc != 0) {
                /* Enumerate and print each traced pointer in the object. */
                for (; (ptype = (*proc)(mem, pre + 1, size, index, &eptr, type, NULL)) != 0;
                     ++index
                     ) {
                    const void *ptr = eptr.ptr;

                    dmprintf1(mem, "    ptr %u: ", index);
                    if (ptype == ptr_string_type || ptype == ptr_const_string_type) {
                        const gs_const_string *str = (const gs_const_string *)&eptr;
                        if (!str)
                            dmprintf(mem, "0x0");
                        else
                            dmprintf2(mem, PRI_INTPTR "(%u)", (intptr_t)str->data, str->size);
                        if (options & dump_do_pointed_strings) {
                            dmputs(mem, " =>\n");
                            if (!str)
                                dmprintf(mem, "(null)\n");
                            else
                                debug_dump_contents(mem, str->data, str->data + str->size, 6,
                                                    true);
                        } else {
                            dmputc(mem, '\n');
                        }
                    } else {
                        /* Parenthesize pointers that point back into the
                         * object itself. */
                        dmprintf1(mem, (PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ?
                                  "("PRI_INTPTR")\n" : PRI_INTPTR "\n"), (intptr_t) ptr);
                    }
                }
            } else { /* proc == 0 */
                dmprintf(mem, "previous line should be a ref\n");
            }
        } /* proc != gs_no_struct_enum_ptrs */
    }
    if (options & dump_do_contents) {
        debug_dump_contents(mem, (const byte *)obj, (const byte *)obj + size,
                            0, false);
    }
}
2841
2842
/* Print the contents of a clump with the given options. */
2843
/* Relevant options: all. */
2844
void
debug_dump_clump(const gs_memory_t *mem, const clump_t * cp, const dump_control_t * control)
{
    /* Header: all the clump's bookkeeping pointers. */
    dmprintf1(mem, "clump at "PRI_INTPTR":\n", (intptr_t) cp);
    dmprintf3(mem, "   chead="PRI_INTPTR"  cbase="PRI_INTPTR" sbase="PRI_INTPTR"\n",
              (intptr_t)cp->chead, (intptr_t)cp->cbase, (intptr_t)cp->sbase);
    dmprintf3(mem, "    rcur="PRI_INTPTR"   rtop="PRI_INTPTR"  cbot="PRI_INTPTR"\n",
              (intptr_t)cp->rcur, (intptr_t)cp->rtop, (intptr_t)cp->cbot);
    dmprintf4(mem, "    ctop="PRI_INTPTR" climit="PRI_INTPTR" smark="PRI_INTPTR", size=%u\n",
              (intptr_t)cp->ctop, (intptr_t)cp->climit, (intptr_t)cp->smark,
              cp->smark_size);
    dmprintf2(mem, "  sreloc="PRI_INTPTR"   cend="PRI_INTPTR"\n",
              (intptr_t)cp->sreloc, (intptr_t)cp->cend);
    dmprintf6(mem, "left="PRI_INTPTR" right="PRI_INTPTR" parent="PRI_INTPTR" outer="PRI_INTPTR" inner_count=%u has_refs=%s\n",
              (intptr_t)cp->left, (intptr_t)cp->right, (intptr_t)cp->parent, (intptr_t)cp->outer,
              cp->inner_count, (cp->has_refs ? "true" : "false"));

    dmprintf2(mem, "  sfree1="PRI_INTPTR"   sfree="PRI_INTPTR"\n",
              (intptr_t)cp->sfree1, (intptr_t)cp->sfree);
    /* String region [ctop, climit), clipped to the control window. */
    if (control->options & dump_do_strings) {
        debug_dump_contents(mem, (control->bottom == 0 ? cp->ctop :
                             max(control->bottom, cp->ctop)),
                            (control->top == 0 ? cp->climit :
                             min(control->top, cp->climit)),
                            0, true);
    }
    /* Walk every object in the clump and dump those in the window. */
    SCAN_CLUMP_OBJECTS(cp)
        DO_ALL
        if (obj_in_control_region(pre + 1,
                                  (const byte *)(pre + 1) + size,
                                  control)
        )
        debug_print_object(mem, pre + 1, control);
    END_OBJECTS_SCAN_NO_ABORT
}
2879
void
2880
debug_print_clump(const gs_memory_t *mem, const clump_t * cp)
2881
{
2882
    dump_control_t control;
2883
2884
    control = dump_control_default;
2885
    debug_dump_clump(mem, cp, &control);
2886
}
2887
2888
/* Print the contents of all clumps managed by an allocator. */
2889
/* Relevant options: all. */
2890
void
2891
debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
2892
{
2893
    const clump_t *cp;
2894
    clump_splay_walker sw;
2895
2896
    for (cp = clump_splay_walk_init(&sw, mem); cp != NULL; cp = clump_splay_walk_fwd(&sw))
2897
    {
2898
        if (obj_in_control_region(cp->cbase, cp->cend, control))
2899
            debug_dump_clump((const gs_memory_t *)mem, cp, control);
2900
    }
2901
}
2902
2903
void
debug_dump_allocator(const gs_ref_memory_t *mem)
{
    /* Summary dump: everything except raw object contents. */
    debug_dump_memory(mem, &dump_control_no_contents);
}
2908
2909
/* Find all the objects that contain a given pointer. */
2910
void
debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
{
    clump_splay_walker sw;
    dump_control_t control;
    const clump_t *cp;

    /* Minimal dump options: no flags set. */
    control.options = 0;
    for (cp = clump_splay_walk_init(&sw, mem); cp; cp = clump_splay_walk_fwd(&sw))
    {
        /* For every object in every clump, enumerate its traced
         * pointers and report any that equal 'target'. */
        SCAN_CLUMP_OBJECTS(cp);
        DO_ALL
            struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
            uint index = 0;
            enum_ptr_t eptr;

            if (proc)   /* doesn't trace refs NB fix me. */
                for (; (*proc)((const gs_memory_t *)mem, pre + 1, size, index,
                               &eptr, pre->o_type, NULL);
                     ++index)
                    if (eptr.ptr == target) {
                        dmprintf1((const gs_memory_t *)mem, "Index %d in", index);
                        debug_print_object((const gs_memory_t *)mem, pre + 1, &control);
                    }
        END_OBJECTS_SCAN_NO_ABORT
    }
}
2937
/* Recursively dump the clump splay tree, checking parent back-links. */
static void ddct(const gs_memory_t *mem, clump_t *cp, clump_t *parent, int depth)
{
    int level = depth;

    if (cp == NULL)
        return;
    /* Indent proportionally to tree depth. */
    while (level-- > 0)
        dmlprintf(mem, " ");

    dmlprintf7(mem, "Clump "PRI_INTPTR":"PRI_INTPTR" parent="PRI_INTPTR" left="PRI_INTPTR":"PRI_INTPTR" right="PRI_INTPTR":"PRI_INTPTR"\n",
        (intptr_t)cp, (intptr_t)cp->cbase, (intptr_t)cp->parent,
        (intptr_t)cp->left, (intptr_t)(cp->left ? cp->left->cbase : NULL),
        (intptr_t)cp->right, (intptr_t)(cp->right ? cp->right->cbase : NULL));
    if (cp->parent != parent)
        dmlprintf(mem, "Parent pointer mismatch!\n");
    /* Depth-first: left subtree, then right. */
    ddct(mem, cp->left, cp, depth+1);
    ddct(mem, cp->right, cp, depth+1);
}
2955
void
debug_dump_clump_tree(const gs_ref_memory_t *mem)
{
    /* Dump the whole splay tree starting at the root (depth 0). */
    ddct((const gs_memory_t *)mem, mem->root, NULL, 0);
}
2960
2961
#endif /* DEBUG */