Coverage Report

Created: 2025-06-10 07:19

/src/ghostpdl/base/gsalloc.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2023 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
13
   CA 94129, USA, for further information.
14
*/
15
16
17
/* Standard memory allocator */
18
#include "gx.h"
19
#include "memory_.h"
20
#include "gserrors.h"
21
#include "gsexit.h"
22
#include "gsmdebug.h"
23
#include "gsstruct.h"
24
#include "gxalloc.h"
25
#include "stream.h"   /* for clearing stream list */
26
#include "malloc_.h" /* For MEMENTO */
27
28
/*
29
 * Define whether to try consolidating space before adding a new clump.
30
 * The default is not to do this, because it is computationally
31
 * expensive and doesn't seem to help much.  However, this is done for
32
 * "controlled" spaces whether or not the #define is in effect.
33
 */
34
/*#define CONSOLIDATE_BEFORE_ADDING_CLUMP */
35
36
/*
37
 * This allocator produces tracing messages of the form
38
 *      [aNMOTS]...
39
 * where
40
 *   N is the VM space number, +1 if we are allocating from stable memory.
41
 *   M is : for movable objects, | for immovable,
42
 *   O is {alloc = +, free = -, grow = >, shrink = <},
43
 *   T is {bytes = b, object = <, ref = $, string = >}, and
44
 *   S is {small freelist = f, large freelist = F, LIFO = space,
45
 *      own clump = L, lost = #, lost own clump = ~, other = .}.
46
 */
47
#ifdef DEBUG
48
static int
49
alloc_trace_space(const gs_ref_memory_t *imem)
50
{
51
    return (int)(imem->space + (imem->stable_memory == (const gs_memory_t *)imem));
52
}
53
static void
54
alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname,
55
            gs_memory_type_ptr_t stype, uint size, const void *ptr)
56
{
57
    if_debug7m('A', (const gs_memory_t *)imem, "[a%d%s]%s %s(%u) %s"PRI_INTPTR"\n",
58
               alloc_trace_space(imem), chars, client_name_string(cname),
59
               (ptr == 0 || stype == 0 ? "" :
60
                struct_type_name_string(stype)),
61
               size, (chars[1] == '+' ? "= " : ""), (intptr_t)ptr);
62
}
63
static bool
64
alloc_size_is_ok(gs_memory_type_ptr_t stype)
65
{
66
    return (stype->ssize > 0 && stype->ssize < 0x200000);
67
}
68
#  define ALLOC_CHECK_SIZE(mem,stype)\
69
    BEGIN\
70
      if (!alloc_size_is_ok(stype)) {\
71
        mlprintf2(mem,"size of struct type "PRI_INTPTR" is 0x%lx!\n",\
72
                  (intptr_t)(stype), (ulong)((stype)->ssize));\
73
        return 0;\
74
      }\
75
    END
76
#else
77
579M
#  define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING
78
236M
#  define ALLOC_CHECK_SIZE(mem,stype) DO_NOTHING
79
#endif
80
81
/*
82
 * The structure descriptor for allocators.  Even though allocators
83
 * are allocated outside GC space, they reference objects within it.
84
 */
85
public_st_ref_memory();
86
static
87
2.93M
ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0;
88
488k
ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes);
89
488k
ENUM_PTR(3, gs_ref_memory_t, saved);
90
2.93M
ENUM_PTR(4, gs_ref_memory_t, scan_limit);
91
2.93M
ENUM_PTRS_END
92
486k
static RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr)
93
486k
{
94
486k
    RELOC_PTR(gs_ref_memory_t, streams);
95
486k
    RELOC_PTR(gs_ref_memory_t, names_array);
96
486k
    RELOC_PTR(gs_ref_memory_t, changes);
97
486k
    RELOC_PTR(gs_ref_memory_t, scan_limit);
98
    /* Don't relocate the saved pointer now -- see igc.c for details. */
99
486k
    mptr->reloc_saved = RELOC_OBJ(mptr->saved);
100
486k
}
101
486k
RELOC_PTRS_END
102
103
/*
104
 * Define the flags for alloc_obj, which implements all but the fastest
105
 * case of allocation.
106
 */
107
typedef enum {
108
    ALLOC_IMMOVABLE = 1,
109
    ALLOC_DIRECT = 2    /* called directly, without fast-case checks */
110
} alloc_flags_t;
111
112
/* Forward references */
113
static void remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top);
114
static obj_header_t *large_freelist_alloc(gs_ref_memory_t *mem, obj_size_t size);
115
static obj_header_t *scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size);
116
static size_t compute_free_objects(gs_ref_memory_t *);
117
static obj_header_t *alloc_obj(gs_ref_memory_t *, obj_size_t, gs_memory_type_ptr_t, alloc_flags_t, client_name_t);
118
static void consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem);
119
static void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, obj_size_t size, clump_t *cp);
120
static clump_t *alloc_acquire_clump(gs_ref_memory_t *, size_t, bool, client_name_t);
121
static clump_t *alloc_add_clump(gs_ref_memory_t *, size_t, client_name_t);
122
void alloc_close_clump(gs_ref_memory_t *);
123
124
/*
125
 * Define the standard implementation (with garbage collection)
126
 * of Ghostscript's memory manager interface.
127
 */
128
/* Raw memory procedures */
129
static gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable);
130
static gs_memory_proc_resize_object(i_resize_object);
131
static gs_memory_proc_free_object(i_free_object);
132
static gs_memory_proc_stable(i_stable);
133
static gs_memory_proc_status(i_status);
134
static gs_memory_proc_free_all(i_free_all);
135
static gs_memory_proc_consolidate_free(i_consolidate_free);
136
137
/* Object memory procedures */
138
static gs_memory_proc_alloc_bytes(i_alloc_bytes);
139
static gs_memory_proc_alloc_struct(i_alloc_struct);
140
static gs_memory_proc_alloc_struct(i_alloc_struct_immovable);
141
static gs_memory_proc_alloc_byte_array(i_alloc_byte_array);
142
static gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable);
143
static gs_memory_proc_alloc_struct_array(i_alloc_struct_array);
144
static gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable);
145
static gs_memory_proc_object_size(i_object_size);
146
static gs_memory_proc_object_type(i_object_type);
147
static gs_memory_proc_alloc_string(i_alloc_string);
148
static gs_memory_proc_alloc_string(i_alloc_string_immovable);
149
static gs_memory_proc_resize_string(i_resize_string);
150
static gs_memory_proc_free_string(i_free_string);
151
static gs_memory_proc_register_root(i_register_root);
152
static gs_memory_proc_unregister_root(i_unregister_root);
153
static gs_memory_proc_enable_free(i_enable_free);
154
static gs_memory_proc_set_object_type(i_set_object_type);
155
static gs_memory_proc_defer_frees(i_defer_frees);
156
157
/* We export the procedures for subclasses. */
158
const gs_memory_procs_t gs_ref_memory_procs =
159
{
160
    /* Raw memory procedures */
161
    i_alloc_bytes_immovable,
162
    i_resize_object,
163
    i_free_object,
164
    i_stable,
165
    i_status,
166
    i_free_all,
167
    i_consolidate_free,
168
    /* Object memory procedures */
169
    i_alloc_bytes,
170
    i_alloc_struct,
171
    i_alloc_struct_immovable,
172
    i_alloc_byte_array,
173
    i_alloc_byte_array_immovable,
174
    i_alloc_struct_array,
175
    i_alloc_struct_array_immovable,
176
    i_object_size,
177
    i_object_type,
178
    i_alloc_string,
179
    i_alloc_string_immovable,
180
    i_resize_string,
181
    i_free_string,
182
    i_register_root,
183
    i_unregister_root,
184
    i_enable_free,
185
    i_set_object_type,
186
    i_defer_frees
187
};
188
189
/*
190
 * Previous versions of this code used a simple linked list of
191
 * clumps. We change here to use a splay tree of clumps.
192
 * Splay Trees can be found documented in "Algorithms and Data
193
 * Structures" by Jeffrey H Kingston.
194
 *
195
 * Essentially they are binary trees, ordered by address of the
196
 * 'cbase' pointer. The 'cunning' feature with them is that
197
 * when a node in the tree is accessed, we do a 'move to root'
198
 * operation. This involves performing various 'rotations' as
199
 * we move up the tree, the net effect of which tends to
200
 * lead to more balanced trees (see Kingston for analysis).
201
 * It also leads to better locality of reference in that
202
 * recently accessed nodes stay near the root.
203
 */
204
205
/* #define DEBUG_CLUMPS */
206
#ifdef DEBUG_CLUMPS
207
#define SANITY_CHECK(cp) sanity_check(cp)
208
#define SANITY_CHECK_MID(cp) sanity_check_mid(cp)
209
210
static void
211
broken_splay()
212
{
213
    dlprintf("Broken splay tree!\n");
214
}
215
216
void sanity_check_rec(clump_t *cp)
217
{
218
    splay_dir_t from = SPLAY_FROM_ABOVE;
219
220
    while (cp)
221
    {
222
        if (from == SPLAY_FROM_ABOVE)
223
        {
224
            /* We have arrived from above. Step left. */
225
            if (cp->left)
226
            {
227
                if (cp->left->cbase > cp->cbase || cp->left->parent != cp)
228
                    broken_splay();
229
                cp = cp->left;
230
                from = SPLAY_FROM_ABOVE;
231
                continue;
232
            }
233
            /* No left to step to, so imagine we have just arrived from there */
234
            from = SPLAY_FROM_LEFT;
235
        }
236
        if (from == SPLAY_FROM_LEFT)
237
        {
238
            /* We have arrived from the left. Step right. */
239
            if (cp->right)
240
            {
241
                if (cp->right->cbase < cp->cbase || cp->right->parent != cp)
242
                    broken_splay();
243
                cp = cp->right;
244
                from = SPLAY_FROM_ABOVE;
245
                continue;
246
            }
247
            /* No right to step to, so imagine we have just arrived from there. */
248
            from = SPLAY_FROM_RIGHT;
249
        }
250
        if (from == SPLAY_FROM_RIGHT)
251
        {
252
            /* We have arrived from the right. Step up. */
253
            if (cp->parent == NULL)
254
                break;
255
            if (cp->parent->left != cp && cp->parent->right != cp)
256
                broken_splay();
257
            from = (cp->parent->left == cp ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
258
            cp = cp->parent;
259
        }
260
    }
261
}
262
263
void sanity_check(clump_t *cp)
264
{
265
    sanity_check_rec(cp);
266
}
267
268
void sanity_check_mid(clump_t *cp)
269
{
270
    clump_t *parent;
271
272
    while ((parent = cp->parent) != NULL)
273
    {
274
        if (parent->left == cp)
275
        {
276
            if (parent->right == cp)
277
                broken_splay();
278
        }
279
        else if (parent->right != cp)
280
            broken_splay();
281
        cp = parent;
282
    }
283
284
    sanity_check_rec(cp);
285
}
286
287
#else
288
50.7M
#define SANITY_CHECK(cp) while (0) {}
289
132M
#define SANITY_CHECK_MID(cp) while (0) {}
290
#endif
291
292
/* When initing with the root, we want to pass the smallest inorder one
293
 * back immediately, and set it up so that we step right for the next
294
 * one. */
295
clump_t *
296
clump_splay_walk_init(clump_splay_walker *sw, const gs_ref_memory_t *mem)
297
3.48M
{
298
3.48M
    clump_t *cp = mem->root;
299
300
3.48M
    if (cp)
301
3.47M
    {
302
3.47M
        SANITY_CHECK(cp);
303
304
3.47M
        sw->from = SPLAY_FROM_LEFT;
305
12.6M
        while (cp->left)
306
9.17M
        {
307
9.17M
            cp = cp->left;
308
9.17M
        }
309
3.47M
    }
310
3.48M
    sw->cp = cp;
311
3.48M
    sw->end = NULL;
312
3.48M
    return cp;
313
3.48M
}
314
315
clump_t *
316
clump_splay_walk_bwd_init(clump_splay_walker *sw, const gs_ref_memory_t *mem)
317
153k
{
318
153k
    clump_t *cp = mem->root;
319
320
153k
    if (cp)
321
153k
    {
322
153k
        SANITY_CHECK(cp);
323
324
153k
        sw->from = SPLAY_FROM_RIGHT;
325
356k
        while (cp->right)
326
202k
        {
327
202k
            cp = cp->right;
328
202k
        }
329
153k
    }
330
153k
    sw->cp = cp;
331
153k
    sw->end = NULL;
332
153k
    return cp;
333
153k
}
334
335
/* When initing 'mid walk' (i.e. with a non-root node), we want to
336
 * return the node we are given as the first one, and continue
337
 * onwards in an in order fashion.
338
 */
339
clump_t *
340
clump_splay_walk_init_mid(clump_splay_walker *sw, clump_t *cp)
341
109M
{
342
109M
    sw->from = SPLAY_FROM_LEFT;
343
109M
    sw->cp = cp;
344
109M
    sw->end = cp;
345
109M
    if (cp)
346
109M
    {
347
109M
        SANITY_CHECK_MID(cp);
348
109M
    }
349
109M
    return cp;
350
109M
}
351
352
clump_t *
353
clump_splay_walk_fwd(clump_splay_walker *sw)
354
132M
{
355
132M
    clump_t *cp = sw->cp;
356
132M
    int from = sw->from;
357
358
132M
    if (cp == NULL)
359
15.8k
        return NULL;
360
361
    /* We step through the tree, and stop when we arrive
362
     * at sw->end in an in order manner (i.e. by moving from
363
     * the left). */
364
285M
    while (1)
365
285M
    {
366
285M
        if (from == SPLAY_FROM_ABOVE)
367
122M
        {
368
            /* We have arrived from above. Step left. */
369
122M
            if (cp->left)
370
89.4M
            {
371
89.4M
                cp = cp->left;
372
89.4M
                from = SPLAY_FROM_ABOVE;
373
89.4M
                continue;
374
89.4M
            }
375
            /* No left to step to, so imagine we have just arrived from there */
376
32.7M
            from = SPLAY_FROM_LEFT;
377
            /* Have we reached the stopping point? */
378
32.7M
            if (cp == sw->end)
379
194k
                cp = NULL;
380
            /* We want to stop here, for inorder operation. So break out of the loop. */
381
32.7M
            break;
382
122M
        }
383
163M
        if (from == SPLAY_FROM_LEFT)
384
132M
        {
385
            /* We have arrived from the left. Step right. */
386
132M
            if (cp->right)
387
31.4M
            {
388
31.4M
                cp = cp->right;
389
31.4M
                from = SPLAY_FROM_ABOVE;
390
31.4M
                continue;
391
31.4M
            }
392
            /* No right to step to, so imagine we have just arrived from there. */
393
101M
            from = SPLAY_FROM_RIGHT;
394
101M
        }
395
132M
        if (from == SPLAY_FROM_RIGHT)
396
132M
        {
397
            /* We have arrived from the right. Step up. */
398
132M
            clump_t *old = cp;
399
132M
            cp = cp->parent;
400
132M
            if (cp == NULL)
401
4.74M
            {
402
                /* We've reached the root of the tree. Is this our stopping point? */
403
4.74M
                if (sw->end == NULL)
404
3.47M
                    break;
405
                /* If not, step on. */
406
1.27M
                cp = old;
407
1.27M
                from = SPLAY_FROM_ABOVE;
408
1.27M
            }
409
127M
            else
410
127M
            {
411
127M
                from = (cp->left == old ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
412
127M
                if (from == SPLAY_FROM_LEFT)
413
96.7M
                {
414
                    /* Have we reached the stopping point? */
415
96.7M
                    if (cp == sw->end)
416
766k
                        cp = NULL;
417
96.7M
                    break;
418
96.7M
                }
419
127M
            }
420
132M
        }
421
132M
    }
422
132M
    sw->cp = cp;
423
132M
    sw->from = from;
424
132M
    return cp;
425
132M
}
426
427
clump_t *
428
clump_splay_walk_bwd(clump_splay_walker *sw)
429
1.95M
{
430
1.95M
    clump_t *cp = sw->cp;
431
1.95M
    int from = sw->from;
432
433
1.95M
    if (cp == NULL)
434
0
        return NULL;
435
436
    /* We step backwards through the tree, and stop when we arrive
437
     * at sw->end in a reverse in order manner (i.e. by moving from
438
     * the right). */
439
4.54M
    while (1)
440
4.54M
    {
441
4.54M
        if (from == SPLAY_FROM_ABOVE)
442
1.59M
        {
443
            /* We have arrived from above. Step right. */
444
1.59M
            if (cp->right)
445
609k
            {
446
609k
                cp = cp->right;
447
609k
                from = SPLAY_FROM_ABOVE;
448
609k
                continue;
449
609k
            }
450
            /* No right to step to, so imagine we have just arrived from there. */
451
990k
            from = SPLAY_FROM_RIGHT;
452
            /* Have we reached our end? */
453
990k
            if (cp == sw->end)
454
0
                cp = NULL;
455
            /* Stop to run inorder operation */
456
990k
            break;
457
1.59M
        }
458
2.94M
        if (from == SPLAY_FROM_RIGHT)
459
1.95M
        {
460
            /* We have arrived from the right. Step left. */
461
1.95M
            if (cp->left)
462
990k
            {
463
990k
                cp = cp->left;
464
990k
                from = SPLAY_FROM_ABOVE;
465
990k
                continue;
466
990k
            }
467
            /* No left to step to, so imagine we have just arrived from there. */
468
966k
            from = SPLAY_FROM_LEFT;
469
966k
        }
470
1.95M
        if (from == SPLAY_FROM_LEFT)
471
1.95M
        {
472
            /* We have arrived from the left. Step up. */
473
1.95M
            clump_t *old = cp;
474
1.95M
            cp = cp->parent;
475
1.95M
            from = (cp == NULL || cp->left != old ? SPLAY_FROM_RIGHT : SPLAY_FROM_LEFT);
476
1.95M
            if (from == SPLAY_FROM_RIGHT)
477
966k
            {
478
966k
                if (cp == sw->end)
479
153k
                    cp = NULL;
480
966k
                break;
481
966k
            }
482
1.95M
        }
483
1.95M
    }
484
1.95M
    sw->cp = cp;
485
1.95M
    sw->from = from;
486
1.95M
    return cp;
487
1.95M
}
488
489
static clump_t *
490
clump_splay_remove(clump_t *cp, gs_ref_memory_t *imem)
491
40.8M
{
492
40.8M
    clump_t *replacement;
493
494
40.8M
    if (cp->left == NULL)
495
7.55M
    {
496
        /* At most one child - easy */
497
7.55M
        replacement = cp->right;
498
7.55M
    }
499
33.3M
    else if (cp->right == NULL)
500
15.9M
    {
501
        /* Strictly one child - easy */
502
15.9M
        replacement = cp->left;
503
15.9M
    }
504
17.3M
    else
505
17.3M
    {
506
        /* 2 Children - tricky */
507
        /* Find in-order predecessor to f */
508
17.3M
        replacement = cp->left;
509
19.0M
        while (replacement->right)
510
1.66M
            replacement = replacement->right;
511
        /* Remove replacement - easy as just one child */
512
17.3M
        (void)clump_splay_remove(replacement, imem);
513
        /* Replace cp with replacement */
514
17.3M
        if (cp->left)
515
14.7M
            cp->left->parent = replacement;
516
17.3M
        cp->right->parent = replacement;
517
17.3M
        replacement->left = cp->left;
518
17.3M
        replacement->right = cp->right;
519
17.3M
    }
520
40.8M
    if (cp->parent)
521
19.3M
    {
522
19.3M
        if (cp->parent->left == cp)
523
17.3M
            cp->parent->left = replacement;
524
1.94M
        else
525
1.94M
            cp->parent->right = replacement;
526
19.3M
    }
527
21.5M
    else
528
21.5M
        imem->root = replacement;
529
40.8M
    if (replacement)
530
36.0M
        replacement->parent = cp->parent;
531
40.8M
    return replacement;
532
40.8M
}
533
534
/* Here we apply a function to all the nodes in a tree in
535
 * depth first order. This means that the given function
536
 * can safely alter: 1) the clump, 2) its children,
537
 * 3) its parent's child pointer that points to it
538
 * without fear of corruption. Specifically this means
539
 * that the function can free (and unlink) the node
540
 * if it wants.
541
 */
542
clump_t *
543
clump_splay_app(clump_t *root, gs_ref_memory_t *imem, splay_app_result_t (*fn)(clump_t *, void *), void *arg)
544
731k
{
545
731k
    clump_t *step_to;
546
731k
    clump_t *cp = root;
547
731k
    int from = SPLAY_FROM_ABOVE;
548
731k
    splay_app_result_t res;
549
550
731k
    SANITY_CHECK(cp);
551
552
12.3M
    while (cp)
553
11.6M
    {
554
11.6M
        if (from == SPLAY_FROM_ABOVE)
555
6.19M
        {
556
            /* We have arrived from above. Step left. */
557
6.19M
            step_to = cp->left;
558
6.19M
            if (step_to)
559
3.24M
            {
560
3.24M
                from = SPLAY_FROM_ABOVE;
561
3.24M
                cp = step_to;
562
3.24M
            }
563
2.95M
            else
564
2.95M
            {
565
                /* No left to step to, so imagine we have just arrived from the left */
566
2.95M
                from = SPLAY_FROM_LEFT;
567
2.95M
            }
568
6.19M
        }
569
11.6M
        if (from == SPLAY_FROM_LEFT)
570
6.19M
        {
571
            /* We have arrived from the left. Step right. */
572
6.19M
            step_to = cp->right;
573
6.19M
            if (step_to)
574
2.22M
            {
575
2.22M
                from = SPLAY_FROM_ABOVE;
576
2.22M
                cp = step_to;
577
2.22M
            }
578
3.97M
            else
579
3.97M
            {
580
                /* No right to step to, so imagine we have just arrived from the right. */
581
3.97M
                from = SPLAY_FROM_RIGHT;
582
3.97M
            }
583
6.19M
        }
584
11.6M
        if (from == SPLAY_FROM_RIGHT)
585
6.19M
        {
586
            /* We have arrived from the right. Step up. */
587
6.19M
            step_to = cp->parent;
588
6.19M
            if (step_to)
589
5.46M
            {
590
5.46M
                from = (step_to->left == cp ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT);
591
5.46M
            }
592
6.19M
            res = fn(cp, arg);
593
6.19M
            if (res & SPLAY_APP_STOP)
594
47.8k
                return cp;
595
6.14M
            cp = step_to;
596
6.14M
        }
597
11.6M
    }
598
683k
    return cp;
599
731k
}
600
601
/* Move the given node to the root of the tree, by
602
 * performing a series of the following rotations.
603
 * The key observation here is that all these
604
 * rotations preserve the ordering of the tree, and
605
 * result in 'x' getting higher.
606
 *
607
 * Case 1:   z          x           Case 1b:   z                   x
608
 *          # #        # #                    # #                 # #
609
 *         y   D      A   y                  A   y               y   D
610
 *        # #     =>     # #                    # #     =>      # #
611
 *       x   C          B   z                  B   x           z   C
612
 *      # #                # #                    # #         # #
613
 *     A   B              C   D                  C   D       A   B
614
 *
615
 * Case 2:   z             x        Case 2b:   z                  x
616
 *          # #          ## ##                # #               ## ##
617
 *         y   D        y     z              A   y             z     y
618
 *        # #     =>   # #   # #                # #     =>    # #   # #
619
 *       A   x        A   B C   D              x   D         A   B C   D
620
 *          # #                               # #
621
 *         B   C                             B   C
622
 *
623
 * Case 3:   y          x           Case 3b:  y                  x
624
 *          # #        # #                   # #                # #
625
 *         x   C  =>  A   y                 A   x       =>     y   C
626
 *        # #            # #                   # #            # #
627
 *       A   B          B   C                 B   C          A   B
628
 */
629
static void
630
splay_move_to_root(clump_t *x, gs_ref_memory_t *mem)
631
112M
{
632
112M
    clump_t *y, *z;
633
634
112M
    if (x == NULL)
635
0
        return;
636
637
225M
    while ((y = x->parent) != NULL) {
638
113M
        if ((z = y->parent) != NULL) {
639
62.4M
            x->parent = z->parent;
640
62.4M
            if (x->parent) {
641
21.1M
                if (x->parent->left == z)
642
10.9M
                    x->parent->left = x;
643
10.2M
                else
644
10.2M
                    x->parent->right  = x;
645
21.1M
            }
646
62.4M
            y->parent = x;
647
            /* Case 1, 1b, 2 or 2b */
648
62.4M
            if (y->left == x) {
649
                /* Case 1 or 2b */
650
40.4M
                if (z->left == y) {
651
                    /* Case 1 */
652
18.3M
                    y->left = x->right;
653
18.3M
                    if (y->left)
654
13.9M
                        y->left->parent = y;
655
18.3M
                    z->left = y->right;
656
18.3M
                    if (z->left)
657
13.2M
                        z->left->parent = z;
658
18.3M
                    y->right = z;
659
18.3M
                    z->parent = y;
660
22.1M
                } else {
661
                    /* Case 2b */
662
22.1M
                    z->right = x->left;
663
22.1M
                    if (z->right)
664
3.50M
                        z->right->parent = z;
665
22.1M
                    y->left = x->right;
666
22.1M
                    if (y->left)
667
3.96M
                        y->left->parent = y;
668
22.1M
                    x->left = z;
669
22.1M
                    z->parent = x;
670
22.1M
                }
671
40.4M
                x->right      = y;
672
40.4M
            } else {
673
                /* Case 2 or 1b */
674
22.0M
                if (z->left == y) {
675
                    /* Case 2 */
676
5.87M
                    y->right = x->left;
677
5.87M
                    if (y->right)
678
3.85M
                        y->right->parent = y;
679
5.87M
                    z->left = x->right;
680
5.87M
                    if (z->left)
681
3.66M
                        z->left->parent = z;
682
5.87M
                    x->right = z;
683
5.87M
                    z->parent = x;
684
16.1M
                } else {
685
                    /* Case 1b */
686
16.1M
                    z->right = y->left;
687
16.1M
                    if (z->right)
688
12.2M
                        z->right->parent = z;
689
16.1M
                    y->right = x->left;
690
16.1M
                    if (y->right)
691
12.0M
                        y->right->parent = y;
692
16.1M
                    y->left = z;
693
16.1M
                    z->parent = y;
694
16.1M
                }
695
22.0M
                x->left = y;
696
22.0M
            }
697
62.4M
        } else {
698
            /* Case 3 or 3b */
699
50.9M
            x->parent = NULL;
700
50.9M
            y->parent = x;
701
50.9M
            if (y->left == x) {
702
                /* Case 3 */
703
24.8M
                y->left = x->right;
704
24.8M
                if (y->left)
705
18.5M
                    y->left->parent = y;
706
24.8M
                x->right = y;
707
26.1M
            } else {
708
                /* Case 3b */
709
26.1M
                y->right = x->left;
710
26.1M
                if (y->right)
711
19.5M
                    y->right->parent = y;
712
26.1M
                x->left = y;
713
26.1M
            }
714
50.9M
        }
715
113M
    }
716
112M
    mem->root = x;
717
112M
}
718
719
static void
720
splay_insert(clump_t *cp, gs_ref_memory_t *mem)
721
23.4M
{
722
23.4M
    clump_t *node = NULL;
723
23.4M
    clump_t **root = &mem->root;
724
725
74.0M
    while (*root) {
726
50.5M
        node = *root;
727
50.5M
        if (PTR_LT(cp->cbase, node->cbase)) {
728
26.0M
            root = &node->left;
729
26.0M
        } else {
730
24.5M
            root = &node->right;
731
24.5M
        }
732
50.5M
    }
733
23.4M
    *root = cp;
734
23.4M
    cp->left = NULL;
735
23.4M
    cp->right = NULL;
736
23.4M
    cp->parent = node;
737
23.4M
    splay_move_to_root(cp, mem);
738
23.4M
}
739
740
/*
741
 * Allocate and mostly initialize the state of an allocator (system, global,
742
 * or local).  Does not initialize global or space.
743
 */
744
static void *ialloc_solo(gs_memory_t *, gs_memory_type_ptr_t,
745
                          clump_t **);
746
gs_ref_memory_t *
747
ialloc_alloc_state(gs_memory_t * parent, uint clump_size)
748
47.8k
{
749
47.8k
    clump_t *cp;
750
47.8k
    gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);
751
752
47.8k
    if (iimem == 0)
753
0
        return 0;
754
47.8k
    iimem->stable_memory = (gs_memory_t *)iimem;
755
47.8k
    iimem->procs = gs_ref_memory_procs;
756
47.8k
    iimem->gs_lib_ctx = parent->gs_lib_ctx;
757
47.8k
    iimem->non_gc_memory = parent;
758
47.8k
    iimem->thread_safe_memory = parent->thread_safe_memory;
759
47.8k
    iimem->clump_size = clump_size;
760
#if defined(MEMENTO) || defined(SINGLE_OBJECT_MEMORY_BLOCKS_ONLY)
761
    iimem->large_size = 1;
762
#else
763
47.8k
    iimem->large_size = ((clump_size / 4) & -obj_align_mod) + 1;
764
47.8k
#endif
765
47.8k
    iimem->is_controlled = false;
766
47.8k
    iimem->gc_status.vm_threshold = clump_size * 3L;
767
47.8k
    iimem->gc_status.max_vm = MAX_MAX_VM;
768
47.8k
    iimem->gc_status.signal_value = 0;
769
47.8k
    iimem->gc_status.enabled = false;
770
47.8k
    iimem->gc_status.requested = 0;
771
47.8k
    iimem->gc_allocated = 0;
772
47.8k
    iimem->previous_status.allocated = 0;
773
47.8k
    iimem->previous_status.used = 0;
774
47.8k
    ialloc_reset(iimem);
775
47.8k
    iimem->root = cp;
776
47.8k
    ialloc_set_limit(iimem);
777
47.8k
    iimem->cc = NULL;
778
47.8k
    iimem->save_level = 0;
779
47.8k
    iimem->new_mask = 0;
780
47.8k
    iimem->test_mask = ~0;
781
47.8k
    iimem->streams = 0;
782
47.8k
    iimem->names_array = 0;
783
47.8k
    iimem->roots = 0;
784
47.8k
    iimem->num_contexts = 0;
785
47.8k
    iimem->saved = 0;
786
47.8k
    return iimem;
787
47.8k
}
788
789
/* Allocate a 'solo' object with its own clump. */
static void *
ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype,
            clump_t ** pcp)
{ /*
         * We can't assume that the parent uses the same object header
         * that we do, but the GC requires that allocators have
         * such a header.  Therefore, we prepend one explicitly.
         */
    clump_t *cp =
        gs_raw_alloc_struct_immovable(parent, &st_clump,
                                      "ialloc_solo(clump)");
    /* Clump head + our own object header + the object itself, rounded
       up to the object alignment. */
    uint csize =
        ROUND_UP(sizeof(clump_head_t) + sizeof(obj_header_t) +
                 pstype->ssize,
                 obj_align_mod);
    byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
    obj_header_t *obj = (obj_header_t *) (cdata + sizeof(clump_head_t));

    if (cp == 0 || cdata == 0) {
        /* Either allocation may have failed; gs_free_object(NULL) is
           a no-op, so both can be freed unconditionally. */
        gs_free_object(parent, cp, "ialloc_solo(allocation failure)");
        gs_free_object(parent, cdata, "ialloc_solo(allocation failure)");
        return 0;
    }
    alloc_init_clump(cp, cdata, cdata + csize, false, (clump_t *) NULL);
    /* Mark the clump as full: no LIFO space remains in it. */
    cp->cbot = cp->ctop;
    cp->parent = cp->left = cp->right = 0;
    cp->c_alone = true;
    /* Construct the object header "by hand". */
    obj->o_pad = 0;
    obj->o_alone = 1;
    obj->o_size = pstype->ssize;
    obj->o_type = pstype;
    *pcp = cp;
    /* Return a pointer to the object contents, just past the header. */
    return (void *)(obj + 1);
}
825
826
void
827
ialloc_free_state(gs_ref_memory_t *iimem)
828
0
{
829
0
    clump_t *cp;
830
0
    gs_memory_t *mem;
831
0
    if (iimem == NULL)
832
0
        return;
833
0
    cp = iimem->root;
834
0
    mem = iimem->non_gc_memory;
835
0
    if (cp == NULL)
836
0
        return;
837
0
    gs_free_object(mem, cp->chead, "ialloc_solo(allocation failure)");
838
0
    gs_free_object(mem, cp, "ialloc_solo(allocation failure)");
839
0
}
840
841
/*
 * Add a clump to an externally controlled allocator.  Such allocators
 * allocate all objects as immovable, are not garbage-collected, and
 * don't attempt to acquire additional memory on their own.
 */
int
ialloc_add_clump(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
    clump_t *cp;

    /* Allow acquisition of this clump. */
    imem->is_controlled = false;
    imem->large_size = imem->clump_size;
    imem->limit = imem->gc_status.max_vm = MAX_MAX_VM;

    /* Acquire the clump. */
    cp = alloc_add_clump(imem, space, cname);

    /*
     * Make all allocations immovable.  Since the "movable" allocators
     * allocate within existing clumps, whereas the "immovable" ones
     * allocate in new clumps, we equate the latter to the former, even
     * though this seems backwards.
     */
    imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
    imem->procs.alloc_struct_immovable = imem->procs.alloc_struct;
    imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array;
    imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
    imem->procs.alloc_string_immovable = imem->procs.alloc_string;

    /* Disable acquisition of additional clumps. */
    imem->is_controlled = true;
    imem->limit = 0;

    /* VMerror if the clump could not be acquired. */
    return (cp ? 0 : gs_note_error(gs_error_VMerror));
}
877
878
/* Prepare for a GC by clearing the stream list. */
879
/* This probably belongs somewhere else.... */
880
void
881
ialloc_gc_prepare(gs_ref_memory_t * mem)
882
482k
{ /*
883
         * We have to unlink every stream from its neighbors,
884
         * so that referenced streams don't keep all streams around.
885
         */
886
482k
    while (mem->streams != 0) {
887
0
        stream *s = mem->streams;
888
889
0
        mem->streams = s->next;
890
0
        s->prev = s->next = 0;
891
0
    }
892
482k
}
893
894
/* Initialize after a save. */
895
void
ialloc_reset(gs_ref_memory_t * mem)
{
    /* Detach all clumps: the new save level starts with none of its
       own. */
    mem->root = 0;
    mem->cc = NULL;
    mem->allocated = 0;
    mem->changes = 0;
    mem->scan_limit = 0;
    mem->total_scanned = 0;
    mem->total_scanned_after_compacting = 0;
    /* Freelists and loss statistics start out empty as well. */
    ialloc_reset_free(mem);
}
907
908
/* Initialize after a save or GC. */
909
void
910
ialloc_reset_free(gs_ref_memory_t * mem)
911
635k
{
912
635k
    int i;
913
635k
    obj_header_t **p;
914
915
635k
    mem->lost.objects = 0;
916
635k
    mem->lost.refs = 0;
917
635k
    mem->lost.strings = 0;
918
635k
    mem->cfreed.cp = 0;
919
65.4M
    for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++)
920
64.8M
        *p = 0;
921
635k
    mem->largest_free_size = 0;
922
635k
}
923
924
/*
925
 * Set an arbitrary limit so that the amount of allocated VM does not grow
926
 * indefinitely even when GC is disabled.  Benchmarks have shown that
927
 * the resulting GC's are infrequent enough not to degrade performance
928
 * significantly.
929
 */
930
#define FORCE_GC_LIMIT 8000000
931
932
/* Set the allocation limit after a change in one or more of */
933
/* vm_threshold, max_vm, or enabled, or after a GC. */
934
void
ialloc_set_limit(register gs_ref_memory_t * mem)
{ /*
         * The following code is intended to set the limit so that
         * we stop allocating when allocated + previous_status.allocated
         * exceeds the lesser of max_vm or (if GC is enabled)
         * gc_allocated + vm_threshold.
         */
    /* Headroom left under max_vm after accounting for outer save
       levels; clamped at 0 rather than allowed to underflow. */
    size_t max_allocated =
    (mem->gc_status.max_vm > mem->previous_status.allocated ?
     mem->gc_status.max_vm - mem->previous_status.allocated :
     0);

    if (mem->gc_status.enabled) {
        size_t limit = mem->gc_allocated + mem->gc_status.vm_threshold;

        if (limit < mem->previous_status.allocated)
            mem->limit = 0;
        else {
            limit -= mem->previous_status.allocated;
            mem->limit = min(limit, max_allocated);
        }
    } else
        /* Even with GC disabled, cap growth so a GC is eventually
           forced (see FORCE_GC_LIMIT above). */
        mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT);
    if_debug7m('0', (const gs_memory_t *)mem,
               "[0]space=%d, max_vm=%"PRIdSIZE", prev.alloc=%"PRIdSIZE", enabled=%d, "
               "gc_alloc=%"PRIdSIZE", threshold=%"PRIdSIZE" => limit=%"PRIdSIZE"\n",
               mem->space, mem->gc_status.max_vm,
               mem->previous_status.allocated,
               mem->gc_status.enabled, mem->gc_allocated,
               mem->gc_status.vm_threshold, mem->limit);
}
966
967
/* Context passed through the clump splay-tree walk in i_free_all. */
struct free_data
{
    gs_ref_memory_t *imem;      /* allocator whose clumps are being freed */
    clump_t         *allocator; /* the clump containing imem itself, if seen */
};
972
973
static splay_app_result_t
974
free_all_not_allocator(clump_t *cp, void *arg)
975
2.00M
{
976
2.00M
    struct free_data *fd = (struct free_data *)arg;
977
978
2.00M
    if (cp->cbase + sizeof(obj_header_t) != (byte *)fd->imem)
979
1.90M
        alloc_free_clump(cp, fd->imem);
980
95.7k
    else
981
95.7k
        fd->allocator = cp;
982
983
2.00M
    return SPLAY_APP_CONTINUE;
984
2.00M
}
985
986
static splay_app_result_t
987
free_all_allocator(clump_t *cp, void *arg)
988
47.8k
{
989
47.8k
    struct free_data *fd = (struct free_data *)arg;
990
991
47.8k
    if (cp->cbase + sizeof(obj_header_t) != (byte *)fd->imem)
992
0
        return SPLAY_APP_CONTINUE;
993
994
47.8k
    fd->allocator = cp;
995
47.8k
    alloc_free_clump(cp, fd->imem);
996
47.8k
    return SPLAY_APP_STOP;
997
47.8k
}
998
999
/*
1000
 * Free all the memory owned by the allocator, except the allocator itself.
1001
 * Note that this only frees memory at the current save level: the client
1002
 * is responsible for restoring to the outermost level if desired.
1003
 */
1004
static void
i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_ref_memory_t * imem = (gs_ref_memory_t *)mem;
    struct free_data fd;

    fd.imem = imem;
    fd.allocator = NULL;

    if (free_mask & FREE_ALL_DATA && imem->root != NULL) {
        /* Free every clump except the allocator */
        clump_splay_app(imem->root, imem, free_all_not_allocator, &fd);

        /* Reinstate the allocator as the sole clump */
        imem->root = fd.allocator;
        if (fd.allocator)
            fd.allocator->parent = fd.allocator->left = fd.allocator->right = NULL;
    }
    if (free_mask & FREE_ALL_ALLOCATOR) {
        /* Walk the tree to find the allocator. */
        clump_splay_app(imem->root, imem, free_all_allocator, &fd);
    }
}
1027
1028
/* ================ Accessors ================ */
1029
1030
/* Get the size of an object from the header. */
1031
static size_t
1032
i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
1033
2.70M
{
1034
2.70M
    return pre_obj_contents_size((const obj_header_t *)obj - 1);
1035
2.70M
}
1036
1037
/* Get the type of a structure from the header. */
1038
static gs_memory_type_ptr_t
1039
i_object_type(const gs_memory_t * mem, const void /*obj_header_t */ *obj)
1040
206k
{
1041
206k
    return ((const obj_header_t *)obj - 1)->o_type;
1042
206k
}
1043
1044
/* Get the GC status of a memory. */
1045
void
gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat)
{
    /* Struct copy of the current GC status into the caller's buffer. */
    *pstat = mem->gc_status;
}
1050
1051
/* Set the GC status of a memory. */
1052
void
gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat)
{
    mem->gc_status = *pstat;
    /* The allocation limit is derived from vm_threshold / max_vm /
       enabled, so it must be recomputed after any status change. */
    ialloc_set_limit(mem);
}
1058
1059
/* Set VM threshold. Value passed as int64_t since it is signed */
1060
void
1061
gs_memory_set_vm_threshold(gs_ref_memory_t * mem, int64_t val)
1062
158k
{
1063
158k
    gs_memory_gc_status_t stat;
1064
158k
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;
1065
1066
158k
    if (val < MIN_VM_THRESHOLD)
1067
0
        val = MIN_VM_THRESHOLD;
1068
158k
    else if (val > MAX_VM_THRESHOLD)
1069
0
        val = MAX_VM_THRESHOLD;
1070
158k
    gs_memory_gc_status(mem, &stat);
1071
158k
    stat.vm_threshold = val;
1072
158k
    gs_memory_set_gc_status(mem, &stat);
1073
158k
    gs_memory_gc_status(stable, &stat);
1074
158k
    stat.vm_threshold = val;
1075
158k
    gs_memory_set_gc_status(stable, &stat);
1076
158k
}
1077
1078
/* Set VM reclaim. */
1079
void
1080
gs_memory_set_vm_reclaim(gs_ref_memory_t * mem, bool enabled)
1081
158k
{
1082
158k
    gs_memory_gc_status_t stat;
1083
158k
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;
1084
1085
158k
    gs_memory_gc_status(mem, &stat);
1086
158k
    stat.enabled = enabled;
1087
158k
    gs_memory_set_gc_status(mem, &stat);
1088
158k
    gs_memory_gc_status(stable, &stat);
1089
158k
    stat.enabled = enabled;
1090
158k
    gs_memory_set_gc_status(stable, &stat);
1091
158k
}
1092
1093
/* ================ Objects ================ */
1094
1095
/* Allocate a small object quickly if possible. */
1096
/* The size must be substantially less than max_uint. */
1097
/* ptr must be declared as obj_header_t *. */
1098
/* pfl must be declared as obj_header_t **. */
1099
/*
 * The following four macros expand, used in sequence, into the arms of
 * a single if / else-if / else chain (note the unbalanced braces in
 * each expansion: each macro closes the previous arm's brace).  The
 * order of attempts is: exact-size freelist, large-object freelist,
 * LIFO allocation at the top of the current clump, then the caller's
 * fallback (alloc_obj).
 */
#define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\
        if ( size <= max_freelist_size &&\
             *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\
           )\
        { ptr = *pfl;\
                *pfl = *(obj_header_t **)ptr;\
                ptr[-1].o_size = (obj_size_t)size;\
                ptr[-1].o_type = pstype;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Second arm: satisfy an over-max_freelist_size request from the
 * large-object freelist. */
#define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\
        }\
        else if (size > max_freelist_size &&\
                 (ptr = large_freelist_alloc(imem, size)) != 0)\
        { ptr[-1].o_type = pstype;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Third arm: carve the object off the bottom of the current clump's
 * free space, leaving room for alignment and two object headers. */
#define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\
        }\
        else if ( imem->cc && !imem->cc->c_alone && \
                (imem->cc->ctop - (byte *)(ptr = (obj_header_t *)imem->cc->cbot))\
                >= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\
             size < imem->large_size\
           )\
        { imem->cc->cbot = (byte *)ptr + obj_size_round(size);\
                ptr->o_pad = 0;\
                ptr->o_alone = 0;\
                ptr->o_size = (obj_size_t)size;\
                ptr->o_type = pstype;\
                ptr++;\
                /* If debugging, clear the block in an attempt to */\
                /* track down uninitialized data errors. */\
                gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
/* Final arm: closes the last brace; the caller supplies the fallback
 * block. */
#define ELSE_ALLOC\
        }\
        else
1137
1138
/* Allocate a (movable) block of bytes. */
static byte *
i_alloc_bytes(gs_memory_t * mem, size_t ssize, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    obj_header_t **pfl;
    obj_size_t size = (obj_size_t)ssize;

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    /* Fail if the size does not survive narrowing to obj_size_t. */
    if ((size_t)size != ssize)
        return NULL;

    /* Try: small freelist, large freelist, LIFO in the current clump,
       then the general allocator (see the macro chain above). */
    IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl)
        alloc_trace(":+bf", imem, cname, NULL, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+bF", imem, cname, NULL, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, (uint)size, &st_bytes)
        alloc_trace(":+b ", imem, cname, NULL, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, &st_bytes, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+b.", imem, cname, NULL, size, obj);
    }
#if IGC_PTR_STABILITY_CHECK
        obj[-1].d.o.space_id = imem->space_id;
#endif
    return (byte *)Memento_label(obj, cname);
}
1172
static byte *
1173
i_alloc_bytes_immovable(gs_memory_t * mem, size_t ssize, client_name_t cname)
1174
430k
{
1175
430k
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1176
430k
    obj_header_t *obj;
1177
430k
    obj_size_t size = (obj_size_t)ssize;
1178
1179
#ifdef MEMENTO
1180
    if (Memento_failThisEvent())
1181
        return NULL;
1182
#endif
1183
1184
430k
    if ((size_t)size != ssize)
1185
0
        return NULL;
1186
1187
430k
    obj = alloc_obj(imem, size, &st_bytes,
1188
430k
                    ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
1189
430k
    if (obj == 0)
1190
0
        return 0;
1191
430k
    alloc_trace("|+b.", imem, cname, NULL, size, obj);
1192
430k
    return (byte *)Memento_label(obj, cname);
1193
430k
}
1194
/* Allocate a (movable) structure of the given type. */
static void *
i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
               client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_size_t size = pstype->ssize;
    obj_header_t *obj;
    obj_header_t **pfl;

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    ALLOC_CHECK_SIZE(mem,pstype);
    /* Try: small freelist, large freelist, LIFO in the current clump,
       then the general allocator (see the macro chain above). */
    IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl)
        alloc_trace(":+<f", imem, cname, pstype, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+<F", imem, cname, pstype, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+< ", imem, cname, pstype, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, pstype, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+<.", imem, cname, pstype, size, obj);
    }
#if IGC_PTR_STABILITY_CHECK
        obj[-1].d.o.space_id = imem->space_id;
#endif
    return Memento_label(obj, cname);
}
1227
static void *
1228
i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
1229
                         client_name_t cname)
1230
19.0M
{
1231
19.0M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1232
19.0M
    obj_size_t size = pstype->ssize;
1233
19.0M
    obj_header_t *obj;
1234
1235
#ifdef MEMENTO
1236
    if (Memento_failThisEvent())
1237
        return NULL;
1238
#endif
1239
1240
19.0M
    ALLOC_CHECK_SIZE(mem,pstype);
1241
19.0M
    obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
1242
19.0M
    alloc_trace("|+<.", imem, cname, pstype, size, obj);
1243
19.0M
    return Memento_label(obj, cname);
1244
19.0M
}
1245
1246
/*
 * Compute num_elements * elt_size into *lsize, returning false if the
 * product cannot be represented in a size_t.
 *
 * num_elements - requested element count.
 * elt_size     - size of one element in bytes.
 * lsize        - out: total byte count; only meaningful when true is
 *                returned.
 */
static inline bool
alloc_array_check_size(size_t num_elements, size_t elt_size, size_t *lsize)
{
    int shift0, shift1;
    size_t m, n;

    /* Avoid the loops in the overwhelming number of cases. */
    if ((num_elements | elt_size) >= 65536) {
        /* m is the maximum unsigned value representable in shift0 bits */
        for (m=0, shift0 = 0; m < num_elements; m = (m<<1)+1, shift0++);
        /* n is the maximum unsigned value representable in shift1 bits */
        for (n=0, shift1 = 0; n < elt_size; n = (n<<1)+1, shift1++);
        /* A shift0-bit unsigned number multiplied by a shift1-bit
         * unsigned number always fits in shift0+shift1 bits.  The old
         * test compared shift0+shift1-1, which let a product needing
         * exactly one bit more than size_t wrap around undetected
         * (e.g. (2^33-1)*(2^32-1) with a 64-bit size_t).  When the
         * shift bound is inconclusive, fall back to an exact division
         * check.  elt_size is nonzero whenever that check is reached:
         * shift0 can never exceed the size_t width, so the sum can
         * only exceed it when shift1 >= 1. */
        if (shift0 + shift1 > 8 * (int)sizeof(size_t) &&
            num_elements > SIZE_MAX / elt_size)
            return false; /* Overflow */
    }

    *lsize = num_elements * elt_size;
    return true;
}
1268
1269
/* Allocate a (movable) array of bytes, checking the element-count *
   element-size product for overflow. */
static byte *
i_alloc_byte_array(gs_memory_t * mem, size_t num_elements, size_t elt_size,
                   client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    size_t slsize;
    obj_size_t lsize;
#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif
    /* Reject multiplication overflow... */
    if (alloc_array_check_size(num_elements, elt_size, &slsize) == false)
        return NULL;
    /* ...and sizes that do not survive narrowing to obj_size_t. */
    lsize = (obj_size_t)slsize;
    if ((size_t)lsize != slsize)
        return NULL;
    obj = alloc_obj(imem, lsize,
                    &st_bytes, ALLOC_DIRECT, cname);

    if_debug6m('A', mem, "[a%d:+b.]%s -bytes-*(%"PRIuSIZE"=%"PRIuSIZE"*%"PRIuSIZE") = "PRI_INTPTR"\n",
               alloc_trace_space(imem), client_name_string(cname),
               num_elements * elt_size,
               num_elements, elt_size, (intptr_t)obj);
    return (byte *)Memento_label(obj, cname);
}
1295
/* Allocate an immovable array of bytes, checking the element-count *
   element-size product for overflow. */
static byte *
i_alloc_byte_array_immovable(gs_memory_t * mem, size_t num_elements,
                             size_t elt_size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    size_t slsize;
    obj_size_t lsize;
#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif
    /* Reject multiplication overflow... */
    if (alloc_array_check_size(num_elements, elt_size, &slsize) == false)
        return NULL;
    /* ...and sizes that do not survive narrowing to obj_size_t. */
    lsize = (obj_size_t)slsize;
    if ((size_t)lsize != slsize)
        return NULL;
    obj = alloc_obj(imem, lsize,
                    &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT,
                    cname);

    if_debug6m('A', mem, "[a%d|+b.]%s -bytes-*(%"PRIuSIZE"=%"PRIuSIZE"*%"PRIuSIZE") = "PRI_INTPTR"\n",
               alloc_trace_space(imem), client_name_string(cname),
               num_elements * elt_size,
               num_elements, elt_size, (intptr_t)obj);
    return (byte *)Memento_label(obj, cname);
}
1322
/* Allocate a (movable) array of structures of the given type. */
static void *
i_alloc_struct_array(gs_memory_t * mem, size_t num_elements,
                     gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    size_t slsize;
    obj_size_t lsize;
#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    ALLOC_CHECK_SIZE(mem,pstype);
#ifdef DEBUG
    /* Catch callers that pass a whole-array structure type instead of
       the element type. */
    if (pstype->enum_ptrs == basic_enum_ptrs) {
        dmprintf2(mem, "  i_alloc_struct_array: called with incorrect structure type (not element), struct='%s', client='%s'\n",
                pstype->sname, cname);
        return NULL;    /* fail */
    }
#endif
    /* Reject multiplication overflow... */
    if (alloc_array_check_size(num_elements, pstype->ssize, &slsize) == false)
        return NULL;
    /* ...and sizes that do not survive narrowing to obj_size_t. */
    lsize = (obj_size_t)slsize;
    if ((size_t)lsize != slsize)
        return NULL;
    obj = alloc_obj(imem, lsize, pstype, ALLOC_DIRECT, cname);
    if_debug7m('A', mem, "[a%d:+<.]%s %s*(%"PRIuSIZE"=%"PRIuSIZE"*%u) = "PRI_INTPTR"\n",
               alloc_trace_space(imem), client_name_string(cname),
               struct_type_name_string(pstype),
               num_elements * pstype->ssize,
               num_elements, pstype->ssize, (intptr_t)obj);
    return (char *)Memento_label(obj, cname);
}
1356
/* Allocate an immovable array of structures of the given type. */
static void *
i_alloc_struct_array_immovable(gs_memory_t * mem, size_t num_elements,
                               gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    size_t slsize;
    obj_size_t lsize;
#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    ALLOC_CHECK_SIZE(mem,pstype);
    /* Reject multiplication overflow... */
    if (alloc_array_check_size(num_elements, pstype->ssize, &slsize) == false)
        return NULL;
    /* ...and sizes that do not survive narrowing to obj_size_t. */
    lsize = (obj_size_t)slsize;
    if ((size_t)lsize != slsize)
        return NULL;
    obj = alloc_obj(imem, lsize, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    if_debug7m('A', mem, "[a%d|+<.]%s %s*(%"PRIuSIZE"=%"PRIuSIZE"*%u) = "PRI_INTPTR"\n",
               alloc_trace_space(imem), client_name_string(cname),
               struct_type_name_string(pstype),
               num_elements * pstype->ssize,
               num_elements, pstype->ssize, (intptr_t)obj);
    return (char *)Memento_label(obj, cname);
}
1383
/* Resize an object in place when possible, otherwise reallocate and
   copy.  Returns the (possibly moved) object, or NULL on failure. */
static void *
i_resize_object(gs_memory_t * mem, void *obj, size_t new_num_elements,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp = (obj_header_t *) obj - 1;
    gs_memory_type_ptr_t pstype = pp->o_type;
    size_t old_size = pre_obj_contents_size(pp);
    size_t new_size = pstype->ssize * new_num_elements;
    size_t old_size_rounded = obj_align_round(old_size);
    size_t new_size_rounded = obj_align_round(new_size);
    void *new_obj = NULL;

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif

    /* Fail if the rounded size does not fit in obj_size_t. */
    if (new_size_rounded != (obj_size_t)new_size_rounded)
        return NULL;

    if (old_size_rounded == new_size_rounded) {
        /* Same rounded footprint: just relabel the header. */
        pp->o_size = (obj_size_t)new_size;
        new_obj = obj;
    } else
        /* Object sits at the top of the current clump's LIFO space:
           grow or shrink it by moving cbot. */
        if (imem->cc && (byte *)obj + old_size_rounded == imem->cc->cbot &&
            imem->cc->ctop - (byte *)obj >= new_size_rounded ) {
            imem->cc->cbot = (byte *)obj + new_size_rounded;
            pp->o_size = (obj_size_t)new_size;
            new_obj = obj;
        } else /* try and trim the object -- but only if room for a dummy header */
            if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
                trim_obj(imem, obj, (obj_size_t)new_size, (clump_t *)0);
                new_obj = obj;
            }
    if (new_obj) {
        if_debug8m('A', mem, "[a%d:%c%c ]%s %s(%"PRIuSIZE"=>%"PRIuSIZE") "PRI_INTPTR"\n",
                   alloc_trace_space(imem),
                   (new_size > old_size ? '>' : '<'),
                   (pstype == &st_bytes ? 'b' : '<'),
                   client_name_string(cname),
                   struct_type_name_string(pstype),
                   old_size, new_size, (intptr_t)obj);
        return Memento_label(new_obj, cname);
    }
    /* Punt. */
    /* Allocate a fresh array, copy the surviving contents, and free
       the original object. */
    new_obj = gs_alloc_struct_array(mem, new_num_elements, void,
                                    pstype, cname);
    if (new_obj == 0)
        return 0;
    memcpy(new_obj, obj, min(old_size, new_size));
    gs_free_object(mem, obj, cname);
    return Memento_label(new_obj, cname);
}
1437
static void
1438
i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
1439
257M
{
1440
257M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1441
257M
    obj_header_t *pp;
1442
257M
    gs_memory_type_ptr_t pstype;
1443
257M
    gs_memory_struct_type_t saved_stype;
1444
1445
257M
    struct_proc_finalize((*finalize));
1446
257M
    size_t size, rounded_size;
1447
1448
257M
    if (ptr == 0)
1449
27.8M
        return;
1450
229M
    pp = (obj_header_t *) ptr - 1;
1451
229M
    pstype = pp->o_type;
1452
#ifdef DEBUG
1453
    if (gs_debug_c('?')) {
1454
        clump_locator_t cld;
1455
1456
        if (pstype == &st_free) {
1457
            mlprintf2(mem, "%s: object "PRI_INTPTR" already free!\n",
1458
                      client_name_string(cname), (intptr_t)ptr);
1459
            return;   /*gs_abort(); */
1460
        }
1461
        /* Check that this allocator owns the object being freed. */
1462
        cld.memory = imem;
1463
        while ((cld.cp = cld.memory->root),
1464
               !clump_locate_ptr(ptr, &cld)
1465
            ) {
1466
            if (!cld.memory->saved) {
1467
                mlprintf3(mem, "%s: freeing "PRI_INTPTR", not owned by memory "PRI_INTPTR"!\n",
1468
                          client_name_string(cname), (intptr_t)ptr,
1469
                          (intptr_t)mem);
1470
                return;   /*gs_abort(); */
1471
            }
1472
                  /****** HACK: we know the saved state is the first ******
1473
                   ****** member of an alloc_save_t. ******/
1474
            cld.memory = (gs_ref_memory_t *) cld.memory->saved;
1475
        }
1476
        /* Check that the object is in the allocated region. */
1477
        if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
1478
                          cld.cp->cbot))
1479
            ) {
1480
            mlprintf5(mem, "%s: freeing "PRI_INTPTR",\n\toutside clump "PRI_INTPTR" cbase="PRI_INTPTR", cbot="PRI_INTPTR"!\n",
1481
                      client_name_string(cname), (intptr_t) ptr,
1482
                      (intptr_t) cld.cp, (intptr_t) cld.cp->cbase,
1483
                      (intptr_t) cld.cp->cbot);
1484
            return;   /*gs_abort(); */
1485
        }
1486
    }
1487
#endif
1488
229M
    size = pre_obj_contents_size(pp);
1489
229M
    rounded_size = obj_align_round(size);
1490
229M
    finalize = pstype->finalize;
1491
229M
    if (finalize != 0) {
1492
1493
        /* unfortunately device finalize procedures will clobber the
1494
           stype which is used for later debugging with "A" debug
1495
           tracing, so we save stype it in a local. */
1496
29.9M
        if (gs_debug['a'] || gs_debug['A'])
1497
0
            saved_stype = *pstype;
1498
1499
29.9M
        if_debug3m('u', mem, "[u]finalizing %s "PRI_INTPTR" (%s)\n",
1500
29.9M
                   struct_type_name_string(pstype),
1501
29.9M
                   (intptr_t)ptr, client_name_string(cname));
1502
29.9M
        (*finalize) (mem, ptr);
1503
1504
29.9M
        if (gs_debug['a'] || gs_debug['A'])
1505
0
            pstype = &saved_stype;
1506
29.9M
    }
1507
229M
    if (imem->cc && (byte *) ptr + rounded_size == imem->cc->cbot) {
1508
76.2M
        alloc_trace(":-o ", imem, cname, pstype, size, ptr);
1509
76.2M
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
1510
76.2M
        imem->cc->cbot = (byte *) pp;
1511
        /* IFF this object is adjacent to (or below) the byte after the
1512
         * highest free object, do the consolidation within this clump. */
1513
76.2M
        if ((byte *)pp <= imem->cc->int_freed_top) {
1514
7.86M
            consolidate_clump_free(imem->cc, imem);
1515
7.86M
        }
1516
76.2M
        return;
1517
76.2M
    }
1518
153M
    if (pp->o_alone) {
1519
                /*
1520
                 * We gave this object its own clump.  Free the entire clump,
1521
                 * unless it belongs to an older save level, in which case
1522
                 * we mustn't overwrite it.
1523
                 */
1524
21.0M
        clump_locator_t cl;
1525
1526
#ifdef DEBUG
1527
        {
1528
            clump_locator_t cld;
1529
1530
            cld.memory = imem;
1531
            cld.cp = 0;
1532
            if (gs_debug_c('a'))
1533
                alloc_trace(
1534
                            (clump_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
1535
                               imem, cname, pstype, size, ptr);
1536
        }
1537
#endif
1538
21.0M
        cl.memory = imem;
1539
21.0M
        cl.cp = 0;
1540
21.0M
        if (clump_locate_ptr(ptr, &cl)) {
1541
21.0M
            if (!imem->is_controlled)
1542
21.0M
                alloc_free_clump(cl.cp, imem);
1543
21.0M
            return;
1544
21.0M
        }
1545
        /* Don't overwrite even if gs_alloc_debug is set. */
1546
21.0M
    }
1547
132M
    if (rounded_size >= sizeof(obj_header_t *)) {
1548
        /*
1549
         * Put the object on a freelist, unless it belongs to
1550
         * an older save level, in which case we mustn't
1551
         * overwrite it.
1552
         */
1553
132M
        imem->cfreed.memory = imem;
1554
132M
        if (clump_locate(ptr, &imem->cfreed)) {
1555
132M
            obj_header_t **pfl;
1556
1557
132M
            if (size > max_freelist_size) {
1558
8.42M
                pfl = &imem->freelists[LARGE_FREELIST_INDEX];
1559
8.42M
                if (rounded_size > imem->largest_free_size)
1560
865k
                    imem->largest_free_size = rounded_size;
1561
124M
            } else {
1562
124M
                pfl = &imem->freelists[(size + obj_align_mask) >>
1563
124M
                                      log2_obj_align_mod];
1564
124M
            }
1565
            /* keep track of highest object on a freelist */
1566
            /* If we're dealing with a block in the currently open clump
1567
               (in imem->cc) update that, otherwise, update the clump in
1568
               the clump list (in imem->cfreed.cp)
1569
             */
1570
132M
            if (imem->cc && imem->cfreed.cp->chead == imem->cc->chead) {
1571
16.5M
                if ((byte *)pp >= imem->cc->int_freed_top) {
1572
4.01M
                    imem->cc->int_freed_top = (byte *)ptr + rounded_size;
1573
4.01M
                }
1574
16.5M
            }
1575
116M
            else {
1576
116M
                if ((byte *)pp >= imem->cfreed.cp->int_freed_top) {
1577
382k
                    imem->cfreed.cp->int_freed_top = (byte *)ptr + rounded_size;
1578
382k
                }
1579
116M
            }
1580
132M
            pp->o_type = &st_free;  /* don't confuse GC */
1581
132M
            o_set_unmarked(pp);
1582
132M
            gs_alloc_fill(ptr, gs_alloc_fill_free, size);
1583
132M
            *(obj_header_t **) ptr = *pfl;
1584
132M
            *pfl = (obj_header_t *) ptr;
1585
132M
            alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"),
1586
132M
                        imem, cname, pstype, size, ptr);
1587
132M
            return;
1588
132M
        }
1589
        /* Don't overwrite even if gs_alloc_debug is set. */
1590
132M
    } else {
1591
350
        pp->o_type = &st_free;  /* don't confuse GC */
1592
350
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
1593
350
    }
1594
132M
    alloc_trace(":-o#", imem, cname, pstype, size, ptr);
1595
5.28k
    imem->lost.objects += obj_size_round(size);
1596
5.28k
}
1597
/* Allocate a movable string of nbytes.  Strings are carved downward from
 * the top (ctop) of a clump's string area.  Falls back to an immovable
 * own-clump string for requests >= imem->large_size, or acquires a new
 * clump when no existing clump at this save level has room.
 * Returns 0 on failure. */
static byte *
i_alloc_string(gs_memory_t * mem, size_t nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    clump_splay_walker sw;

    /*
     * Cycle through the clumps at the current save level, starting
     * with the currently open one.
     */
    clump_t *cp = clump_splay_walk_init_mid(&sw, imem->cc);

#ifdef MEMENTO
    if (Memento_failThisEvent())
        return NULL;
#endif
    if (cp == 0) {
        /* Open an arbitrary clump. */
        imem->cc = clump_splay_walk_init(&sw, imem);
        alloc_open_clump(imem);
    }
top:
    /* The gap between cbot (objects grow up) and ctop (strings grow down)
     * is the clump's remaining free space. */
    if (imem->cc && !imem->cc->c_alone && imem->cc->ctop - imem->cc->cbot > nbytes) {
        if_debug4m('A', mem, "[a%d:+> ]%s(%"PRIuSIZE") = "PRI_INTPTR"\n",
                   alloc_trace_space(imem), client_name_string(cname), nbytes,
                   (intptr_t)(imem->cc->ctop - nbytes));
        str = imem->cc->ctop -= nbytes;
        gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
        return str;
    }
    /* Try the next clump. */
    cp = clump_splay_walk_fwd(&sw);

    if (cp != NULL)
    {
        alloc_close_clump(imem);
        imem->cc = cp;
        alloc_open_clump(imem);
        goto top;
    }
    if (nbytes > string_space_quanta(SIZE_MAX - sizeof(clump_head_t)) *
        string_data_quantum
        ) {     /* Can't represent the size in a uint! */
        return 0;
    }
    if (nbytes >= imem->large_size) { /* Give it a clump all its own. */
        return i_alloc_string_immovable(mem, nbytes, cname);
    } else {     /* Add another clump. */
        cp = alloc_acquire_clump(imem, (ulong) imem->clump_size, true, "clump");

        if (cp == 0)
            return 0;
        alloc_close_clump(imem);
        imem->cc = clump_splay_walk_init_mid(&sw, cp);
        /* Mark the whole new clump as free-fill before retrying. */
        gs_alloc_fill(imem->cc->cbase, gs_alloc_fill_free,
                      imem->cc->climit - imem->cc->cbase);
        goto top;
    }
}
1657
static byte *
1658
i_alloc_string_immovable(gs_memory_t * mem, size_t nbytes, client_name_t cname)
1659
24.3k
{
1660
24.3k
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1661
24.3k
    byte *str;
1662
24.3k
    size_t asize;
1663
24.3k
    clump_t *cp;
1664
1665
#ifdef MEMENTO
1666
    if (Memento_failThisEvent())
1667
        return NULL;
1668
#endif
1669
    /* Give it a clump all its own. */
1670
24.3k
    asize = string_clump_space(nbytes) + sizeof(clump_head_t);
1671
24.3k
    cp = alloc_acquire_clump(imem, asize, true, "large string clump");
1672
1673
24.3k
    if (cp == 0)
1674
0
        return 0;
1675
24.3k
    cp->c_alone = true;
1676
1677
24.3k
    str = cp->ctop = cp->climit - nbytes;
1678
24.3k
    if_debug4m('a', mem, "[a%d|+>L]%s(%"PRIuSIZE") = "PRI_INTPTR"\n",
1679
24.3k
               alloc_trace_space(imem), client_name_string(cname), nbytes,
1680
24.3k
               (intptr_t)str);
1681
24.3k
    gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
1682
1683
24.3k
    return Memento_label(str, cname);
1684
24.3k
}
1685
1686
/* Resize a string.  The bottom-most string of the open clump (the one at
 * ctop) is resized in place (its data moves, because strings grow
 * downward).  Otherwise, shrinking leaves a hole charged to
 * lost.strings, and growing allocates a new string and copies.
 * Returns the (possibly moved) string, or 0 on allocation failure. */
static byte *
i_resize_string(gs_memory_t * mem, byte * data, size_t old_num, size_t new_num,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *ptr;

    if (old_num == new_num)  /* same size returns the same string */
        return data;

    if ( imem->cc && data == imem->cc->ctop && /* bottom-most string */
        (new_num < old_num ||
         imem->cc->ctop - imem->cc->cbot > new_num - old_num)
        ) {     /* Resize in place. */
        /* New start: ctop moves down when growing, up when shrinking. */
        ptr = data + old_num - new_num;
        if_debug6m('A', mem, "[a%d:%c> ]%s(%"PRIuSIZE"->%"PRIuSIZE") "PRI_INTPTR"\n",
                   alloc_trace_space(imem),
                   (new_num > old_num ? '>' : '<'),
                   client_name_string(cname), old_num, new_num,
                   (intptr_t)ptr);
        imem->cc->ctop = ptr;
        /* Regions may overlap, hence memmove. */
        memmove(ptr, data, min(old_num, new_num));
#ifdef DEBUG
        if (new_num > old_num)
            gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc,
                          new_num - old_num);
        else
            gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num);
#endif
    } else
        if (new_num < old_num) {
            /* trim the string and create a free space hole */
            ptr = data;
            imem->lost.strings += old_num - new_num;
            gs_alloc_fill(data + new_num, gs_alloc_fill_free,
                          old_num - new_num);
            if_debug5m('A', mem, "[a%d:<> ]%s(%"PRIuSIZE"->%"PRIuSIZE") "PRI_INTPTR"\n",
                       alloc_trace_space(imem), client_name_string(cname),
                       old_num, new_num, (intptr_t)ptr);
        } else {     /* Punt. */
            ptr = gs_alloc_string(mem, new_num, cname);
            if (ptr == 0)
                return 0;
            memcpy(ptr, data, min(old_num, new_num));
            gs_free_string(mem, data, old_num, cname);
        }

    return ptr;
}
1735
1736
static void
1737
i_free_string(gs_memory_t * mem, byte * data, size_t nbytes,
1738
              client_name_t cname)
1739
4.20M
{
1740
4.20M
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
1741
1742
4.20M
    if (data) {
1743
4.18M
        if (imem->cc && data == imem->cc->ctop) {
1744
3.95M
            if_debug4m('A', mem, "[a%d:-> ]%s(%"PRIuSIZE") "PRI_INTPTR"\n",
1745
3.95M
                       alloc_trace_space(imem), client_name_string(cname), nbytes,
1746
3.95M
                       (intptr_t)data);
1747
3.95M
            imem->cc->ctop += nbytes;
1748
3.95M
        } else {
1749
222k
            if_debug4m('A', mem, "[a%d:->#]%s(%"PRIuSIZE") "PRI_INTPTR"\n",
1750
222k
                       alloc_trace_space(imem), client_name_string(cname), nbytes,
1751
222k
                       (intptr_t)data);
1752
222k
            imem->lost.strings += nbytes;
1753
222k
        }
1754
4.18M
        gs_alloc_fill(data, gs_alloc_fill_free, nbytes);
1755
4.18M
    }
1756
4.20M
}
1757
1758
/* Return the stable allocator associated with this one. */
static gs_memory_t *
i_stable(gs_memory_t *mem)
{
    gs_memory_t *stable = mem->stable_memory;

    return stable;
}
1763
1764
/* Fill in *pstat with this allocator's memory status: space used and
 * allocated (including totals carried over in previous_status), plus
 * the limit and thread-safety flag. */
static void
i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    /* Start with space already written off as lost. */
    size_t unused = imem->lost.refs + imem->lost.strings;
    size_t inner = 0;
    clump_splay_walker sw;
    clump_t *cp;

    alloc_close_clump(imem);
    /* Add up unallocated space within each clump. */
    /* Also keep track of space allocated to inner clumps, */
    /* which are included in previous_status.allocated. */
    for (cp = clump_splay_walk_init(&sw, imem); cp != NULL; cp = clump_splay_walk_fwd(&sw))
    {
        unused += cp->ctop - cp->cbot;
        if (cp->outer)
            inner += cp->cend - (byte *) cp->chead;
    }
    /* Space sitting on freelists also counts as unused. */
    unused += compute_free_objects(imem);
    pstat->used = imem->allocated + inner - unused +
        imem->previous_status.used;
    pstat->allocated = imem->allocated +
        imem->previous_status.allocated;
    pstat->max_used = 0;    /* unknown for this allocator */
    pstat->limit = imem->limit;
    pstat->is_thread_safe = false; /* this allocator is not thread safe */
}
1792
1793
static void
1794
i_enable_free(gs_memory_t * mem, bool enable)
1795
502k
{
1796
502k
    if (enable)
1797
251k
        mem->procs.free_object = i_free_object,
1798
251k
            mem->procs.free_string = i_free_string;
1799
251k
    else
1800
251k
        mem->procs.free_object = gs_ignore_free_object,
1801
251k
            mem->procs.free_string = gs_ignore_free_string;
1802
502k
}
1803
1804
/* Overwrite the structure type recorded in an object's header.
 * A null ptr is ignored. */
static void i_set_object_type(gs_memory_t *mem, void *ptr, gs_memory_type_ptr_t type)
{
    obj_header_t *hdr;

    if (ptr == 0)
        return;
    /* The header sits immediately before the client-visible object. */
    hdr = (obj_header_t *) ptr - 1;
    hdr->o_type = type;
}
1813
1814
/* Deferred-free control: this allocator frees immediately, so the
 * request is intentionally a no-op (mem and defer are unused). */
static void i_defer_frees(gs_memory_t *mem, int defer)
{
}
1817
1818
/* ------ Internal procedures ------ */
1819
1820
/* Compute the amount of free object space by scanning free lists. */
1821
static size_t
1822
compute_free_objects(gs_ref_memory_t * mem)
1823
713k
{
1824
713k
    size_t unused = mem->lost.objects;
1825
713k
    int i;
1826
1827
    /* Add up space on free lists. */
1828
73.5M
    for (i = 0; i < num_freelists; i++) {
1829
72.8M
        const obj_header_t *pfree;
1830
1831
73.1M
        for (pfree = mem->freelists[i]; pfree != 0;
1832
72.8M
             pfree = *(const obj_header_t * const *)pfree
1833
72.8M
            )
1834
362k
            unused += obj_align_round(pfree[-1].o_size);
1835
72.8M
    }
1836
713k
    return unused;
1837
713k
}
1838
1839
/* Allocate an object from the large-block freelist. */
1840
static obj_header_t * /* rets obj if allocated, else 0 */
large_freelist_alloc(gs_ref_memory_t *mem, obj_size_t size)
{
    /* Scan large object freelist. We'll grab an object up to 1/8 bigger */
    /* right away, else use best fit of entire scan. */
    obj_size_t aligned_size = obj_align_round(size);
    size_t aligned_min_size = aligned_size + sizeof(obj_header_t);
    size_t aligned_max_size =
        aligned_min_size + obj_align_round(aligned_min_size / 8);
    obj_header_t *best_fit = 0;
    obj_header_t **best_fit_prev = NULL; /* Initialize against indeterminism. */
    obj_size_t best_fit_size = (obj_size_t)SIZE_MAX;
    obj_header_t *pfree;
    obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX];
    size_t largest_size = 0;

    if (aligned_size > mem->largest_free_size)
        return 0;    /* definitely no block large enough */

    /* Singly-linked intrusive list: each free block's first word points
     * at the next block, so ppfprev doubles as the unlink location. */
    while ((pfree = *ppfprev) != 0) {
        obj_size_t free_size = obj_align_round(pfree[-1].o_size);

        /* Accept an exact fit outright, or track the best block that is
         * at least aligned_min_size (room for a trailing dummy header). */
        if (free_size == aligned_size ||
            (free_size >= aligned_min_size && free_size < best_fit_size)
            ) {
            best_fit = pfree;
            best_fit_prev = ppfprev;
            best_fit_size = pfree[-1].o_size;
            if (best_fit_size <= aligned_max_size)
                break; /* good enough fit to spare scan of entire list */
        }
        ppfprev = (obj_header_t **) pfree;
        if (free_size > largest_size)
            largest_size = free_size;
    }
    if (best_fit == 0) {
        /*
         * No single free clump is large enough, but since we scanned the
         * entire list, we now have an accurate updated value for
         * largest_free_size.
         */
        mem->largest_free_size = largest_size;
        return 0;
    }

    /* Remove from freelist & return excess memory to free */
    *best_fit_prev = *(obj_header_t **)best_fit;
    trim_obj(mem, best_fit, aligned_size, (clump_t *)0);

    /* Pre-init block header; o_alone & o_type are already init'd */
    best_fit[-1].o_size = size;

    return best_fit;
}
1894
1895
/* Allocate an object.  This handles all but the fastest, simplest case. */
1896
static obj_header_t *
1897
alloc_obj(gs_ref_memory_t *mem, obj_size_t lsize, gs_memory_type_ptr_t pstype,
1898
          alloc_flags_t flags, client_name_t cname)
1899
37.5M
{
1900
37.5M
    obj_header_t *ptr;
1901
1902
37.5M
    if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
1903
        /*
1904
         * Give the object a clump all its own.  Note that this case does
1905
         * not occur if is_controlled is true.
1906
         */
1907
21.8M
        obj_size_t asize =
1908
21.8M
            ((lsize + obj_align_mask) & -obj_align_mod) +
1909
21.8M
            sizeof(obj_header_t);
1910
21.8M
        clump_t *cp =
1911
21.8M
            alloc_acquire_clump(mem, asize + sizeof(clump_head_t), false,
1912
21.8M
                                "large object clump");
1913
1914
21.8M
        if (asize < lsize)
1915
0
            return 0;
1916
21.8M
        if (cp == 0)
1917
8
            return 0;
1918
21.8M
        cp->c_alone = true;
1919
21.8M
        ptr = (obj_header_t *) cp->cbot;
1920
21.8M
        cp->cbot += asize;
1921
21.8M
        ptr->o_pad = 0;
1922
21.8M
        ptr->o_alone = 1;
1923
21.8M
        ptr->o_size = (obj_size_t)lsize;
1924
21.8M
    } else {
1925
        /*
1926
         * Cycle through the clumps at the current save level, starting
1927
         * with the currently open one.
1928
         */
1929
15.6M
        clump_splay_walker sw;
1930
15.6M
        clump_t *cp = clump_splay_walk_init_mid(&sw, mem->cc);
1931
15.6M
        obj_size_t asize = obj_size_round(lsize);
1932
15.6M
        bool allocate_success = false;
1933
1934
15.6M
        if (lsize > max_freelist_size && (flags & ALLOC_DIRECT)) {
1935
            /* We haven't checked the large block freelist yet. */
1936
2.28M
            if ((ptr = large_freelist_alloc(mem, lsize)) != 0) {
1937
105k
                --ptr;      /* must point to header */
1938
105k
                goto done;
1939
105k
            }
1940
2.28M
        }
1941
1942
15.5M
        if (cp == 0) {
1943
            /* Open an arbitrary clump. */
1944
64.7k
            mem->cc = clump_splay_walk_init(&sw, mem);
1945
64.7k
            alloc_open_clump(mem);
1946
64.7k
        }
1947
1948
15.5M
#define CAN_ALLOC_AT_END(cp)\
1949
106M
  ((cp) && !((cp)->c_alone) && (cp)->ctop - (byte *) (ptr = (obj_header_t *) (cp)->cbot)\
1950
91.2M
   > asize + sizeof(obj_header_t))
1951
1952
106M
        do {
1953
106M
            if (CAN_ALLOC_AT_END(mem->cc)) {
1954
14.6M
                allocate_success = true;
1955
14.6M
                break;
1956
91.5M
            } else if (mem->is_controlled) {
1957
                /* Try consolidating free space. */
1958
0
                gs_consolidate_free((gs_memory_t *)mem);
1959
0
                if (CAN_ALLOC_AT_END(mem->cc)) {
1960
0
                    allocate_success = true;
1961
0
                    break;
1962
0
                }
1963
0
            }
1964
            /* No luck, go on to the next clump. */
1965
91.5M
            cp = clump_splay_walk_fwd(&sw);
1966
91.5M
            if (cp == NULL)
1967
903k
                break;
1968
1969
90.5M
            alloc_close_clump(mem);
1970
90.5M
            mem->cc = cp;
1971
90.5M
            alloc_open_clump(mem);
1972
90.5M
        }
1973
90.5M
        while (1);
1974
1975
#ifdef CONSOLIDATE_BEFORE_ADDING_CLUMP
1976
        if (!allocate_success) {
1977
            /*
1978
             * Try consolidating free space before giving up.
1979
             * It's not clear this is a good idea, since it requires quite
1980
             * a lot of computation and doesn't seem to improve things much.
1981
             */
1982
            if (!mem->is_controlled) { /* already did this if controlled */
1983
                clump_t *cp;
1984
1985
                alloc_close_clump(mem);
1986
                for (cp = clump_splay_walk_init_mid(&sw, cp_orig); cp != NULL; cp = clump_splay_walk_fwd(&sw))
1987
                {
1988
                    consolidate_clump_free(cp, mem);
1989
                    if (CAN_ALLOC_AT_END(cp)) {
1990
                        mem->cc = cp;
1991
                        alloc_open_clump(mem);
1992
                        allocate_success = true;
1993
                        break;
1994
                    }
1995
                }
1996
            }
1997
        }
1998
#endif
1999
2000
0
#undef CAN_ALLOC_AT_END
2001
2002
15.5M
        if (!allocate_success) {
2003
            /* Add another clump. */
2004
903k
            clump_t *cp =
2005
903k
                alloc_add_clump(mem, mem->clump_size, "clump");
2006
2007
903k
            if (cp) {
2008
                /* mem->cc == cp */
2009
903k
                ptr = (obj_header_t *)cp->cbot;
2010
903k
                allocate_success = true;
2011
903k
            }
2012
903k
        }
2013
2014
        /*
2015
         * If no success, try to scavenge from low free memory. This is
2016
         * only enabled for controlled memory (currently only async
2017
         * renderer) because it's too much work to prevent it from
2018
         * examining outer save levels in the general case.
2019
         */
2020
15.5M
        if (allocate_success)
2021
15.5M
            mem->cc->cbot = (byte *) ptr + asize;
2022
0
        else if (!mem->is_controlled ||
2023
0
                 (ptr = scavenge_low_free(mem, lsize)) == 0)
2024
0
            return 0; /* allocation failed */
2025
15.5M
        ptr->o_pad = 0;
2026
15.5M
        ptr->o_alone = 0;
2027
15.5M
        ptr->o_size = lsize;
2028
15.5M
    }
2029
37.5M
done:
2030
37.5M
    ptr->o_type = pstype;
2031
#   if IGC_PTR_STABILITY_CHECK
2032
        ptr->d.o.space_id = mem->space_id;
2033
#   endif
2034
37.5M
    ptr++;
2035
37.5M
    gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize);
2036
37.5M
    return Memento_label(ptr, cname);
2037
37.5M
}
2038
2039
/*
2040
 * Consolidate free objects contiguous to free space at cbot onto the cbot
2041
 * area. Also keep track of end of highest internal free object
2042
 * (int_freed_top).
2043
 */
2044
static void
consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem)
{
    obj_header_t *begin_free = 0;  /* start of the current run of free objects */

    cp->int_freed_top = cp->cbase;  /* below all objects in clump */
    SCAN_CLUMP_OBJECTS(cp)
    DO_ALL
        if (pre->o_type == &st_free) {
            /* Remember where a run of contiguous free objects starts. */
            if (begin_free == 0)
                begin_free = pre;
        } else {
            /* A live object ends any free run; record the boundary. */
            if (begin_free)
                cp->int_freed_top = (byte *)pre; /* first byte following internal free */
            begin_free = 0;
        }
    END_OBJECTS_SCAN
    if (begin_free) {
        /* We found free objects at the top of the object area. */
        /* Remove the free objects from the freelists. */
        remove_range_from_freelist(mem, begin_free, cp->cbot);
        if_debug4m('a', (const gs_memory_t *)mem,
                   "[a]resetting clump "PRI_INTPTR" cbot from "PRI_INTPTR" to "PRI_INTPTR" (%lu free)\n",
                   (intptr_t)cp, (intptr_t)cp->cbot, (intptr_t)begin_free,
                   (intptr_t)((byte *)cp->cbot - (byte *)begin_free));
        /* Lower cbot so the reclaimed run becomes part of the clump's
         * contiguous free space. */
        cp->cbot = (byte *) begin_free;
    }
}
2072
2073
static splay_app_result_t
2074
consolidate(clump_t *cp, void *arg)
2075
0
{
2076
0
    gs_ref_memory_t *mem = (gs_ref_memory_t *)arg;
2077
2078
0
    consolidate_clump_free(cp, mem);
2079
0
    if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
2080
        /* The entire clump is free. */
2081
0
        if (!mem->is_controlled) {
2082
0
            alloc_free_clump(cp, mem);
2083
0
            if (mem->cc == cp)
2084
0
                mem->cc = NULL;
2085
0
        }
2086
0
    }
2087
2088
0
    return SPLAY_APP_CONTINUE;
2089
0
}
2090
2091
/* Consolidate free objects. */
2092
void
2093
ialloc_consolidate_free(gs_ref_memory_t *mem)
2094
0
{
2095
0
    alloc_close_clump(mem);
2096
2097
    /* We used to visit clumps in reverse order to encourage LIFO behavior,
2098
     * but with binary trees this is not possible (unless you want to
2099
     * either change the tree during the process, recurse, or otherwise
2100
     * hold the state). */
2101
0
    clump_splay_app(mem->root, mem, consolidate, mem);
2102
2103
    /* NOTE: Previously, if we freed the current clump, we'd move to whatever the
2104
     * bigger of it's children was. We now just move to the root. */
2105
0
    if (mem->cc == NULL)
2106
0
        mem->cc = mem->root;
2107
2108
0
    alloc_open_clump(mem);
2109
0
}
2110
static void
2111
i_consolidate_free(gs_memory_t *mem)
2112
0
{
2113
0
    ialloc_consolidate_free((gs_ref_memory_t *)mem);
2114
0
}
2115
2116
/* State shared with the scavenge() splay-tree visitor. */
typedef struct
{
    uint need_free;           /* contiguous free bytes required (incl. dummy header) */
    obj_header_t *found_pre;  /* out: header of reclaimed block, NULL if none found */
    gs_ref_memory_t *mem;     /* allocator being scavenged */
    obj_size_t request_size;  /* caller's requested (untrimmed) object size */
} scavenge_data;
2123
2124
/* Splay-tree visitor: search clump cp for a contiguous run of free
 * objects totalling at least sd->need_free bytes.  On success the run is
 * removed from the freelists, turned into a single free object recorded
 * in sd->found_pre (trimmed back to sd->request_size), and the walk
 * stops. */
static splay_app_result_t
scavenge(clump_t *cp, void *arg)
{
    scavenge_data *sd = (scavenge_data *)arg;
    obj_header_t *begin_free = NULL;  /* start of current contiguous free run */
    obj_size_t found_free = 0;        /* accumulated bytes in the run */

    sd->found_pre = NULL;

    SCAN_CLUMP_OBJECTS(cp)
    DO_ALL
        if (pre->o_type == &st_free) {
            if (begin_free == 0) {
                found_free = 0;
                begin_free = pre;
            }
            found_free += pre_obj_rounded_size(pre);
            if (begin_free != 0 && found_free >= sd->need_free)
                break;
        } else
            begin_free = 0;   /* live object breaks the run */
    END_OBJECTS_SCAN_NO_ABORT

    if (begin_free != 0 && found_free >= sd->need_free) {
        /* Fish found pieces out of various freelists */
        remove_range_from_freelist(sd->mem, (char*)begin_free,
                                   (char*)begin_free + found_free);

        /* Prepare found object */
        sd->found_pre = begin_free;
        sd->found_pre->o_type = &st_free;  /* don't confuse GC if gets lost */
        sd->found_pre->o_size = found_free - sizeof(obj_header_t);

        /* Chop off excess tail piece & toss it back into free pool */
        trim_obj(sd->mem, sd->found_pre + 1, sd->request_size, cp);
        return SPLAY_APP_STOP;
    }

    return SPLAY_APP_CONTINUE;
}
2164
2165
/* try to free-up given amount of space from freespace below clump base */
2166
static obj_header_t * /* returns uninitialized object hdr, NULL if none found */
2167
scavenge_low_free(gs_ref_memory_t *mem, obj_size_t request_size)
2168
0
{
2169
    /* find 1st range of memory that can be glued back together to fill request */
2170
0
    scavenge_data sd;
2171
0
    obj_size_t request_size_rounded = obj_size_round(request_size);
2172
2173
0
    sd.found_pre = 0;
2174
0
    sd.need_free = request_size_rounded + sizeof(obj_header_t);    /* room for GC's dummy hdr */
2175
0
    sd.mem = mem;
2176
0
    sd.request_size = request_size;
2177
2178
0
    clump_splay_app(mem->root, mem, scavenge, &sd);
2179
0
    return sd.found_pre;
2180
0
}
2181
2182
/* Remove range of memory from a mem's freelists */
2183
/* Unlink every free object lying in [bottom, top) from mem's freelists.
 * The range must contain only free objects.  lost.objects is adjusted
 * downward by the range size minus the bytes actually unlinked. */
static void
remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top)
{
    int num_free[num_freelists];
    int smallest = num_freelists, largest = -1;
    const obj_header_t *cur;
    obj_size_t size;
    int i;
    obj_size_t removed = 0;

    /*
     * Scan from bottom to top, a range containing only free objects,
     * counting the number of objects of each size.
     */

    for (cur = bottom; cur != top;
         cur = (const obj_header_t *)
             ((const byte *)cur + obj_size_round(size))
        ) {
        size = cur->o_size;
        i = (size > max_freelist_size ? LARGE_FREELIST_INDEX :
             (size + obj_align_mask) >> log2_obj_align_mod);
        /* num_free[] is only initialized lazily over [smallest, largest];
         * extend the zeroed window whenever a new index falls outside. */
        if (i < smallest) {
            /*
             * 0-length free blocks aren't kept on any list, because
             * they don't have room for a pointer.
             */
            if (i == 0)
                continue;
            if (smallest < num_freelists)
                memset(&num_free[i], 0, (smallest - i) * sizeof(int));
            else
                num_free[i] = 0;
            smallest = i;
        }
        if (i > largest) {
            if (largest >= 0)
                memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int));
            largest = i;
        }
        num_free[i]++;
    }

    /*
     * Remove free objects from the freelists, adjusting lost.objects by
     * subtracting the size of the region being processed minus the amount
     * of space reclaimed.
     */

    for (i = smallest; i <= largest; i++) {
        int count = num_free[i];
        obj_header_t *pfree;
        obj_header_t **ppfprev;

        if (!count)
            continue;
        ppfprev = &mem->freelists[i];
        /* Walk list i, unlinking exactly `count` entries that fall
         * inside [bottom, top). */
        for (;;) {
            pfree = *ppfprev;
            if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) {
                /* We're removing an object. */
                *ppfprev = *(obj_header_t **) pfree;
                removed += obj_align_round(pfree[-1].o_size);
                if (!--count)
                    break;
            } else
                ppfprev = (obj_header_t **) pfree;
        }
    }
    mem->lost.objects -= (char*)top - (char*)bottom - removed;
}
2254
2255
/* Trim a memory object down to a given size */
2256
/* Trim an object down to `size` bytes, putting any trailing excess on a
 * freelist (or charging it to lost.objects if too small to link).
 * `cp` may be the containing clump or 0 (located on demand). */
static void
trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, obj_size_t size, clump_t *cp)
/* Obj must have rounded size == req'd size, or have enough room for */
/* trailing dummy obj_header */
{
    obj_size_t rounded_size = obj_align_round(size);
    obj_header_t *pre_obj = obj - 1;
    /* Header of the excess tail piece, just past the trimmed object. */
    obj_header_t *excess_pre = (obj_header_t*)((char*)obj + rounded_size);
    obj_size_t old_rounded_size = obj_align_round(pre_obj->o_size);
    obj_size_t excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t);

    /* trim object's size to desired */
    pre_obj->o_size = size;
    if (old_rounded_size == rounded_size)
        return;  /* nothing more to do here */
    /*
     * If the object is alone in its clump, move cbot to point to the end
     * of the object.
     */
    if (pre_obj->o_alone) {
        if (!cp) {
            /* Caller didn't supply the clump; look it up. */
            mem->cfreed.memory = mem;
            if (clump_locate(obj, &mem->cfreed)) {
                cp = mem->cfreed.cp;
            }
        }
        if (cp) {
#ifdef DEBUG
            if (cp->cbot != (byte *)obj + old_rounded_size) {
                lprintf3("resizing "PRI_INTPTR", old size %u, new size %u, cbot wrong!\n",
                         (intptr_t)obj, old_rounded_size, size);
                /* gs_abort */
            } else
#endif
                {
                    cp->cbot = (byte *)excess_pre;
                    return;
                }
        }
        /*
         * Something very weird is going on.  This probably shouldn't
         * ever happen, but if it does....
         */
        pre_obj->o_pad = 0;
        pre_obj->o_alone = 0;
    }
    /* make excess into free obj */
    excess_pre->o_type = &st_free;  /* don't confuse GC */
    excess_pre->o_size = excess_size;
    excess_pre->o_pad = 0;
    excess_pre->o_alone = 0;
    if (excess_size >= obj_align_mod) {
        /* Put excess object on a freelist */
        obj_header_t **pfl;

        /* Track the highest internal free object in the open clump. */
        if (mem->cc && (byte *)excess_pre >= mem->cc->int_freed_top)
            mem->cc->int_freed_top = (byte *)excess_pre + excess_size;
        if (excess_size <= max_freelist_size)
            pfl = &mem->freelists[(excess_size + obj_align_mask) >>
                                 log2_obj_align_mod];
        else {
            uint rounded_size = obj_align_round(excess_size);

            pfl = &mem->freelists[LARGE_FREELIST_INDEX];
            if (rounded_size > mem->largest_free_size)
                mem->largest_free_size = rounded_size;
        }
        /* Link the excess piece at the head of the chosen freelist. */
        *(obj_header_t **) (excess_pre + 1) = *pfl;
        *pfl = excess_pre + 1;
        mem->cfreed.memory = mem;
    } else {
        /* excess piece will be "lost" memory */
        mem->lost.objects += excess_size + sizeof(obj_header_t);
    }
}
2331
2332
/* ================ Roots ================ */
2333
2334
/* Register a root. */
2335
static int
2336
i_register_root(gs_memory_t * mem, gs_gc_root_t ** rpp, gs_ptr_type_t ptype,
2337
                void **up, client_name_t cname)
2338
351k
{
2339
351k
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
2340
351k
    gs_gc_root_t *rp;
2341
2342
351k
    if (rpp == NULL || *rpp == NULL) {
2343
0
        rp = gs_raw_alloc_struct_immovable(imem->non_gc_memory, &st_gc_root_t,
2344
0
                                           "i_register_root");
2345
0
        if (rp == 0)
2346
0
            return_error(gs_error_VMerror);
2347
0
        rp->free_on_unregister = true;
2348
0
        if (rpp && *rpp == NULL)
2349
0
            *rpp = rp;
2350
351k
    } else {
2351
351k
        rp = *rpp;
2352
351k
        rp->free_on_unregister = false;
2353
351k
    }
2354
351k
    if_debug3m('8', mem, "[8]register root(%s) "PRI_INTPTR" -> "PRI_INTPTR"\n",
2355
351k
               client_name_string(cname), (intptr_t)rp, (intptr_t)up);
2356
351k
    rp->ptype = ptype;
2357
351k
    rp->p = up;
2358
351k
    rp->next = imem->roots;
2359
351k
    imem->roots = rp;
2360
351k
    return 0;
2361
351k
}
2362
2363
/* Unregister a root. */
2364
static void
2365
i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
2366
323k
{
2367
323k
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
2368
323k
    gs_gc_root_t **rpp = &imem->roots;
2369
2370
323k
    if_debug2m('8', mem, "[8]unregister root(%s) "PRI_INTPTR"\n",
2371
323k
               client_name_string(cname), (intptr_t)rp);
2372
323k
    while (*rpp != rp)
2373
0
        rpp = &(*rpp)->next;
2374
323k
    *rpp = (*rpp)->next;
2375
323k
    if (rp->free_on_unregister)
2376
0
        gs_free_object(imem->non_gc_memory, rp, "i_unregister_root");
2377
323k
}
2378
2379
/* ================ clumps ================ */
2380
2381
public_st_clump();
2382
2383
/* Insert a clump in the chain.  This is exported for the GC and for */
/* the forget_save operation. */
void
alloc_link_clump(clump_t * cp, gs_ref_memory_t * imem)
{
    /* Clumps are kept in a splay tree keyed by address. */
    splay_insert(cp, imem);
    SANITY_CHECK(cp);
}
2391
2392
/* Add a clump for ordinary allocation. */
2393
static clump_t *
2394
alloc_add_clump(gs_ref_memory_t * mem, size_t csize, client_name_t cname)
2395
903k
{
2396
903k
    clump_t *cp = alloc_acquire_clump(mem, csize, true, cname);
2397
2398
903k
    if (cp) {
2399
903k
        alloc_close_clump(mem);
2400
903k
        mem->cc = cp;
2401
903k
        gs_alloc_fill(mem->cc->cbase, gs_alloc_fill_free,
2402
903k
                      mem->cc->climit - mem->cc->cbase);
2403
903k
    }
2404
903k
    return cp;
2405
903k
}
2406
2407
/* Acquire a clump.  If we would exceed MaxLocalVM (if relevant), */
/* or if we would exceed the VMThreshold and psignal is NULL, */
/* return 0; if we would exceed the VMThreshold but psignal is valid, */
/* just set the signal and return successfully. */
static clump_t *
alloc_acquire_clump(gs_ref_memory_t * mem, size_t csize, bool has_strings,
                    client_name_t cname)
{
    gs_memory_t *parent = mem->non_gc_memory;
    clump_t *cp;
    byte *cdata;

#if ARCH_SIZEOF_LONG > ARCH_SIZEOF_INT
    /* If csize is larger than max_uint, punt. */
    if (csize != (uint) csize)
        return 0;
#endif
    cp = gs_raw_alloc_struct_immovable(parent, &st_clump, cname);

    /* gc_status.signal_value is initialised to zero when the
     * allocator is created, only the Postscript interpreter
     * (which implement garbage collection) takes the action to set
     * it to anything other than zero
     */
    if( mem->gc_status.signal_value != 0) {
        /* we have a garbage collector */
        if (mem->allocated >= mem->limit) {
            mem->gc_status.requested += csize;
            if (mem->limit >= mem->gc_status.max_vm) {
                /* Hard limit reached: give back the clump struct and fail. */
                gs_free_object(parent, cp, cname);
                return 0;
            }
            if_debug4m('0', (const gs_memory_t *)mem,
                       "[0]signaling space=%d, allocated=%"PRIdSIZE", limit=%"PRIdSIZE", requested=%"PRIdSIZE"\n",
                       mem->space, mem->allocated,
                       mem->limit, mem->gc_status.requested);
            /* Soft limit exceeded: raise the GC signal but let this
             * allocation proceed. */
            mem->gs_lib_ctx->gcsignal = mem->gc_status.signal_value;
        }
    }
    cdata = gs_alloc_bytes_immovable(parent, csize, cname);
    if (cp == 0 || cdata == 0) {
        /* One (or both) of the allocations failed: free whichever
         * part succeeded (gs_free_object tolerates NULL). */
        gs_free_object(parent, cdata, cname);
        gs_free_object(parent, cp, cname);
        mem->gc_status.requested = csize;
        return 0;
    }
    alloc_init_clump(cp, cdata, cdata + csize, has_strings, (clump_t *) 0);
    alloc_link_clump(cp, mem);
    /* Account for both the clump struct and its data block. */
    mem->allocated += st_clump.ssize + csize;
    SANITY_CHECK(cp);
    return cp;
}
2459
2460
/* Initialize the pointers in a clump.  This is exported for save/restore. */
/* The bottom pointer must be aligned, but the top pointer need not */
/* be aligned. */
void
alloc_init_clump(clump_t * cp, byte * bot, byte * top, bool has_strings,
                 clump_t * outer)
{
    byte *cdata = bot;

    /* An inner clump counts against its enclosing (outer) clump. */
    if (outer != 0)
        outer->inner_count++;
    cp->chead = (clump_head_t *) cdata;
    cdata += sizeof(clump_head_t);
    /* Object space grows up from cbot; nothing freed internally yet. */
    cp->cbot = cp->cbase = cp->int_freed_top = cdata;
    cp->cend = top;
    cp->rcur = 0;
    cp->rtop = 0;
    cp->outer = outer;
    cp->inner_count = 0;
    cp->has_refs = false;
    cp->sbase = cdata;
    cp->c_alone = false; /* should be set correctly by caller */
    if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
        /*
         * We allocate a large enough string marking and reloc table
         * to cover the entire clump.
         */
        uint nquanta = string_space_quanta(top - cdata);

        cp->climit = cdata + nquanta * string_data_quantum;
        cp->smark = cp->climit;
        cp->smark_size = string_quanta_mark_size(nquanta);
        cp->sreloc =
            (string_reloc_offset *) (cp->smark + cp->smark_size);
        cp->sfree1 = (uint *) cp->sreloc;
    } else {
        /* No strings, don't need the string GC tables. */
        cp->climit = cp->cend;
        cp->sfree1 = 0;
        cp->smark = 0;
        cp->smark_size = 0;
        cp->sreloc = 0;
    }
    /* String space grows down from ctop (== climit when empty). */
    cp->ctop = cp->climit;
    alloc_init_free_strings(cp);
}
2506
2507
/* Initialize the string freelists in a clump. */
2508
void
2509
alloc_init_free_strings(clump_t * cp)
2510
23.5M
{
2511
23.5M
    if (cp->sfree1)
2512
1.56M
        memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
2513
23.5M
    cp->sfree = 0;
2514
23.5M
}
2515
2516
/* Close up the current clump. */
/* This is exported for save/restore and the GC. */
void
alloc_close_clump(gs_ref_memory_t * mem)
{
    /* In non-debug builds this is a no-op; under the 'A' debug flag it
     * traces the clump being closed. */
#ifdef DEBUG
    if (gs_debug_c('A')) {
        dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem));
        dmprintf_clump((const gs_memory_t *)mem, "closing clump", mem->cc);
    }
#endif
}
2528
2529
/* Reopen the current clump after a GC or restore. */
void
alloc_open_clump(gs_ref_memory_t * mem)
{
    /* In non-debug builds this is a no-op; under the 'A' debug flag it
     * traces the clump being reopened. */
#ifdef DEBUG
    if (gs_debug_c('A')) {
        dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem));
        dmprintf_clump((const gs_memory_t *)mem, "opening clump", mem->cc);
    }
#endif
}
2540
2541
#ifdef DEBUG
2542
static splay_app_result_t
2543
check_in_clump(clump_t *cp, void *arg)
2544
{
2545
    clump_t **cpp = (clump_t **)arg;
2546
2547
    if (*cpp != cp)
2548
        return SPLAY_APP_CONTINUE;
2549
    *cpp = NULL;
2550
2551
    return SPLAY_APP_STOP;
2552
}
2553
#endif
2554
2555
/* Remove a clump from the chain.  This is exported for the GC. */
void
alloc_unlink_clump(clump_t * cp, gs_ref_memory_t * mem)
{
    SANITY_CHECK_MID(cp);
#ifdef DEBUG
    if (gs_alloc_debug) { /* Check to make sure this clump belongs to this allocator. */
        clump_t *found = cp;
        clump_splay_app(mem->root, mem, check_in_clump, &found);

        if (found != NULL) {
            mlprintf2((const gs_memory_t *)mem, "unlink_clump "PRI_INTPTR" not owned by memory "PRI_INTPTR"!\n",
                      (intptr_t)cp, (intptr_t)mem);
            return;   /*gs_abort(); */
        }
    }
#endif
    (void)clump_splay_remove(cp, mem);
    /* If this was the current (open) clump, the allocator now has none. */
    if (mem->cc == cp) {
        mem->cc = NULL;
    }
}
2577
2578
/*
 * Free a clump.  This is exported for the GC.  Since we eventually use
 * this to free the clump containing the allocator itself, we must be
 * careful not to reference anything in the allocator after freeing the
 * clump data.
 */
void
alloc_free_clump(clump_t * cp, gs_ref_memory_t * mem)
{
    gs_memory_t *parent = mem->non_gc_memory;
    byte *cdata = (byte *)cp->chead;
    ulong csize = (byte *)cp->cend - cdata;

    alloc_unlink_clump(cp, mem);
    mem->allocated -= st_clump.ssize;
    /* Invalidate the cached locator if it pointed at this clump. */
    if (mem->cfreed.cp == cp)
        mem->cfreed.cp = 0;
    if (cp->outer == 0) {
        /* Top-level clump: its data block was separately allocated. */
        mem->allocated -= csize;
        gs_free_object(parent, cdata, "alloc_free_clump(data)");
    } else {
        /* Inner clump: the data belongs to the outer clump; just
         * scribble the free pattern over it. */
        cp->outer->inner_count--;
        gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
    }
    gs_free_object(parent, cp, "alloc_free_clump(clump struct)");
}
2604
2605
/* Find the clump for a pointer. */
2606
/* Note that this only searches the current save level. */
2607
/* Since a given save level can't contain both a clump and an inner clump */
2608
/* of that clump, we can stop when is_within_clump succeeds, and just test */
2609
/* is_in_inner_clump then. */
2610
bool
2611
clump_locate_ptr(const void *ptr, clump_locator_t * clp)
2612
94.5M
{
2613
94.5M
    clump_t *cp = clp->memory->root;
2614
2615
242M
    while (cp)
2616
237M
    {
2617
237M
        if (PTR_LT(ptr, cp->cbase))
2618
81.8M
        {
2619
81.8M
            cp = cp->left;
2620
81.8M
            continue;
2621
81.8M
        }
2622
155M
        if (PTR_GE(ptr, cp->cend))
2623
66.1M
        {
2624
66.1M
            cp = cp->right;
2625
66.1M
            continue;
2626
66.1M
        }
2627
        /* Found it! */
2628
89.1M
        splay_move_to_root(cp, clp->memory);
2629
89.1M
        clp->cp = cp;
2630
89.1M
        return !ptr_is_in_inner_clump(ptr, cp);
2631
155M
    }
2632
5.47M
    return false;
2633
94.5M
}
2634
2635
/* Return true iff ptr lies within any clump of this allocator. */
bool ptr_is_within_mem_clumps(const void *ptr, gs_ref_memory_t *mem)
{
    clump_t *cp = mem->root;

    /* Descend the address-keyed splay tree of clumps. */
    while (cp != NULL) {
        if (PTR_LT(ptr, cp->cbase)) {
            cp = cp->left;
        } else if (PTR_GE(ptr, cp->cend)) {
            cp = cp->right;
        } else {
            /* Found it: splay for cheap repeated lookups. */
            splay_move_to_root(cp, mem);
            return true;
        }
    }
    return false;
}
2657
2658
/* ------ Debugging ------ */
2659
2660
#ifdef DEBUG
2661
2662
#include "string_.h"
2663
2664
static inline bool
2665
obj_in_control_region(const void *obot, const void *otop,
2666
                      const dump_control_t *pdc)
2667
{
2668
    return
2669
        ((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) &&
2670
         (pdc->top == NULL || PTR_LT(obot, pdc->top)));
2671
}
2672
2673
/* Default dump options, no address-range restriction. */
const dump_control_t dump_control_default =
{
    dump_do_default, NULL, NULL
};
/* Dump everything: strings, type addresses, pointers, pointed-to
 * strings and raw object contents; no address-range restriction. */
const dump_control_t dump_control_all =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings | dump_do_contents, NULL, NULL
};

/* As dump_control_all, but without raw object contents. */
const dump_control_t dump_control_no_contents =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings, NULL, NULL
};
2688
2689
/*
2690
 * Internal procedure to dump a block of memory, in hex and optionally
2691
 * also as characters.
2692
 */
2693
static void
2694
debug_indent(const gs_memory_t *mem, int indent)
2695
{
2696
    int i;
2697
2698
    for (i = indent; i > 0; --i)
2699
        dmputc(mem, ' ');
2700
}
2701
/* Dump the bytes in [bot, top) in 16-byte rows of hex (and optionally
 * also as printable characters), eliding runs of identical rows. */
static void
debug_dump_contents(const gs_memory_t *mem, const byte * bot,
                    const byte * top, int indent, bool as_chars)
{
    const byte *block;

#define block_size 16

    if (bot >= top)
        return;
    /* Align the first row down to a block_size boundary. */
    for (block = bot - ((bot - (byte *) 0) & (block_size - 1));
         block < top; block += block_size
        ) {
        int i;
        char label[12];

        /* Check for repeated blocks. */
        if (block >= bot + block_size &&
            block <= top - (block_size * 2) &&
            !memcmp(block, block - block_size, block_size) &&
            !memcmp(block, block + block_size, block_size)
            ) {
            /* Print "..." only for the first row of an elided run. */
            if (block < bot + block_size * 2 ||
                memcmp(block, block - block_size * 2, block_size)
                ) {
                debug_indent(mem, indent);
                dmputs(mem, "  ...\n");
            }
            continue;
        }
        gs_snprintf(label, sizeof(label), PRI_INTPTR":", (intptr_t)block);
        debug_indent(mem, indent);
        dmputs(mem, label);
        /* Hex row; bytes outside [bot, top) print as blanks. */
        for (i = 0; i < block_size; ++i) {
            const char *sepr = ((i & 3) == 0 && i != 0 ? "  " : " ");

            dmputs(mem, sepr);
            if (block + i >= bot && block + i < top)
                dmprintf1(mem, "%02x", block[i]);
            else
                dmputs(mem, "  ");
        }
        dmputc(mem, '\n');
        if (as_chars) {
            /* Character row aligned under the hex row. */
            debug_indent(mem, indent + strlen(label));
            for (i = 0; i < block_size; ++i) {
                byte ch;

                if ((i & 3) == 0 && i != 0)
                    dmputc(mem, ' ');
                if (block + i >= bot && block + i < top &&
                    (ch = block[i]) >= 32 && ch <= 126
                    )
                    dmprintf1(mem, "  %c", ch);
                else
                    dmputs(mem, "   ");
            }
            dmputc(mem, '\n');
        }
    }
#undef block_size
}
2763
2764
/* Print one object with the given options. */
/* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
/* contents. */
void
debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control)
{
    const obj_header_t *pre = ((const obj_header_t *)obj) - 1;
    ulong size = pre_obj_contents_size(pre);
    const gs_memory_struct_type_t *type = pre->o_type;
    dump_options_t options = control->options;

    dmprintf3(mem, "  pre="PRI_INTPTR"(obj="PRI_INTPTR") size=%lu",
              (intptr_t) pre, (intptr_t) obj, size);
    switch (options & (dump_do_type_addresses | dump_do_no_types)) {
    case dump_do_type_addresses + dump_do_no_types: /* addresses only */
        dmprintf1(mem, " type="PRI_INTPTR"", (intptr_t) type);
        break;
    case dump_do_type_addresses:  /* addresses & names */
        dmprintf2(mem, " type=%s("PRI_INTPTR")", struct_type_name_string(type),
                 (intptr_t)type);
        break;
    case 0:   /* names only */
        dmprintf1(mem, " type=%s", struct_type_name_string(type));
        /* fall through */
    case dump_do_no_types:  /* nothing */
        ;
    }
    if (options & dump_do_marks) {
        dmprintf2(mem, " smark/back=%u (0x%x)", pre->o_smark, pre->o_smark);
    }
    dmputc(mem, '\n');
    /* Free-list entries have no valid contents to dump. */
    if (type == &st_free)
        return;
    if (options & dump_do_pointers) {
        struct_proc_enum_ptrs((*proc)) = type->enum_ptrs;
        uint index = 0;
        enum_ptr_t eptr;
        gs_ptr_type_t ptype;

        if (proc != gs_no_struct_enum_ptrs) {
            if (proc != 0) {
                /* Enumerate every pointer in the object via its type's
                 * enum_ptrs procedure. */
                for (; (ptype = (*proc)(mem, pre + 1, size, index, &eptr, type, NULL)) != 0;
                     ++index
                     ) {
                    const void *ptr = eptr.ptr;

                    dmprintf1(mem, "    ptr %u: ", index);
                    if (ptype == ptr_string_type || ptype == ptr_const_string_type) {
                        const gs_const_string *str = (const gs_const_string *)&eptr;
                        if (!str)
                            dmprintf(mem, "0x0");
                        else
                            dmprintf2(mem, PRI_INTPTR "(%u)", (intptr_t)str->data, str->size);
                        if (options & dump_do_pointed_strings) {
                            dmputs(mem, " =>\n");
                            if (!str)
                                dmprintf(mem, "(null)\n");
                            else
                                debug_dump_contents(mem, str->data, str->data + str->size, 6,
                                                    true);
                        } else {
                            dmputc(mem, '\n');
                        }
                    } else {
                        /* Parenthesize pointers into the object itself. */
                        dmprintf1(mem, (PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ?
                                  "("PRI_INTPTR")\n" : PRI_INTPTR "\n"), (intptr_t) ptr);
                    }
                }
            } else { /* proc == 0 */
                dmprintf(mem, "previous line should be a ref\n");
            }
        } /* proc != gs_no_struct_enum_ptrs */
    }
    if (options & dump_do_contents) {
        debug_dump_contents(mem, (const byte *)obj, (const byte *)obj + size,
                            0, false);
    }
}
2841
2842
/* Print the contents of a clump with the given options. */
/* Relevant options: all. */
void
debug_dump_clump(const gs_memory_t *mem, const clump_t * cp, const dump_control_t * control)
{
    /* Header: every bookkeeping pointer of the clump. */
    dmprintf1(mem, "clump at "PRI_INTPTR":\n", (intptr_t) cp);
    dmprintf3(mem, "   chead="PRI_INTPTR"  cbase="PRI_INTPTR" sbase="PRI_INTPTR"\n",
              (intptr_t)cp->chead, (intptr_t)cp->cbase, (intptr_t)cp->sbase);
    dmprintf3(mem, "    rcur="PRI_INTPTR"   rtop="PRI_INTPTR"  cbot="PRI_INTPTR"\n",
              (intptr_t)cp->rcur, (intptr_t)cp->rtop, (intptr_t)cp->cbot);
    dmprintf4(mem, "    ctop="PRI_INTPTR" climit="PRI_INTPTR" smark="PRI_INTPTR", size=%u\n",
              (intptr_t)cp->ctop, (intptr_t)cp->climit, (intptr_t)cp->smark,
              cp->smark_size);
    dmprintf2(mem, "  sreloc="PRI_INTPTR"   cend="PRI_INTPTR"\n",
              (intptr_t)cp->sreloc, (intptr_t)cp->cend);
    dmprintf6(mem, "left="PRI_INTPTR" right="PRI_INTPTR" parent="PRI_INTPTR" outer="PRI_INTPTR" inner_count=%u has_refs=%s\n",
              (intptr_t)cp->left, (intptr_t)cp->right, (intptr_t)cp->parent, (intptr_t)cp->outer,
              cp->inner_count, (cp->has_refs ? "true" : "false"));

    dmprintf2(mem, "  sfree1="PRI_INTPTR"   sfree="PRI_INTPTR"\n",
              (intptr_t)cp->sfree1, (intptr_t)cp->sfree);
    if (control->options & dump_do_strings) {
        /* Dump the string area, clipped to the control's window. */
        debug_dump_contents(mem, (control->bottom == 0 ? cp->ctop :
                             max(control->bottom, cp->ctop)),
                            (control->top == 0 ? cp->climit :
                             min(control->top, cp->climit)),
                            0, true);
    }
    /* Walk every object in the clump, printing those in the window. */
    SCAN_CLUMP_OBJECTS(cp)
        DO_ALL
        if (obj_in_control_region(pre + 1,
                                  (const byte *)(pre + 1) + size,
                                  control)
        )
        debug_print_object(mem, pre + 1, control);
    END_OBJECTS_SCAN_NO_ABORT
}
2879
void
2880
debug_print_clump(const gs_memory_t *mem, const clump_t * cp)
2881
{
2882
    dump_control_t control;
2883
2884
    control = dump_control_default;
2885
    debug_dump_clump(mem, cp, &control);
2886
}
2887
2888
/* Print the contents of all clumps managed by an allocator. */
2889
/* Relevant options: all. */
2890
void
2891
debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
2892
{
2893
    const clump_t *cp;
2894
    clump_splay_walker sw;
2895
2896
    for (cp = clump_splay_walk_init(&sw, mem); cp != NULL; cp = clump_splay_walk_fwd(&sw))
2897
    {
2898
        if (obj_in_control_region(cp->cbase, cp->cend, control))
2899
            debug_dump_clump((const gs_memory_t *)mem, cp, control);
2900
    }
2901
}
2902
2903
/* Dump an allocator's clumps, omitting raw object contents. */
void
debug_dump_allocator(const gs_ref_memory_t *mem)
{
    debug_dump_memory(mem, &dump_control_no_contents);
}
2908
2909
/* Find all the objects that contain a given pointer. */
void
debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
{
    clump_splay_walker sw;
    dump_control_t control;
    const clump_t *cp;

    /* NOTE(review): only options is initialized; bottom/top are left
     * uninitialized — debug_print_object appears to read only options
     * here, but confirm. */
    control.options = 0;
    for (cp = clump_splay_walk_init(&sw, mem); cp; cp = clump_splay_walk_fwd(&sw))
    {
        SCAN_CLUMP_OBJECTS(cp);
        DO_ALL
            struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
            uint index = 0;
            enum_ptr_t eptr;

            if (proc)   /* doesn't trace refs NB fix me. */
                for (; (*proc)((const gs_memory_t *)mem, pre + 1, size, index,
                               &eptr, pre->o_type, NULL);
                     ++index)
                    if (eptr.ptr == target) {
                        /* Report the matching pointer slot and its owner. */
                        dmprintf1((const gs_memory_t *)mem, "Index %d in", index);
                        debug_print_object((const gs_memory_t *)mem, pre + 1, &control);
                    }
        END_OBJECTS_SCAN_NO_ABORT
    }
}
2937
/* Recursively dump one subtree of the clump splay tree, indented by
 * depth, verifying that each node's parent pointer matches the actual
 * parent it was reached from. */
static void ddct(const gs_memory_t *mem, clump_t *cp, clump_t *parent, int depth)
{
    int i;

    if (cp == NULL)
        return;
    for (i = 0; i < depth; i++)
        dmlprintf(mem, " ");

    dmlprintf7(mem, "Clump "PRI_INTPTR":"PRI_INTPTR" parent="PRI_INTPTR" left="PRI_INTPTR":"PRI_INTPTR" right="PRI_INTPTR":"PRI_INTPTR"\n",
        (intptr_t)cp, (intptr_t)cp->cbase, (intptr_t)cp->parent,
        (intptr_t)cp->left, (intptr_t)(cp->left ? cp->left->cbase : NULL),
        (intptr_t)cp->right, (intptr_t)(cp->right ? cp->right->cbase : NULL));
    if (cp->parent != parent)
        dmlprintf(mem, "Parent pointer mismatch!\n");
    ddct(mem, cp->left, cp, depth+1);
    ddct(mem, cp->right, cp, depth+1);
}
2955
/* Dump the entire clump splay tree of an allocator (debug aid). */
void
debug_dump_clump_tree(const gs_ref_memory_t *mem)
{
    ddct((const gs_memory_t *)mem, mem->root, NULL, 0);
}
2960
2961
#endif /* DEBUG */