Coverage Report

Created: 2025-06-10 07:27

/src/ghostpdl/psi/istack.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2023 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
13
   CA 94129, USA, for further information.
14
*/
15
16
17
/* Manager for expandable stacks of refs */
18
#include "memory_.h"
19
#include "ghost.h"
20
#include "gsstruct.h"
21
#include "gsutil.h"
22
#include "ierrors.h"
23
#include "ialloc.h"
24
#include "istack.h"
25
#include "istkparm.h"
26
#include "istruct.h"    /* for RELOC_REF_VAR */
27
#include "iutil.h"
28
#include "ivmspace.h"   /* for local/global test */
29
#include "store.h"
30
#include "icstate.h"
31
#include "iname.h"
32
#include "dstack.h"
33
#include "idict.h"
34
35
/* Forward references */
36
static void init_block(ref_stack_t *pstack, const ref *pblock_array,
37
                        uint used);
38
static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add);
39
40
/* GC descriptors and procedures */
41
private_st_ref_stack_params();
42
static
43
CLEAR_MARKS_PROC(ref_stack_clear_marks)
44
0
{
45
0
    ref_stack_t *const sptr = vptr;
46
47
0
    r_clear_attrs(&sptr->current, l_mark);
48
0
}
49
static
50
4.69M
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
51
2.01M
case 0: ENUM_RETURN_REF(&sptr->current);
52
2.01M
case 1: return ENUM_OBJ(sptr->params);
53
4.69M
ENUM_PTRS_END
54
1.00M
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
55
1.00M
{
56
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
57
    /* * align_packed_per_ref, but it need not be a multiple of */
58
    /* sizeof(ref).  Therefore, we must do the adjustments using */
59
    /* ref_packed pointers rather than ref pointers. */
60
1.00M
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;
61
1.00M
    long reloc;
62
63
1.00M
    RELOC_REF_VAR(sptr->current);
64
1.00M
    r_clear_attrs(&sptr->current, l_mark);
65
1.00M
    reloc = bot - (ref_packed *) sptr->current.value.refs;
66
1.00M
#define RELOC_P(p)\
67
3.01M
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
68
1.00M
    RELOC_P(p);
69
1.00M
    RELOC_P(bot);
70
1.00M
    RELOC_P(top);
71
1.00M
#undef RELOC_P
72
1.00M
    RELOC_OBJ_VAR(sptr->params);
73
1.00M
} RELOC_PTRS_END
74
/* Structure type for a ref_stack. */
75
public_st_ref_stack();
76
77
/* Initialize a stack. */
78
int
79
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
80
               uint bot_guard, uint top_guard, const ref *pguard_value,
81
               gs_ref_memory_t *mem, ref_stack_params_t *params)
82
479k
{
83
479k
    uint size = r_size(pblock_array);
84
479k
    uint avail = size - (stack_block_refs + bot_guard + top_guard);
85
479k
    ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
86
479k
    s_ptr body = (s_ptr)(pblock + 1);
87
88
479k
    if (params == 0) {
89
479k
        params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
90
479k
                                 &st_ref_stack_params,
91
479k
                                 "ref_stack_alloc(stack.params)");
92
479k
        if (params == 0)
93
0
            return_error(-1); /* avoid binding in any error codes */
94
479k
    }
95
96
479k
    pstack->bot = body + bot_guard;
97
479k
    pstack->p = pstack->bot - 1;
98
479k
    pstack->top = pstack->p + avail;
99
479k
    pstack->current = *pblock_array;
100
479k
    pstack->extension_size = 0;
101
479k
    pstack->extension_used = 0;
102
103
479k
    make_int(&pstack->max_stack, avail);
104
479k
    pstack->requested = 0;
105
479k
    pstack->margin = 0;
106
479k
    pstack->body_size = avail;
107
108
479k
    pstack->params = params;
109
479k
    pstack->memory = mem;
110
111
479k
    params->bot_guard = bot_guard;
112
479k
    params->top_guard = top_guard;
113
479k
    params->block_size = size;
114
479k
    params->data_size = avail;
115
479k
    if (pguard_value != 0)
116
159k
        params->guard_value = *pguard_value;
117
319k
    else
118
319k
        make_tav(&params->guard_value, t__invalid, 0, intval, 0);
119
479k
    params->underflow_error = -1;
120
479k
    params->overflow_error = -1;
121
479k
    params->allow_expansion = true;
122
479k
    init_block(pstack, pblock_array, 0);
123
479k
    refset_null_new(pstack->bot, avail, 0);
124
479k
    make_empty_array(&pblock->next, 0);
125
479k
    return 0;
126
479k
}
127
128
/* Set whether a stack is allowed to expand.  The initial value is true. */
129
void
130
ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
131
159k
{
132
159k
    pstack->params->allow_expansion = expand;
133
159k
}
134
135
/* Set the error codes for under- and overflow.  The initial values are -1. */
136
void
137
ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error,
138
                          int overflow_error)
139
479k
{
140
479k
    pstack->params->underflow_error = underflow_error;
141
479k
    pstack->params->overflow_error = overflow_error;
142
479k
}
143
144
/* Set the maximum number of elements allowed on a stack. */
145
int
146
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
147
2.58M
{
148
2.58M
    long nmin;
149
150
    /* Bypass checking if we're setting the maximum to -1 'no limits' */
151
2.58M
    if (nmax == -1) {
152
0
        pstack->max_stack.value.intval = nmax;
153
0
        return 0;
154
0
    }
155
156
    /* check how many items we already have on the stack, don't allow
157
     * a maximum less than this.
158
     */
159
2.58M
    nmin = ref_stack_count_inline(pstack);
160
161
2.58M
    if (nmax < nmin)
162
0
        nmax = nmin;
163
2.58M
    if (nmax > max_uint / sizeof(ref))
164
0
        nmax = max_uint / sizeof(ref);
165
2.58M
    if (!pstack->params->allow_expansion) {
166
861k
        uint ncur = pstack->body_size;
167
168
861k
        if (nmax > ncur)
169
0
            nmax = ncur;
170
861k
    }
171
2.58M
    pstack->max_stack.value.intval = nmax;
172
2.58M
    return 0;
173
2.58M
}
174
175
/*
176
 * Set the margin between the limit and the top of the stack.
177
 * Note that this may require allocating a block.
178
 */
179
int
180
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
181
0
{
182
0
    const ref_stack_params_t *params = pstack->params;
183
0
    uint data_size = params->data_size;
184
185
0
    if (margin <= pstack->margin) {
186
0
        refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
187
0
    } else {
188
0
        if (margin > data_size >> 1)
189
0
            return_error(gs_error_rangecheck);
190
0
        if (pstack->top - pstack->p < margin) {
191
0
            uint used = pstack->p + 1 - pstack->bot;
192
0
            uint keep = data_size - margin;
193
0
            int code = ref_stack_push_block(pstack, keep, used - keep);
194
195
0
            if (code < 0)
196
0
                return code;
197
0
        }
198
0
    }
199
0
    pstack->margin = margin;
200
0
    pstack->body_size = data_size - margin;
201
0
    pstack->top = pstack->bot + pstack->body_size - 1;
202
0
    return 0;
203
0
}
204
205
/* Return the number of elements on a stack. */
206
uint
207
ref_stack_count(const ref_stack_t *pstack)
208
248M
{
209
248M
    return ref_stack_count_inline(pstack);
210
248M
}
211
212
/*
213
 * Return a pointer to a given element from the stack, counting from
214
 * 0 as the top element.  If the index is out of range, return 0.
215
 */
216
ref *
217
ref_stack_index(const ref_stack_t *pstack, long idx)
218
22.4G
{
219
22.4G
    ref_stack_block *pblock;
220
22.4G
    uint used = pstack->p + 1 - pstack->bot;
221
222
22.4G
    if (idx < 0)
223
0
        return NULL;
224
22.4G
    if (idx < used)    /* common case */
225
20.0G
        return pstack->p - (uint) idx;
226
2.45G
    pblock = (ref_stack_block *) pstack->current.value.refs;
227
22.3G
    do {
228
22.3G
        pblock = (ref_stack_block *) pblock->next.value.refs;
229
22.3G
        if (pblock == 0)
230
2.55M
            return NULL;
231
22.3G
        idx -= used;
232
22.3G
        used = r_size(&pblock->used);
233
22.3G
    } while (idx >= used);
234
2.45G
    return pblock->used.value.refs + (used - 1 - (uint) idx);
235
2.45G
}
236
237
/*
238
 * Count the number of elements down to and including the first mark.
239
 * If no mark is found, return 0.
240
 */
241
uint
242
ref_stack_counttomark(const ref_stack_t *pstack)
243
1.15G
{
244
1.15G
    uint scanned = 0;
245
1.15G
    ref_stack_enum_t rsenum;
246
247
1.15G
    ref_stack_enum_begin(&rsenum, pstack);
248
1.16G
    do {
249
1.16G
        uint count = rsenum.size;
250
1.16G
        const ref *p = rsenum.ptr + count - 1;
251
252
8.58G
        for (; count; count--, p--)
253
8.57G
            if (r_has_type(p, t_mark))
254
1.15G
                return scanned + (rsenum.size - count + 1);
255
4.66M
        scanned += rsenum.size;
256
4.66M
    } while (ref_stack_enum_next(&rsenum));
257
247
    return 0;
258
1.15G
}
259
260
/*
261
 * Do the store check for storing 'count' elements of a stack, starting
262
 * 'skip' elements below the top, into an array.  Return 0 or gs_error_invalidaccess.
263
 */
264
int
265
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
266
                      uint skip)
267
913k
{
268
913k
    uint space = r_space(parray);
269
270
913k
    if (space != avm_local) {
271
173k
        uint left = count, pass = skip;
272
173k
        ref_stack_enum_t rsenum;
273
274
173k
        ref_stack_enum_begin(&rsenum, pstack);
275
346k
        do {
276
346k
            ref *ptr = rsenum.ptr;
277
346k
            uint size = rsenum.size;
278
279
346k
            if (size <= pass)
280
0
                pass -= size;
281
346k
            else {
282
346k
                int code;
283
284
346k
                if (pass != 0)
285
173k
                    size -= pass, pass = 0;
286
346k
                ptr += size;
287
346k
                if (size > left)
288
173k
                    size = left;
289
346k
                left -= size;
290
346k
                code = refs_check_space(ptr - size, size, space);
291
346k
                if (code < 0)
292
0
                    return code;
293
346k
                if (left == 0)
294
173k
                    break;
295
346k
            }
296
346k
        } while (ref_stack_enum_next(&rsenum));
297
173k
    }
298
913k
    return 0;
299
913k
}
300
301
int
302
ref_stack_array_sanitize(i_ctx_t *i_ctx_p, ref *sarr, ref *darr, int depth)
303
0
{
304
0
    int i, code;
305
0
    ref obj, arr2;
306
0
    ref *pobj2;
307
0
    gs_memory_t *mem = (gs_memory_t *)idmemory->current;
308
309
0
    if (!r_is_array(sarr) || !r_has_type(darr, t_array))
310
0
        return_error(gs_error_typecheck);
311
312
0
    for (i = 0; i < r_size(sarr); i++) {
313
0
        code = array_get(mem, sarr, i, &obj);
314
0
        if (code < 0)
315
0
            make_null(&obj);
316
0
        switch(r_type(&obj)) {
317
0
          case t_operator:
318
0
          {
319
0
            int index = op_index(&obj);
320
321
0
            if (index > 0 && index < op_def_count) {
322
0
                const byte *data = (const byte *)(op_index_def(index)->oname + 1);
323
0
                if (dict_find_string(systemdict, (const char *)data, &pobj2) <= 0) {
324
0
                    byte *s = gs_alloc_bytes(mem, strlen((char *)data) + 5, "ref_stack_array_sanitize");
325
0
                    if (s) {
326
0
                        s[0] =  '\0';
327
0
                        strcpy((char *)s, "--");
328
0
                        strcpy((char *)s + 2, (char *)data);
329
0
                        strcpy((char *)s + strlen((char *)data) + 2, "--");
330
0
                    }
331
0
                    else {
332
0
                        s = (byte *)data;
333
0
                    }
334
0
                    code = name_ref(imemory, s, strlen((char *)s), &obj, 1);
335
0
                    if (code < 0) make_null(&obj);
336
0
                    if (s != data)
337
0
                        gs_free_object(mem, s, "ref_stack_array_sanitize");
338
0
                }
339
0
            }
340
0
            else {
341
0
                make_null(&obj);
342
0
            }
343
0
            ref_assign(darr->value.refs + i, &obj);
344
0
            break;
345
0
          }
346
0
          case t_array:
347
0
          case t_shortarray:
348
0
          case t_mixedarray:
349
0
          {
350
0
            int attrs = r_type_attrs(&obj) & (a_write | a_read | a_execute | a_executable);
351
            /* We only want to copy executable arrays */
352
0
            if (attrs & (a_execute | a_executable)) {
353
0
                if (++depth > 50) {
354
0
                    code = gs_error_limitcheck;
355
0
                }
356
0
                else {
357
0
                    code = ialloc_ref_array(&arr2, attrs, r_size(&obj), "ref_stack_array_sanitize");
358
0
                }
359
0
                if (code < 0) {
360
0
                    make_null(&arr2);
361
0
                }
362
0
                else {
363
0
                    code = ref_stack_array_sanitize(i_ctx_p, &obj, &arr2, depth);
364
0
                    if (code < 0) {
365
0
                        ifree_ref_array(&arr2, "ref_stack_array_sanitize");
366
0
                        return code;
367
0
                    }
368
0
                }
369
0
                ref_assign(darr->value.refs + i, &arr2);
370
0
            }
371
0
            else {
372
0
                ref_assign(darr->value.refs + i, &obj);
373
0
            }
374
0
            break;
375
0
          }
376
0
          default:
377
0
            ref_assign(darr->value.refs + i, &obj);
378
0
        }
379
0
    }
380
0
    return 0;
381
0
}
382
383
384
/*
385
 * Store the top 'count' elements of a stack, starting 'skip' elements below
386
 * the top, into an array, with or without store/undo checking.  age=-1 for
387
 * no check, 0 for old, 1 for new.  May return gs_error_rangecheck or
388
 * gs_error_invalidaccess.
389
 */
390
#undef idmemory     /****** NOTA BENE ******/
391
int
392
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
393
                uint skip, int age, bool check, gs_dual_memory_t *idmemory,
394
                client_name_t cname)
395
22.6M
{
396
22.6M
    uint left, pass;
397
22.6M
    ref *to;
398
22.6M
    ref_stack_enum_t rsenum;
399
400
22.6M
    if (count > ref_stack_count(pstack) || count > r_size(parray))
401
0
        return_error(gs_error_rangecheck);
402
22.6M
    if (check) {
403
538k
        int code = ref_stack_store_check(pstack, parray, count, skip);
404
405
538k
        if (code < 0)
406
0
            return code;
407
538k
    }
408
22.6M
    to = parray->value.refs + count;
409
22.6M
    left = count, pass = skip;
410
22.6M
    ref_stack_enum_begin(&rsenum, pstack);
411
22.9M
    do {
412
22.9M
        ref *from = rsenum.ptr;
413
22.9M
        uint size = rsenum.size;
414
415
22.9M
        if (size <= pass)
416
0
            pass -= size;
417
22.9M
        else {
418
22.9M
            if (pass != 0)
419
175k
                size -= pass, pass = 0;
420
22.9M
            from += size;
421
22.9M
            if (size > left)
422
22.3M
                size = left;
423
22.9M
            left -= size;
424
22.9M
            switch (age) {
425
0
            case -1:    /* not an array */
426
0
                while (size--) {
427
0
                    from--, to--;
428
0
                    ref_assign(to, from);
429
0
                }
430
0
                break;
431
782k
            case 0:   /* old array */
432
216M
                while (size--) {
433
215M
                    from--, to--;
434
215M
                    ref_assign_old(parray, to, from, cname);
435
215M
                }
436
782k
                break;
437
22.1M
            case 1:   /* new array */
438
199M
                while (size--) {
439
177M
                    from--, to--;
440
177M
                    ref_assign_new(to, from);
441
177M
                }
442
22.1M
                break;
443
22.9M
            }
444
22.9M
            if (left == 0)
445
22.6M
                break;
446
22.9M
        }
447
22.9M
    } while (ref_stack_enum_next(&rsenum));
448
22.6M
    r_set_size(parray, count);
449
22.6M
    return 0;
450
22.6M
}
451
452
/*
453
 * Pop the top N elements off a stack.
454
 * The number must not exceed the number of elements in use.
455
 */
456
void
457
ref_stack_pop(ref_stack_t *pstack, uint count)
458
1.74G
{
459
1.74G
    uint used;
460
461
1.74G
    while ((used = pstack->p + 1 - pstack->bot) <= count &&
462
1.74G
            pstack->extension_used > 0) {
463
4.99M
        count -= used;
464
4.99M
        pstack->p = pstack->bot - 1;
465
4.99M
        ref_stack_pop_block(pstack);
466
4.99M
    }
467
1.74G
    pstack->p -= count;
468
1.74G
}
469
470
/* Pop the top block off a stack.  May return underflow_error. */
471
int
472
ref_stack_pop_block(ref_stack_t *pstack)
473
5.04M
{
474
5.04M
    s_ptr bot = pstack->bot;
475
5.04M
    uint count = pstack->p + 1 - bot;
476
5.04M
    ref_stack_block *pcur =
477
5.04M
    (ref_stack_block *) pstack->current.value.refs;
478
5.04M
    ref_stack_block *pnext =
479
5.04M
    (ref_stack_block *) pcur->next.value.refs;
480
5.04M
    uint used;
481
5.04M
    ref *body;
482
5.04M
    ref next;
483
484
5.04M
    if (pnext == 0)
485
3.12k
        return_error(pstack->params->underflow_error);
486
5.03M
    used = r_size(&pnext->used);
487
5.03M
    body = (ref *) (pnext + 1) + pstack->params->bot_guard;
488
5.03M
    next = pcur->next;
489
    /*
490
       * If the contents of the two blocks won't fit in a single block, we
491
       * move up the used part of the top block, and copy up as much of
492
       * the contents of the next block under it as will fit.  If the
493
       * contents of both blocks fit in a single block, we copy the used
494
       * part of the top block to the top of the next block, and free the
495
       * top block.
496
     */
497
5.03M
    if (used + count > pstack->body_size) {
498
        /*
499
         * The contents of the two blocks won't fit into a single block.
500
         * On the assumption that we're recovering from a local stack
501
         * underflow and need to increase the number of contiguous
502
         * elements available, move up the used part of the top block, and
503
         * copy up as much of the contents of the next block under it as
504
         * will fit.
505
         */
506
11.9k
        uint moved = pstack->body_size - count;
507
11.9k
        uint left;
508
509
11.9k
        if (moved == 0)
510
0
            return_error(gs_error_Fatal);
511
11.9k
        memmove(bot + moved, bot, count * sizeof(ref));
512
11.9k
        left = used - moved;
513
11.9k
        memcpy(bot, body + left, moved * sizeof(ref));
514
11.9k
        refset_null_new(body + left, moved, 0);
515
11.9k
        r_dec_size(&pnext->used, moved);
516
11.9k
        pstack->p = pstack->top;
517
11.9k
        pstack->extension_used -= moved;
518
5.02M
    } else {
519
        /*
520
           * The contents of the two blocks will fit into a single block.
521
           * Copy the used part of the top block to the top of the next
522
           * block, and free the top block.
523
         */
524
5.02M
        memcpy(body + used, bot, count * sizeof(ref));
525
5.02M
        pstack->bot = bot = body;
526
5.02M
        pstack->top = bot + pstack->body_size - 1;
527
5.02M
        gs_free_ref_array(pstack->memory, &pstack->current,
528
5.02M
                          "ref_stack_pop_block");
529
5.02M
        pstack->current = next;
530
5.02M
        pstack->p = bot + (used + count - 1);
531
5.02M
        pstack->extension_size -= pstack->body_size;
532
5.02M
        pstack->extension_used -= used;
533
5.02M
    }
534
5.03M
    return 0;
535
5.03M
}
536
537
/*
538
 * Extend a stack to recover from an overflow condition.
539
 * May return overflow_error or gs_error_VMerror.
540
 */
541
int
542
ref_stack_extend(ref_stack_t *pstack, uint request)
543
4.90M
{
544
4.90M
    uint keep = (pstack->top - pstack->bot + 1) / 3;
545
4.90M
    uint count = pstack->p - pstack->bot + 1;
546
4.90M
    const ref_stack_params_t *params = pstack->params;
547
548
4.90M
    if (request > params->data_size)
549
0
        return_error(params->overflow_error);
550
4.90M
    if (keep + request > pstack->body_size)
551
10
        keep = pstack->body_size - request;
552
4.90M
    if (keep > count)
553
0
        keep = count;   /* required by ref_stack_push_block */
554
4.90M
    return ref_stack_push_block(pstack, keep, request);
555
4.90M
}
556
557
/*
558
 * Push N empty slots onto a stack.  These slots are not initialized:
559
 * the caller must immediately fill them.  May return overflow_error
560
 * (if max_stack would be exceeded, or the stack has no allocator)
561
 * or gs_error_VMerror.
562
 */
563
int
564
ref_stack_push(ref_stack_t *pstack, uint count)
565
54.2k
{
566
    /* Don't bother to pre-check for overflow: we must be able to */
567
    /* back out in the case of a VMerror anyway, and */
568
    /* ref_stack_push_block will make the check itself. */
569
54.2k
    uint needed = count;
570
54.2k
    uint added;
571
572
177k
    for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
573
122k
        int code;
574
575
122k
        pstack->p = pstack->top;
576
122k
        code = ref_stack_push_block(pstack,
577
122k
                                    (pstack->top - pstack->bot + 1) / 3,
578
122k
                                    added);
579
122k
        if (code < 0) {
580
            /* Back out. */
581
13
            ref_stack_pop(pstack, count - needed + added);
582
13
            pstack->requested = count;
583
13
            return code;
584
13
        }
585
122k
    }
586
54.2k
    pstack->p += needed;
587
54.2k
    return 0;
588
54.2k
}
589
590
/*
591
 * Push a block onto the stack, specifying how many elements of the current
592
 * top block should remain in the top block and also how many elements we
593
 * are trying to add.  Requires keep <= count.  May return overflow_error or
594
 * gs_error_VMerror.
595
 */
596
static int
597
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
598
5.02M
{
599
5.02M
    const ref_stack_params_t *params = pstack->params;
600
5.02M
    uint count = pstack->p - pstack->bot + 1;
601
5.02M
    uint move = count - keep;
602
5.02M
    ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
603
5.02M
    ref next;
604
5.02M
    ref_stack_block *pnext;
605
5.02M
    ref *body;
606
5.02M
    int code;
607
608
5.02M
    if (keep > count)
609
0
        return_error(gs_error_Fatal);
610
    /* Check for overflowing the maximum size, */
611
    /* or expansion not allowed.  */
612
    /* Or specifically allowing unlimited expansion */
613
5.02M
    if (pstack->max_stack.value.intval > 0) {
614
5.02M
        if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
615
5.02M
            pstack->max_stack.value.intval ||
616
5.02M
            !params->allow_expansion
617
5.02M
            )
618
273
            return_error(params->overflow_error);
619
5.02M
    }
620
5.02M
    code = gs_alloc_ref_array(pstack->memory, &next, 0,
621
5.02M
                              params->block_size, "ref_stack_push_block");
622
5.02M
    if (code < 0)
623
0
        return code;
624
5.02M
    pnext = (ref_stack_block *) next.value.refs;
625
5.02M
    body = (ref *) (pnext + 1);
626
    /* Copy the top keep elements into the new block, */
627
    /* and make the new block the top block. */
628
5.02M
    init_block(pstack, &next, keep);
629
5.02M
    body += params->bot_guard;
630
5.02M
    memcpy(body, pstack->bot + move, keep * sizeof(ref));
631
    /* Clear the elements above the top of the new block. */
632
5.02M
    refset_null_new(body + keep, params->data_size - keep, 0);
633
    /* Clear the elements above the top of the old block. */
634
5.02M
    refset_null_new(pstack->bot + move, keep, 0);
635
5.02M
    pnext->next = pstack->current;
636
5.02M
    pcur->used.value.refs = pstack->bot;
637
5.02M
    r_set_size(&pcur->used, move);
638
5.02M
    pstack->current = next;
639
5.02M
    pstack->bot = body;
640
5.02M
    pstack->top = pstack->bot + pstack->body_size - 1;
641
5.02M
    pstack->p = pstack->bot + keep - 1;
642
5.02M
    pstack->extension_size += pstack->body_size;
643
5.02M
    pstack->extension_used += move;
644
5.02M
    return 0;
645
5.02M
}
646
647
/* Begin enumerating the blocks of a stack. */
648
void
649
ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack)
650
1.44G
{
651
1.44G
    prse->block = (ref_stack_block *)pstack->current.value.refs;
652
1.44G
    prse->ptr = pstack->bot;
653
1.44G
    prse->size = pstack->p + 1 - pstack->bot;
654
1.44G
}
655
656
bool
657
ref_stack_enum_next(ref_stack_enum_t *prse)
658
15.3M
{
659
15.3M
    ref_stack_block *block =
660
15.3M
        prse->block = (ref_stack_block *)prse->block->next.value.refs;
661
662
15.3M
    if (block == 0)
663
10.2M
        return false;
664
5.11M
    prse->ptr = block->used.value.refs;
665
5.11M
    prse->size = r_size(&block->used);
666
5.11M
    return true;
667
15.3M
}
668
669
/* Clean up a stack for garbage collection. */
670
void
671
ref_stack_cleanup(ref_stack_t *pstack)
672
1.00M
{
673
1.00M
    ref_stack_block *pblock =
674
1.00M
        (ref_stack_block *) pstack->current.value.refs;
675
676
1.00M
    refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
677
1.00M
    pblock->used = pstack->current; /* set attrs */
678
1.00M
    pblock->used.value.refs = pstack->bot;
679
1.00M
    r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
680
1.00M
}
681
682
/*
683
 * Free the entire contents of a stack, including the bottom block.
684
 * The client must still call ref_stack_free.  Note that after calling
685
 * ref_stack_release, the stack is no longer usable.
686
 */
687
void
688
ref_stack_release(ref_stack_t *pstack)
689
0
{
690
0
    gs_ref_memory_t *mem = pstack->memory;
691
692
0
    ref_stack_clear(pstack);
693
    /* Free the parameter structure. */
694
0
    gs_free_object((gs_memory_t *)mem, pstack->params,
695
0
                   "ref_stack_release(stack.params)");
696
    /* Free the original (bottom) block. */
697
0
    gs_free_ref_array(mem, &pstack->current, "ref_stack_release");
698
0
}
699
700
/*
701
 * Release a stack and then free the ref_stack object.
702
 */
703
void
704
ref_stack_free(ref_stack_t *pstack)
705
0
{
706
0
    gs_memory_t *mem = (gs_memory_t *)pstack->memory;
707
708
0
    ref_stack_release(pstack);
709
0
    gs_free_object(mem, pstack, "ref_stack_free");
710
0
}
711
712
/* ------ Internal routines ------ */
713
714
/* Initialize the guards and body of a stack block. */
715
static void
716
init_block(ref_stack_t *pstack, const ref *psb, uint used)
717
5.50M
{
718
5.50M
    ref_stack_params_t *params = pstack->params;
719
5.50M
    ref *brefs = psb->value.refs;
720
5.50M
    uint i;
721
5.50M
    ref *p;
722
723
5.50M
    for (i = params->bot_guard, p = brefs + stack_block_refs;
724
57.5M
         i != 0; i--, p++
725
5.50M
        )
726
52.0M
        ref_assign(p, &params->guard_value);
727
    /* The top guard elements will never be read, */
728
    /* but we need to initialize them for the sake of the GC. */
729
    /* We can use refset_null for this, because even though it uses */
730
    /* make_null_new and stack elements must not be marked new, */
731
    /* these slots will never actually be read or written. */
732
5.50M
    if (params->top_guard) {
733
5.34M
        ref *top = brefs + r_size(psb);
734
5.34M
        int top_guard = params->top_guard;
735
736
5.34M
        refset_null_new(top - top_guard, top_guard, 0);
737
5.34M
    } {
738
5.50M
        ref_stack_block *const pblock = (ref_stack_block *) brefs;
739
740
5.50M
        pblock->used = *psb;
741
5.50M
        pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
742
5.50M
        r_set_size(&pblock->used, 0);
743
5.50M
    }
744
5.50M
}