Coverage Report

Created: 2025-06-10 07:24

/src/ghostpdl/psi/istack.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2023 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
13
   CA 94129, USA, for further information.
14
*/
15
16
17
/* Manager for expandable stacks of refs */
18
#include "memory_.h"
19
#include "ghost.h"
20
#include "gsstruct.h"
21
#include "gsutil.h"
22
#include "ierrors.h"
23
#include "ialloc.h"
24
#include "istack.h"
25
#include "istkparm.h"
26
#include "istruct.h"    /* for RELOC_REF_VAR */
27
#include "iutil.h"
28
#include "ivmspace.h"   /* for local/global test */
29
#include "store.h"
30
#include "icstate.h"
31
#include "iname.h"
32
#include "dstack.h"
33
#include "idict.h"
34
35
/* Forward references */
36
static void init_block(ref_stack_t *pstack, const ref *pblock_array,
37
                        uint used);
38
static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add);
39
40
/* GC descriptors and procedures */
41
private_st_ref_stack_params();
42
static
CLEAR_MARKS_PROC(ref_stack_clear_marks)
{
    ref_stack_t *const sptr = vptr;

    /* Strip the GC mark bit from the ref holding the current (top)
     * block array so a subsequent mark phase starts from a clean state. */
    r_clear_attrs(&sptr->current, l_mark);
}
49
/* Enumerate the GC roots of a ref_stack: the ref holding the current
 * block array (case 0) and the parameter structure (case 1). */
static
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
case 0: ENUM_RETURN_REF(&sptr->current);
case 1: return ENUM_OBJ(sptr->params);
ENUM_PTRS_END
54
59.7k
/* Relocate the interior pointers (p, bot, top) of a ref_stack after the
 * GC has moved the ref array that backs the current block. */
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
{
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
    /* * align_packed_per_ref, but it need not be a multiple of */
    /* sizeof(ref).  Therefore, we must do the adjustments using */
    /* ref_packed pointers rather than ref pointers. */
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;
    long reloc;

    RELOC_REF_VAR(sptr->current);
    r_clear_attrs(&sptr->current, l_mark);
    /* 'reloc' is how far (in ref_packed units) the block array moved. */
    reloc = bot - (ref_packed *) sptr->current.value.refs;
#define RELOC_P(p)\
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
    RELOC_P(p);
    RELOC_P(bot);
    RELOC_P(top);
#undef RELOC_P
    RELOC_OBJ_VAR(sptr->params);
} RELOC_PTRS_END
74
/* Structure type for a ref_stack. */
75
public_st_ref_stack();
76
77
/* Initialize a stack. */
78
int
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
               uint bot_guard, uint top_guard, const ref *pguard_value,
               gs_ref_memory_t *mem, ref_stack_params_t *params)
{
    uint size = r_size(pblock_array);
    /* Usable element count: total refs minus the block header and
     * the two guard regions. */
    uint avail = size - (stack_block_refs + bot_guard + top_guard);
    ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
    s_ptr body = (s_ptr)(pblock + 1);

    /* Allocate a parameter structure if the caller did not supply one. */
    if (params == 0) {
        params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
                                 &st_ref_stack_params,
                                 "ref_stack_alloc(stack.params)");
        if (params == 0)
            return_error(-1); /* avoid binding in any error codes */
    }

    /* An empty stack has p pointing just below bot. */
    pstack->bot = body + bot_guard;
    pstack->p = pstack->bot - 1;
    pstack->top = pstack->p + avail;
    pstack->current = *pblock_array;
    pstack->extension_size = 0;
    pstack->extension_used = 0;

    make_int(&pstack->max_stack, avail);
    pstack->requested = 0;
    pstack->margin = 0;
    pstack->body_size = avail;

    pstack->params = params;
    pstack->memory = mem;

    params->bot_guard = bot_guard;
    params->top_guard = top_guard;
    params->block_size = size;
    params->data_size = avail;
    if (pguard_value != 0)
        params->guard_value = *pguard_value;
    else
        /* No guard value supplied: use an invalid ref as the filler. */
        make_tav(&params->guard_value, t__invalid, 0, intval, 0);
    params->underflow_error = -1;
    params->overflow_error = -1;
    params->allow_expansion = true;
    init_block(pstack, pblock_array, 0);
    /* Null all body slots so the GC never sees uninitialized refs. */
    refset_null_new(pstack->bot, avail, 0);
    make_empty_array(&pblock->next, 0);
    return 0;
}
127
128
/* Set whether a stack is allowed to expand.  The initial value is true. */
129
void
130
ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
131
9.78k
{
132
9.78k
    pstack->params->allow_expansion = expand;
133
9.78k
}
134
135
/* Set the error codes for under- and overflow.  The initial values are -1. */
136
void
137
ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error,
138
                          int overflow_error)
139
29.3k
{
140
29.3k
    pstack->params->underflow_error = underflow_error;
141
29.3k
    pstack->params->overflow_error = overflow_error;
142
29.3k
}
143
144
/* Set the maximum number of elements allowed on a stack. */
145
int
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
{
    long nmin;

    /* Bypass checking if we're setting the maximum to -1 'no limits' */
    if (nmax == -1) {
        pstack->max_stack.value.intval = nmax;
        return 0;
    }

    /* check how many items we already have on the stack, don't allow
     * a maximum less than this.
     */
    nmin = ref_stack_count_inline(pstack);

    if (nmax < nmin)
        nmax = nmin;
    /* Cap at the number of refs addressable in a uint of bytes. */
    if (nmax > max_uint / sizeof(ref))
        nmax = max_uint / sizeof(ref);
    if (!pstack->params->allow_expansion) {
        /* A non-expandable stack can never hold more than one block. */
        uint ncur = pstack->body_size;

        if (nmax > ncur)
            nmax = ncur;
    }
    pstack->max_stack.value.intval = nmax;
    return 0;
}
174
175
/*
176
 * Set the margin between the limit and the top of the stack.
177
 * Note that this may require allocating a block.
178
 */
179
int
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
{
    const ref_stack_params_t *params = pstack->params;
    uint data_size = params->data_size;

    if (margin <= pstack->margin) {
        /* Shrinking the margin: re-null the newly exposed slots above
         * the current top so the GC never traces stale refs there. */
        refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
    } else {
        /* A margin larger than half the block body is not supported. */
        if (margin > data_size >> 1)
            return_error(gs_error_rangecheck);
        if (pstack->top - pstack->p < margin) {
            /* Not enough headroom in the top block: push a new block,
             * keeping only as many elements as fit below the margin. */
            uint used = pstack->p + 1 - pstack->bot;
            uint keep = data_size - margin;
            int code = ref_stack_push_block(pstack, keep, used - keep);

            if (code < 0)
                return code;
        }
    }
    pstack->margin = margin;
    pstack->body_size = data_size - margin;
    pstack->top = pstack->bot + pstack->body_size - 1;
    return 0;
}
204
205
/* Return the number of elements on a stack. */
206
uint
ref_stack_count(const ref_stack_t *pstack)
{
    /* Out-of-line wrapper: delegates to the inline counting helper. */
    return ref_stack_count_inline(pstack);
}
211
212
/*
213
 * Return a pointer to a given element from the stack, counting from
214
 * 0 as the top element.  If the index is out of range, return 0.
215
 */
216
ref *
ref_stack_index(const ref_stack_t *pstack, long idx)
{
    ref_stack_block *pblock;
    uint used = pstack->p + 1 - pstack->bot;

    if (idx < 0)
        return NULL;
    if (idx < used)    /* common case */
        return pstack->p - (uint) idx;
    /* Element lives in an extension block: walk the chain downward
     * from the current (top) block. */
    pblock = (ref_stack_block *) pstack->current.value.refs;
    do {
        pblock = (ref_stack_block *) pblock->next.value.refs;
        if (pblock == 0)
            return NULL;        /* index exceeds the whole stack depth */
        idx -= used;
        used = r_size(&pblock->used);
    } while (idx >= used);
    /* Within a block, refs are stored bottom-to-top. */
    return pblock->used.value.refs + (used - 1 - (uint) idx);
}
236
237
/*
238
 * Count the number of elements down to and including the first mark.
239
 * If no mark is found, return 0.
240
 */
241
uint
ref_stack_counttomark(const ref_stack_t *pstack)
{
    uint scanned = 0;
    ref_stack_enum_t rsenum;

    /* Scan each block from the top of the stack downward. */
    ref_stack_enum_begin(&rsenum, pstack);
    do {
        uint count = rsenum.size;
        const ref *p = rsenum.ptr + count - 1;

        for (; count; count--, p--)
            if (r_has_type(p, t_mark))
                /* Result counts the mark itself. */
                return scanned + (rsenum.size - count + 1);
        scanned += rsenum.size;
    } while (ref_stack_enum_next(&rsenum));
    return 0;
}
259
260
/*
261
 * Do the store check for storing 'count' elements of a stack, starting
262
 * 'skip' elements below the top, into an array.  Return 0 or gs_error_invalidaccess.
263
 */
264
int
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
                      uint skip)
{
    uint space = r_space(parray);

    /* Storing into local VM can never violate the store invariant,
     * so only non-local destinations need checking. */
    if (space != avm_local) {
        uint left = count, pass = skip;
        ref_stack_enum_t rsenum;

        ref_stack_enum_begin(&rsenum, pstack);
        do {
            ref *ptr = rsenum.ptr;
            uint size = rsenum.size;

            if (size <= pass)
                pass -= size;   /* entire block lies above the region */
            else {
                int code;

                if (pass != 0)
                    size -= pass, pass = 0;
                ptr += size;
                if (size > left)
                    size = left;
                left -= size;
                /* Verify every ref in this slice may be stored into
                 * an object in 'space'. */
                code = refs_check_space(ptr - size, size, space);
                if (code < 0)
                    return code;
                if (left == 0)
                    break;
            }
        } while (ref_stack_enum_next(&rsenum));
    }
    return 0;
}
300
301
/* Copy the array 'sarr' into 'darr', replacing operator entries by name
 * objects (either the systemdict name or a synthesized "--opname--") and
 * recursively copying executable sub-arrays; recursion is capped at
 * depth 50. */
int
ref_stack_array_sanitize(i_ctx_t *i_ctx_p, ref *sarr, ref *darr, int depth)
{
    int i, code;
    ref obj, arr2;
    ref *pobj2;
    gs_memory_t *mem = (gs_memory_t *)idmemory->current;

    if (!r_is_array(sarr) || !r_has_type(darr, t_array))
        return_error(gs_error_typecheck);

    for (i = 0; i < r_size(sarr); i++) {
        code = array_get(mem, sarr, i, &obj);
        if (code < 0)
            /* Unreadable element: substitute null rather than fail. */
            make_null(&obj);
        switch(r_type(&obj)) {
          case t_operator:
          {
            int index = op_index(&obj);

            if (index > 0 && index < op_def_count) {
                const byte *data = (const byte *)(op_index_def(index)->oname + 1);
                if (dict_find_string(systemdict, (const char *)data, &pobj2) <= 0) {
                    /* Operator name not in systemdict: build "--name--".
                     * Size = strlen(data) + 2*2 dashes + NUL = len + 5. */
                    byte *s = gs_alloc_bytes(mem, strlen((char *)data) + 5, "ref_stack_array_sanitize");
                    if (s) {
                        s[0] =  '\0';
                        strcpy((char *)s, "--");
                        strcpy((char *)s + 2, (char *)data);
                        strcpy((char *)s + strlen((char *)data) + 2, "--");
                    }
                    else {
                        /* Allocation failed: fall back to the bare name. */
                        s = (byte *)data;
                    }
                    code = name_ref(imemory, s, strlen((char *)s), &obj, 1);
                    if (code < 0) make_null(&obj);
                    if (s != data)
                        gs_free_object(mem, s, "ref_stack_array_sanitize");
                }
            }
            else {
                /* Out-of-range operator index: replace with null. */
                make_null(&obj);
            }
            ref_assign(darr->value.refs + i, &obj);
            break;
          }
          case t_array:
          case t_shortarray:
          case t_mixedarray:
          {
            int attrs = r_type_attrs(&obj) & (a_write | a_read | a_execute | a_executable);
            /* We only want to copy executable arrays */
            if (attrs & (a_execute | a_executable)) {
                if (++depth > 50) {
                    /* Recursion guard against deeply nested procedures. */
                    code = gs_error_limitcheck;
                }
                else {
                    code = ialloc_ref_array(&arr2, attrs, r_size(&obj), "ref_stack_array_sanitize");
                }
                if (code < 0) {
                    make_null(&arr2);
                }
                else {
                    code = ref_stack_array_sanitize(i_ctx_p, &obj, &arr2, depth);
                    if (code < 0) {
                        ifree_ref_array(&arr2, "ref_stack_array_sanitize");
                        return code;
                    }
                }
                ref_assign(darr->value.refs + i, &arr2);
            }
            else {
                ref_assign(darr->value.refs + i, &obj);
            }
            break;
          }
          default:
            /* All other types are copied through unchanged. */
            ref_assign(darr->value.refs + i, &obj);
        }
    }
    return 0;
}
382
383
384
/*
385
 * Store the top 'count' elements of a stack, starting 'skip' elements below
386
 * the top, into an array, with or without store/undo checking.  age=-1 for
387
 * no check, 0 for old, 1 for new.  May return gs_error_rangecheck or
388
 * gs_error_invalidaccess.
389
 */
390
#undef idmemory     /****** NOTA BENE ******/
391
int
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
                uint skip, int age, bool check, gs_dual_memory_t *idmemory,
                client_name_t cname)
{
    uint left, pass;
    ref *to;
    ref_stack_enum_t rsenum;

    if (count > ref_stack_count(pstack) || count > r_size(parray))
        return_error(gs_error_rangecheck);
    if (check) {
        int code = ref_stack_store_check(pstack, parray, count, skip);

        if (code < 0)
            return code;
    }
    /* Fill the array from its end backward while walking the stack
     * from the top block downward. */
    to = parray->value.refs + count;
    left = count, pass = skip;
    ref_stack_enum_begin(&rsenum, pstack);
    do {
        ref *from = rsenum.ptr;
        uint size = rsenum.size;

        if (size <= pass)
            pass -= size;       /* this block is entirely skipped */
        else {
            if (pass != 0)
                size -= pass, pass = 0;
            from += size;
            if (size > left)
                size = left;
            left -= size;
            /* 'age' selects the save/undo-checking flavor per element. */
            switch (age) {
            case -1:    /* not an array */
                while (size--) {
                    from--, to--;
                    ref_assign(to, from);
                }
                break;
            case 0:   /* old array */
                while (size--) {
                    from--, to--;
                    ref_assign_old(parray, to, from, cname);
                }
                break;
            case 1:   /* new array */
                while (size--) {
                    from--, to--;
                    ref_assign_new(to, from);
                }
                break;
            }
            if (left == 0)
                break;
        }
    } while (ref_stack_enum_next(&rsenum));
    r_set_size(parray, count);
    return 0;
}
451
452
/*
453
 * Pop the top N elements off a stack.
454
 * The number must not exceed the number of elements in use.
455
 */
456
void
457
ref_stack_pop(ref_stack_t *pstack, uint count)
458
104M
{
459
104M
    uint used;
460
461
104M
    while ((used = pstack->p + 1 - pstack->bot) <= count &&
462
104M
            pstack->extension_used > 0) {
463
294k
        count -= used;
464
294k
        pstack->p = pstack->bot - 1;
465
294k
        ref_stack_pop_block(pstack);
466
294k
    }
467
104M
    pstack->p -= count;
468
104M
}
469
470
/* Pop the top block off a stack.  May return underflow_error. */
471
int
ref_stack_pop_block(ref_stack_t *pstack)
{
    s_ptr bot = pstack->bot;
    uint count = pstack->p + 1 - bot;
    ref_stack_block *pcur =
    (ref_stack_block *) pstack->current.value.refs;
    ref_stack_block *pnext =
    (ref_stack_block *) pcur->next.value.refs;
    uint used;
    ref *body;
    ref next;

    if (pnext == 0)
        return_error(pstack->params->underflow_error);
    used = r_size(&pnext->used);
    body = (ref *) (pnext + 1) + pstack->params->bot_guard;
    /* Save the link ref now: the merge path below frees pcur's array. */
    next = pcur->next;
    /*
       * If the contents of the two blocks won't fit in a single block, we
       * move up the used part of the top block, and copy up as much of
       * the contents of the next block under it as will fit.  If the
       * contents of both blocks fit in a single block, we copy the used
       * part of the top block to the top of the next block, and free the
       * top block.
     */
    if (used + count > pstack->body_size) {
        /*
         * The contents of the two blocks won't fit into a single block.
         * On the assumption that we're recovering from a local stack
         * underflow and need to increase the number of contiguous
         * elements available, move up the used part of the top block, and
         * copy up as much of the contents of the next block under it as
         * will fit.
         */
        uint moved = pstack->body_size - count;
        uint left;

        if (moved == 0)
            return_error(gs_error_Fatal);
        /* Overlapping shift within the top block: must use memmove. */
        memmove(bot + moved, bot, count * sizeof(ref));
        left = used - moved;
        memcpy(bot, body + left, moved * sizeof(ref));
        /* Null the vacated slots in the lower block for the GC. */
        refset_null_new(body + left, moved, 0);
        r_dec_size(&pnext->used, moved);
        pstack->p = pstack->top;
        pstack->extension_used -= moved;
    } else {
        /*
           * The contents of the two blocks will fit into a single block.
           * Copy the used part of the top block to the top of the next
           * block, and free the top block.
         */
        memcpy(body + used, bot, count * sizeof(ref));
        pstack->bot = bot = body;
        pstack->top = bot + pstack->body_size - 1;
        gs_free_ref_array(pstack->memory, &pstack->current,
                          "ref_stack_pop_block");
        pstack->current = next;
        pstack->p = bot + (used + count - 1);
        pstack->extension_size -= pstack->body_size;
        pstack->extension_used -= used;
    }
    return 0;
}
536
537
/*
538
 * Extend a stack to recover from an overflow condition.
539
 * May return overflow_error or gs_error_VMerror.
540
 */
541
int
542
ref_stack_extend(ref_stack_t *pstack, uint request)
543
289k
{
544
289k
    uint keep = (pstack->top - pstack->bot + 1) / 3;
545
289k
    uint count = pstack->p - pstack->bot + 1;
546
289k
    const ref_stack_params_t *params = pstack->params;
547
548
289k
    if (request > params->data_size)
549
0
        return_error(params->overflow_error);
550
289k
    if (keep + request > pstack->body_size)
551
0
        keep = pstack->body_size - request;
552
289k
    if (keep > count)
553
0
        keep = count;   /* required by ref_stack_push_block */
554
289k
    return ref_stack_push_block(pstack, keep, request);
555
289k
}
556
557
/*
558
 * Push N empty slots onto a stack.  These slots are not initialized:
559
 * the caller must immediately fill them.  May return overflow_error
560
 * (if max_stack would be exceeded, or the stack has no allocator)
561
 * or gs_error_VMerror.
562
 */
563
int
ref_stack_push(ref_stack_t *pstack, uint count)
{
    /* Don't bother to pre-check for overflow: we must be able to */
    /* back out in the case of a VMerror anyway, and */
    /* ref_stack_push_block will make the check itself. */
    uint needed = count;
    uint added;

    for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
        int code;

        /* Claim the rest of the current block, then grow by one block. */
        pstack->p = pstack->top;
        code = ref_stack_push_block(pstack,
                                    (pstack->top - pstack->bot + 1) / 3,
                                    added);
        if (code < 0) {
            /* Back out. */
            ref_stack_pop(pstack, count - needed + added);
            /* Record the shortfall so callers can retry after a GC. */
            pstack->requested = count;
            return code;
        }
    }
    pstack->p += needed;
    return 0;
}
589
590
/*
591
 * Push a block onto the stack, specifying how many elements of the current
592
 * top block should remain in the top block and also how many elements we
593
 * are trying to add.  Requires keep <= count.  May return overflow_error or
594
 * gs_error_VMerror.
595
 */
596
static int
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
{
    const ref_stack_params_t *params = pstack->params;
    uint count = pstack->p - pstack->bot + 1;
    /* 'move' elements stay behind in the old block; 'keep' move up. */
    uint move = count - keep;
    ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
    ref next;
    ref_stack_block *pnext;
    ref *body;
    int code;

    if (keep > count)
        return_error(gs_error_Fatal);
    /* Check for overflowing the maximum size, */
    /* or expansion not allowed.  */
    /* Or specifically allowing unlimited expansion */
    if (pstack->max_stack.value.intval > 0) {
        if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
            pstack->max_stack.value.intval ||
            !params->allow_expansion
            )
            return_error(params->overflow_error);
    }
    code = gs_alloc_ref_array(pstack->memory, &next, 0,
                              params->block_size, "ref_stack_push_block");
    if (code < 0)
        return code;
    pnext = (ref_stack_block *) next.value.refs;
    body = (ref *) (pnext + 1);
    /* Copy the top keep elements into the new block, */
    /* and make the new block the top block. */
    init_block(pstack, &next, keep);
    body += params->bot_guard;
    memcpy(body, pstack->bot + move, keep * sizeof(ref));
    /* Clear the elements above the top of the new block. */
    refset_null_new(body + keep, params->data_size - keep, 0);
    /* Clear the elements above the top of the old block. */
    refset_null_new(pstack->bot + move, keep, 0);
    /* Link the old block beneath the new one and record its used part. */
    pnext->next = pstack->current;
    pcur->used.value.refs = pstack->bot;
    r_set_size(&pcur->used, move);
    pstack->current = next;
    pstack->bot = body;
    pstack->top = pstack->bot + pstack->body_size - 1;
    pstack->p = pstack->bot + keep - 1;
    pstack->extension_size += pstack->body_size;
    pstack->extension_used += move;
    return 0;
}
646
647
/* Begin enumerating the blocks of a stack. */
648
void
649
ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack)
650
88.0M
{
651
88.0M
    prse->block = (ref_stack_block *)pstack->current.value.refs;
652
88.0M
    prse->ptr = pstack->bot;
653
88.0M
    prse->size = pstack->p + 1 - pstack->bot;
654
88.0M
}
655
656
bool
657
ref_stack_enum_next(ref_stack_enum_t *prse)
658
851k
{
659
851k
    ref_stack_block *block =
660
851k
        prse->block = (ref_stack_block *)prse->block->next.value.refs;
661
662
851k
    if (block == 0)
663
541k
        return false;
664
309k
    prse->ptr = block->used.value.refs;
665
309k
    prse->size = r_size(&block->used);
666
309k
    return true;
667
851k
}
668
669
/* Clean up a stack for garbage collection. */
670
void
ref_stack_cleanup(ref_stack_t *pstack)
{
    ref_stack_block *pblock =
        (ref_stack_block *) pstack->current.value.refs;

    /* Null the unused slots above the stack pointer so the GC does not
     * trace stale refs there. */
    refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
    pblock->used = pstack->current; /* set attrs */
    pblock->used.value.refs = pstack->bot;
    r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
}
681
682
/*
683
 * Free the entire contents of a stack, including the bottom block.
684
 * The client must still call ref_stack_free.  Note that after calling
685
 * ref_stack_release, the stack is no longer usable.
686
 */
687
void
688
ref_stack_release(ref_stack_t *pstack)
689
0
{
690
0
    gs_ref_memory_t *mem = pstack->memory;
691
692
0
    ref_stack_clear(pstack);
693
    /* Free the parameter structure. */
694
0
    gs_free_object((gs_memory_t *)mem, pstack->params,
695
0
                   "ref_stack_release(stack.params)");
696
    /* Free the original (bottom) block. */
697
0
    gs_free_ref_array(mem, &pstack->current, "ref_stack_release");
698
0
}
699
700
/*
701
 * Release a stack and then free the ref_stack object.
702
 */
703
void
704
ref_stack_free(ref_stack_t *pstack)
705
0
{
706
0
    gs_memory_t *mem = (gs_memory_t *)pstack->memory;
707
708
0
    ref_stack_release(pstack);
709
0
    gs_free_object(mem, pstack, "ref_stack_free");
710
0
}
711
712
/* ------ Internal routines ------ */
713
714
/* Initialize the guards and body of a stack block. */
715
static void
init_block(ref_stack_t *pstack, const ref *psb, uint used)
{
    /* NOTE(review): the 'used' parameter is not referenced in this body;
     * callers pass the element count they are about to install. */
    ref_stack_params_t *params = pstack->params;
    ref *brefs = psb->value.refs;
    uint i;
    ref *p;

    /* Fill the bottom guard slots with the configured guard value. */
    for (i = params->bot_guard, p = brefs + stack_block_refs;
         i != 0; i--, p++
        )
        ref_assign(p, &params->guard_value);
    /* The top guard elements will never be read, */
    /* but we need to initialize them for the sake of the GC. */
    /* We can use refset_null for this, because even though it uses */
    /* make_null_new and stack elements must not be marked new, */
    /* these slots will never actually be read or written. */
    if (params->top_guard) {
        ref *top = brefs + r_size(psb);
        int top_guard = params->top_guard;

        refset_null_new(top - top_guard, top_guard, 0);
    } {
        /* Set up the block header's 'used' ref (initially empty). */
        ref_stack_block *const pblock = (ref_stack_block *) brefs;

        pblock->used = *psb;
        pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
        r_set_size(&pblock->used, 0);
    }
}