Coverage Report

Created: 2025-06-10 07:17

/src/ghostpdl/psi/istack.c
Line
Count
Source (jump to first uncovered line)
1
/* Copyright (C) 2001-2023 Artifex Software, Inc.
2
   All Rights Reserved.
3
4
   This software is provided AS-IS with no warranty, either express or
5
   implied.
6
7
   This software is distributed under license and may not be copied,
8
   modified or distributed except as expressly authorized under the terms
9
   of the license contained in the file LICENSE in this distribution.
10
11
   Refer to licensing information at http://www.artifex.com or contact
12
   Artifex Software, Inc.,  39 Mesa Street, Suite 108A, San Francisco,
13
   CA 94129, USA, for further information.
14
*/
15
16
17
/* Manager for expandable stacks of refs */
18
#include "memory_.h"
19
#include "ghost.h"
20
#include "gsstruct.h"
21
#include "gsutil.h"
22
#include "ierrors.h"
23
#include "ialloc.h"
24
#include "istack.h"
25
#include "istkparm.h"
26
#include "istruct.h"    /* for RELOC_REF_VAR */
27
#include "iutil.h"
28
#include "ivmspace.h"   /* for local/global test */
29
#include "store.h"
30
#include "icstate.h"
31
#include "iname.h"
32
#include "dstack.h"
33
#include "idict.h"
34
35
/* Forward references */
36
static void init_block(ref_stack_t *pstack, const ref *pblock_array,
37
                        uint used);
38
static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add);
39
40
/* GC descriptors and procedures */
41
private_st_ref_stack_params();
42
static
43
CLEAR_MARKS_PROC(ref_stack_clear_marks)
44
0
{
45
0
    ref_stack_t *const sptr = vptr;
46
47
0
    r_clear_attrs(&sptr->current, l_mark);
48
0
}
49
static
50
283k
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
51
121k
case 0: ENUM_RETURN_REF(&sptr->current);
52
121k
case 1: return ENUM_OBJ(sptr->params);
53
283k
ENUM_PTRS_END
54
60.6k
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
55
60.6k
{
56
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
57
    /* * align_packed_per_ref, but it need not be a multiple of */
58
    /* sizeof(ref).  Therefore, we must do the adjustments using */
59
    /* ref_packed pointers rather than ref pointers. */
60
60.6k
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;
61
60.6k
    long reloc;
62
63
60.6k
    RELOC_REF_VAR(sptr->current);
64
60.6k
    r_clear_attrs(&sptr->current, l_mark);
65
60.6k
    reloc = bot - (ref_packed *) sptr->current.value.refs;
66
60.6k
#define RELOC_P(p)\
67
182k
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
68
60.6k
    RELOC_P(p);
69
60.6k
    RELOC_P(bot);
70
60.6k
    RELOC_P(top);
71
60.6k
#undef RELOC_P
72
60.6k
    RELOC_OBJ_VAR(sptr->params);
73
60.6k
} RELOC_PTRS_END
74
/* Structure type for a ref_stack. */
75
public_st_ref_stack();
76
77
/* Initialize a stack. */
78
int
79
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
80
               uint bot_guard, uint top_guard, const ref *pguard_value,
81
               gs_ref_memory_t *mem, ref_stack_params_t *params)
82
28.6k
{
83
28.6k
    uint size = r_size(pblock_array);
84
28.6k
    uint avail = size - (stack_block_refs + bot_guard + top_guard);
85
28.6k
    ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
86
28.6k
    s_ptr body = (s_ptr)(pblock + 1);
87
88
28.6k
    if (params == 0) {
89
28.6k
        params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
90
28.6k
                                 &st_ref_stack_params,
91
28.6k
                                 "ref_stack_alloc(stack.params)");
92
28.6k
        if (params == 0)
93
0
            return_error(-1); /* avoid binding in any error codes */
94
28.6k
    }
95
96
28.6k
    pstack->bot = body + bot_guard;
97
28.6k
    pstack->p = pstack->bot - 1;
98
28.6k
    pstack->top = pstack->p + avail;
99
28.6k
    pstack->current = *pblock_array;
100
28.6k
    pstack->extension_size = 0;
101
28.6k
    pstack->extension_used = 0;
102
103
28.6k
    make_int(&pstack->max_stack, avail);
104
28.6k
    pstack->requested = 0;
105
28.6k
    pstack->margin = 0;
106
28.6k
    pstack->body_size = avail;
107
108
28.6k
    pstack->params = params;
109
28.6k
    pstack->memory = mem;
110
111
28.6k
    params->bot_guard = bot_guard;
112
28.6k
    params->top_guard = top_guard;
113
28.6k
    params->block_size = size;
114
28.6k
    params->data_size = avail;
115
28.6k
    if (pguard_value != 0)
116
9.56k
        params->guard_value = *pguard_value;
117
19.1k
    else
118
19.1k
        make_tav(&params->guard_value, t__invalid, 0, intval, 0);
119
28.6k
    params->underflow_error = -1;
120
28.6k
    params->overflow_error = -1;
121
28.6k
    params->allow_expansion = true;
122
28.6k
    init_block(pstack, pblock_array, 0);
123
28.6k
    refset_null_new(pstack->bot, avail, 0);
124
28.6k
    make_empty_array(&pblock->next, 0);
125
28.6k
    return 0;
126
28.6k
}
127
128
/* Set whether a stack is allowed to expand.  The initial value is true. */
129
void
130
ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
131
9.56k
{
132
9.56k
    pstack->params->allow_expansion = expand;
133
9.56k
}
134
135
/* Set the error codes for under- and overflow.  The initial values are -1. */
136
void
137
ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error,
138
                          int overflow_error)
139
28.6k
{
140
28.6k
    pstack->params->underflow_error = underflow_error;
141
28.6k
    pstack->params->overflow_error = overflow_error;
142
28.6k
}
143
144
/* Set the maximum number of elements allowed on a stack. */
145
int
146
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
147
145k
{
148
145k
    long nmin;
149
150
    /* Bypass checking if we're setting the maximum to -1 'no limits' */
151
145k
    if (nmax == -1) {
152
0
        pstack->max_stack.value.intval = nmax;
153
0
        return 0;
154
0
    }
155
156
    /* check how many items we already have on the stack, don't allow
157
     * a maximum less than this.
158
     */
159
145k
    nmin = ref_stack_count_inline(pstack);
160
161
145k
    if (nmax < nmin)
162
0
        nmax = nmin;
163
145k
    if (nmax > max_uint / sizeof(ref))
164
0
        nmax = max_uint / sizeof(ref);
165
145k
    if (!pstack->params->allow_expansion) {
166
48.6k
        uint ncur = pstack->body_size;
167
168
48.6k
        if (nmax > ncur)
169
0
            nmax = ncur;
170
48.6k
    }
171
145k
    pstack->max_stack.value.intval = nmax;
172
145k
    return 0;
173
145k
}
174
175
/*
176
 * Set the margin between the limit and the top of the stack.
177
 * Note that this may require allocating a block.
178
 */
179
int
180
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
181
0
{
182
0
    const ref_stack_params_t *params = pstack->params;
183
0
    uint data_size = params->data_size;
184
185
0
    if (margin <= pstack->margin) {
186
0
        refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
187
0
    } else {
188
0
        if (margin > data_size >> 1)
189
0
            return_error(gs_error_rangecheck);
190
0
        if (pstack->top - pstack->p < margin) {
191
0
            uint used = pstack->p + 1 - pstack->bot;
192
0
            uint keep = data_size - margin;
193
0
            int code = ref_stack_push_block(pstack, keep, used - keep);
194
195
0
            if (code < 0)
196
0
                return code;
197
0
        }
198
0
    }
199
0
    pstack->margin = margin;
200
0
    pstack->body_size = data_size - margin;
201
0
    pstack->top = pstack->bot + pstack->body_size - 1;
202
0
    return 0;
203
0
}
204
205
/* Return the number of elements on a stack. */
206
uint
207
ref_stack_count(const ref_stack_t *pstack)
208
14.8M
{
209
14.8M
    return ref_stack_count_inline(pstack);
210
14.8M
}
211
212
/*
213
 * Return a pointer to a given element from the stack, counting from
214
 * 0 as the top element.  If the index is out of range, return 0.
215
 */
216
ref *
217
ref_stack_index(const ref_stack_t *pstack, long idx)
218
1.32G
{
219
1.32G
    ref_stack_block *pblock;
220
1.32G
    uint used = pstack->p + 1 - pstack->bot;
221
222
1.32G
    if (idx < 0)
223
0
        return NULL;
224
1.32G
    if (idx < used)    /* common case */
225
1.18G
        return pstack->p - (uint) idx;
226
146M
    pblock = (ref_stack_block *) pstack->current.value.refs;
227
1.31G
    do {
228
1.31G
        pblock = (ref_stack_block *) pblock->next.value.refs;
229
1.31G
        if (pblock == 0)
230
153k
            return NULL;
231
1.31G
        idx -= used;
232
1.31G
        used = r_size(&pblock->used);
233
1.31G
    } while (idx >= used);
234
146M
    return pblock->used.value.refs + (used - 1 - (uint) idx);
235
146M
}
236
237
/*
238
 * Count the number of elements down to and including the first mark.
239
 * If no mark is found, return 0.
240
 */
241
uint
242
ref_stack_counttomark(const ref_stack_t *pstack)
243
68.9M
{
244
68.9M
    uint scanned = 0;
245
68.9M
    ref_stack_enum_t rsenum;
246
247
68.9M
    ref_stack_enum_begin(&rsenum, pstack);
248
69.2M
    do {
249
69.2M
        uint count = rsenum.size;
250
69.2M
        const ref *p = rsenum.ptr + count - 1;
251
252
496M
        for (; count; count--, p--)
253
496M
            if (r_has_type(p, t_mark))
254
68.9M
                return scanned + (rsenum.size - count + 1);
255
278k
        scanned += rsenum.size;
256
278k
    } while (ref_stack_enum_next(&rsenum));
257
17
    return 0;
258
68.9M
}
259
260
/*
261
 * Do the store check for storing 'count' elements of a stack, starting
262
 * 'skip' elements below the top, into an array.  Return 0 or gs_error_invalidaccess.
263
 */
264
int
265
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
266
                      uint skip)
267
50.4k
{
268
50.4k
    uint space = r_space(parray);
269
270
50.4k
    if (space != avm_local) {
271
10.4k
        uint left = count, pass = skip;
272
10.4k
        ref_stack_enum_t rsenum;
273
274
10.4k
        ref_stack_enum_begin(&rsenum, pstack);
275
20.8k
        do {
276
20.8k
            ref *ptr = rsenum.ptr;
277
20.8k
            uint size = rsenum.size;
278
279
20.8k
            if (size <= pass)
280
0
                pass -= size;
281
20.8k
            else {
282
20.8k
                int code;
283
284
20.8k
                if (pass != 0)
285
10.4k
                    size -= pass, pass = 0;
286
20.8k
                ptr += size;
287
20.8k
                if (size > left)
288
10.4k
                    size = left;
289
20.8k
                left -= size;
290
20.8k
                code = refs_check_space(ptr - size, size, space);
291
20.8k
                if (code < 0)
292
0
                    return code;
293
20.8k
                if (left == 0)
294
10.4k
                    break;
295
20.8k
            }
296
20.8k
        } while (ref_stack_enum_next(&rsenum));
297
10.4k
    }
298
50.4k
    return 0;
299
50.4k
}
300
301
int
302
ref_stack_array_sanitize(i_ctx_t *i_ctx_p, ref *sarr, ref *darr, int depth)
303
0
{
304
0
    int i, code;
305
0
    ref obj, arr2;
306
0
    ref *pobj2;
307
0
    gs_memory_t *mem = (gs_memory_t *)idmemory->current;
308
309
0
    if (!r_is_array(sarr) || !r_has_type(darr, t_array))
310
0
        return_error(gs_error_typecheck);
311
312
0
    for (i = 0; i < r_size(sarr); i++) {
313
0
        code = array_get(mem, sarr, i, &obj);
314
0
        if (code < 0)
315
0
            make_null(&obj);
316
0
        switch(r_type(&obj)) {
317
0
          case t_operator:
318
0
          {
319
0
            int index = op_index(&obj);
320
321
0
            if (index > 0 && index < op_def_count) {
322
0
                const byte *data = (const byte *)(op_index_def(index)->oname + 1);
323
0
                if (dict_find_string(systemdict, (const char *)data, &pobj2) <= 0) {
324
0
                    byte *s = gs_alloc_bytes(mem, strlen((char *)data) + 5, "ref_stack_array_sanitize");
325
0
                    if (s) {
326
0
                        s[0] =  '\0';
327
0
                        strcpy((char *)s, "--");
328
0
                        strcpy((char *)s + 2, (char *)data);
329
0
                        strcpy((char *)s + strlen((char *)data) + 2, "--");
330
0
                    }
331
0
                    else {
332
0
                        s = (byte *)data;
333
0
                    }
334
0
                    code = name_ref(imemory, s, strlen((char *)s), &obj, 1);
335
0
                    if (code < 0) make_null(&obj);
336
0
                    if (s != data)
337
0
                        gs_free_object(mem, s, "ref_stack_array_sanitize");
338
0
                }
339
0
            }
340
0
            else {
341
0
                make_null(&obj);
342
0
            }
343
0
            ref_assign(darr->value.refs + i, &obj);
344
0
            break;
345
0
          }
346
0
          case t_array:
347
0
          case t_shortarray:
348
0
          case t_mixedarray:
349
0
          {
350
0
            int attrs = r_type_attrs(&obj) & (a_write | a_read | a_execute | a_executable);
351
            /* We only want to copy executable arrays */
352
0
            if (attrs & (a_execute | a_executable)) {
353
0
                if (++depth > 50) {
354
0
                    code = gs_error_limitcheck;
355
0
                }
356
0
                else {
357
0
                    code = ialloc_ref_array(&arr2, attrs, r_size(&obj), "ref_stack_array_sanitize");
358
0
                }
359
0
                if (code < 0) {
360
0
                    make_null(&arr2);
361
0
                }
362
0
                else {
363
0
                    code = ref_stack_array_sanitize(i_ctx_p, &obj, &arr2, depth);
364
0
                    if (code < 0) {
365
0
                        ifree_ref_array(&arr2, "ref_stack_array_sanitize");
366
0
                        return code;
367
0
                    }
368
0
                }
369
0
                ref_assign(darr->value.refs + i, &arr2);
370
0
            }
371
0
            else {
372
0
                ref_assign(darr->value.refs + i, &obj);
373
0
            }
374
0
            break;
375
0
          }
376
0
          default:
377
0
            ref_assign(darr->value.refs + i, &obj);
378
0
        }
379
0
    }
380
0
    return 0;
381
0
}
382
383
384
/*
385
 * Store the top 'count' elements of a stack, starting 'skip' elements below
386
 * the top, into an array, with or without store/undo checking.  age=-1 for
387
 * no check, 0 for old, 1 for new.  May return gs_error_rangecheck or
388
 * gs_error_invalidaccess.
389
 */
390
#undef idmemory     /****** NOTA BENE ******/
391
int
392
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
393
                uint skip, int age, bool check, gs_dual_memory_t *idmemory,
394
                client_name_t cname)
395
1.30M
{
396
1.30M
    uint left, pass;
397
1.30M
    ref *to;
398
1.30M
    ref_stack_enum_t rsenum;
399
400
1.30M
    if (count > ref_stack_count(pstack) || count > r_size(parray))
401
0
        return_error(gs_error_rangecheck);
402
1.30M
    if (check) {
403
30.0k
        int code = ref_stack_store_check(pstack, parray, count, skip);
404
405
30.0k
        if (code < 0)
406
0
            return code;
407
30.0k
    }
408
1.30M
    to = parray->value.refs + count;
409
1.30M
    left = count, pass = skip;
410
1.30M
    ref_stack_enum_begin(&rsenum, pstack);
411
1.32M
    do {
412
1.32M
        ref *from = rsenum.ptr;
413
1.32M
        uint size = rsenum.size;
414
415
1.32M
        if (size <= pass)
416
0
            pass -= size;
417
1.32M
        else {
418
1.32M
            if (pass != 0)
419
10.5k
                size -= pass, pass = 0;
420
1.32M
            from += size;
421
1.32M
            if (size > left)
422
1.28M
                size = left;
423
1.32M
            left -= size;
424
1.32M
            switch (age) {
425
0
            case -1:    /* not an array */
426
0
                while (size--) {
427
0
                    from--, to--;
428
0
                    ref_assign(to, from);
429
0
                }
430
0
                break;
431
45.3k
            case 0:   /* old array */
432
13.3M
                while (size--) {
433
13.2M
                    from--, to--;
434
13.2M
                    ref_assign_old(parray, to, from, cname);
435
13.2M
                }
436
45.3k
                break;
437
1.27M
            case 1:   /* new array */
438
13.5M
                while (size--) {
439
12.2M
                    from--, to--;
440
12.2M
                    ref_assign_new(to, from);
441
12.2M
                }
442
1.27M
                break;
443
1.32M
            }
444
1.32M
            if (left == 0)
445
1.30M
                break;
446
1.32M
        }
447
1.32M
    } while (ref_stack_enum_next(&rsenum));
448
1.30M
    r_set_size(parray, count);
449
1.30M
    return 0;
450
1.30M
}
451
452
/*
453
 * Pop the top N elements off a stack.
454
 * The number must not exceed the number of elements in use.
455
 */
456
void
457
ref_stack_pop(ref_stack_t *pstack, uint count)
458
101M
{
459
101M
    uint used;
460
461
102M
    while ((used = pstack->p + 1 - pstack->bot) <= count &&
462
102M
            pstack->extension_used > 0) {
463
314k
        count -= used;
464
314k
        pstack->p = pstack->bot - 1;
465
314k
        ref_stack_pop_block(pstack);
466
314k
    }
467
101M
    pstack->p -= count;
468
101M
}
469
470
/* Pop the top block off a stack.  May return underflow_error. */
471
int
472
ref_stack_pop_block(ref_stack_t *pstack)
473
317k
{
474
317k
    s_ptr bot = pstack->bot;
475
317k
    uint count = pstack->p + 1 - bot;
476
317k
    ref_stack_block *pcur =
477
317k
    (ref_stack_block *) pstack->current.value.refs;
478
317k
    ref_stack_block *pnext =
479
317k
    (ref_stack_block *) pcur->next.value.refs;
480
317k
    uint used;
481
317k
    ref *body;
482
317k
    ref next;
483
484
317k
    if (pnext == 0)
485
197
        return_error(pstack->params->underflow_error);
486
317k
    used = r_size(&pnext->used);
487
317k
    body = (ref *) (pnext + 1) + pstack->params->bot_guard;
488
317k
    next = pcur->next;
489
    /*
490
       * If the contents of the two blocks won't fit in a single block, we
491
       * move up the used part of the top block, and copy up as much of
492
       * the contents of the next block under it as will fit.  If the
493
       * contents of both blocks fit in a single block, we copy the used
494
       * part of the top block to the top of the next block, and free the
495
       * top block.
496
     */
497
317k
    if (used + count > pstack->body_size) {
498
        /*
499
         * The contents of the two blocks won't fit into a single block.
500
         * On the assumption that we're recovering from a local stack
501
         * underflow and need to increase the number of contiguous
502
         * elements available, move up the used part of the top block, and
503
         * copy up as much of the contents of the next block under it as
504
         * will fit.
505
         */
506
490
        uint moved = pstack->body_size - count;
507
490
        uint left;
508
509
490
        if (moved == 0)
510
0
            return_error(gs_error_Fatal);
511
490
        memmove(bot + moved, bot, count * sizeof(ref));
512
490
        left = used - moved;
513
490
        memcpy(bot, body + left, moved * sizeof(ref));
514
490
        refset_null_new(body + left, moved, 0);
515
490
        r_dec_size(&pnext->used, moved);
516
490
        pstack->p = pstack->top;
517
490
        pstack->extension_used -= moved;
518
316k
    } else {
519
        /*
520
           * The contents of the two blocks will fit into a single block.
521
           * Copy the used part of the top block to the top of the next
522
           * block, and free the top block.
523
         */
524
316k
        memcpy(body + used, bot, count * sizeof(ref));
525
316k
        pstack->bot = bot = body;
526
316k
        pstack->top = bot + pstack->body_size - 1;
527
316k
        gs_free_ref_array(pstack->memory, &pstack->current,
528
316k
                          "ref_stack_pop_block");
529
316k
        pstack->current = next;
530
316k
        pstack->p = bot + (used + count - 1);
531
316k
        pstack->extension_size -= pstack->body_size;
532
316k
        pstack->extension_used -= used;
533
316k
    }
534
317k
    return 0;
535
317k
}
536
537
/*
538
 * Extend a stack to recover from an overflow condition.
539
 * May return overflow_error or gs_error_VMerror.
540
 */
541
int
542
ref_stack_extend(ref_stack_t *pstack, uint request)
543
309k
{
544
309k
    uint keep = (pstack->top - pstack->bot + 1) / 3;
545
309k
    uint count = pstack->p - pstack->bot + 1;
546
309k
    const ref_stack_params_t *params = pstack->params;
547
548
309k
    if (request > params->data_size)
549
0
        return_error(params->overflow_error);
550
309k
    if (keep + request > pstack->body_size)
551
1
        keep = pstack->body_size - request;
552
309k
    if (keep > count)
553
0
        keep = count;   /* required by ref_stack_push_block */
554
309k
    return ref_stack_push_block(pstack, keep, request);
555
309k
}
556
557
/*
558
 * Push N empty slots onto a stack.  These slots are not initialized:
559
 * the caller must immediately fill them.  May return overflow_error
560
 * (if max_stack would be exceeded, or the stack has no allocator)
561
 * or gs_error_VMerror.
562
 */
563
int
564
ref_stack_push(ref_stack_t *pstack, uint count)
565
2.94k
{
566
    /* Don't bother to pre-check for overflow: we must be able to */
567
    /* back out in the case of a VMerror anyway, and */
568
    /* ref_stack_push_block will make the check itself. */
569
2.94k
    uint needed = count;
570
2.94k
    uint added;
571
572
10.6k
    for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
573
7.66k
        int code;
574
575
7.66k
        pstack->p = pstack->top;
576
7.66k
        code = ref_stack_push_block(pstack,
577
7.66k
                                    (pstack->top - pstack->bot + 1) / 3,
578
7.66k
                                    added);
579
7.66k
        if (code < 0) {
580
            /* Back out. */
581
1
            ref_stack_pop(pstack, count - needed + added);
582
1
            pstack->requested = count;
583
1
            return code;
584
1
        }
585
7.66k
    }
586
2.94k
    pstack->p += needed;
587
2.94k
    return 0;
588
2.94k
}
589
590
/*
591
 * Push a block onto the stack, specifying how many elements of the current
592
 * top block should remain in the top block and also how many elements we
593
 * are trying to add.  Requires keep <= count.  May return overflow_error or
594
 * gs_error_VMerror.
595
 */
596
static int
597
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
598
316k
{
599
316k
    const ref_stack_params_t *params = pstack->params;
600
316k
    uint count = pstack->p - pstack->bot + 1;
601
316k
    uint move = count - keep;
602
316k
    ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
603
316k
    ref next;
604
316k
    ref_stack_block *pnext;
605
316k
    ref *body;
606
316k
    int code;
607
608
316k
    if (keep > count)
609
0
        return_error(gs_error_Fatal);
610
    /* Check for overflowing the maximum size, */
611
    /* or expansion not allowed.  */
612
    /* Or specifically allowing unlimited expansion */
613
316k
    if (pstack->max_stack.value.intval > 0) {
614
316k
        if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
615
316k
            pstack->max_stack.value.intval ||
616
316k
            !params->allow_expansion
617
316k
            )
618
42
            return_error(params->overflow_error);
619
316k
    }
620
316k
    code = gs_alloc_ref_array(pstack->memory, &next, 0,
621
316k
                              params->block_size, "ref_stack_push_block");
622
316k
    if (code < 0)
623
0
        return code;
624
316k
    pnext = (ref_stack_block *) next.value.refs;
625
316k
    body = (ref *) (pnext + 1);
626
    /* Copy the top keep elements into the new block, */
627
    /* and make the new block the top block. */
628
316k
    init_block(pstack, &next, keep);
629
316k
    body += params->bot_guard;
630
316k
    memcpy(body, pstack->bot + move, keep * sizeof(ref));
631
    /* Clear the elements above the top of the new block. */
632
316k
    refset_null_new(body + keep, params->data_size - keep, 0);
633
    /* Clear the elements above the top of the old block. */
634
316k
    refset_null_new(pstack->bot + move, keep, 0);
635
316k
    pnext->next = pstack->current;
636
316k
    pcur->used.value.refs = pstack->bot;
637
316k
    r_set_size(&pcur->used, move);
638
316k
    pstack->current = next;
639
316k
    pstack->bot = body;
640
316k
    pstack->top = pstack->bot + pstack->body_size - 1;
641
316k
    pstack->p = pstack->bot + keep - 1;
642
316k
    pstack->extension_size += pstack->body_size;
643
316k
    pstack->extension_used += move;
644
316k
    return 0;
645
316k
}
646
647
/* Begin enumerating the blocks of a stack. */
648
void
649
ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack)
650
85.9M
{
651
85.9M
    prse->block = (ref_stack_block *)pstack->current.value.refs;
652
85.9M
    prse->ptr = pstack->bot;
653
85.9M
    prse->size = pstack->p + 1 - pstack->bot;
654
85.9M
}
655
656
bool
657
ref_stack_enum_next(ref_stack_enum_t *prse)
658
965k
{
659
965k
    ref_stack_block *block =
660
965k
        prse->block = (ref_stack_block *)prse->block->next.value.refs;
661
662
965k
    if (block == 0)
663
655k
        return false;
664
309k
    prse->ptr = block->used.value.refs;
665
309k
    prse->size = r_size(&block->used);
666
309k
    return true;
667
965k
}
668
669
/* Clean up a stack for garbage collection. */
670
void
671
ref_stack_cleanup(ref_stack_t *pstack)
672
60.6k
{
673
60.6k
    ref_stack_block *pblock =
674
60.6k
        (ref_stack_block *) pstack->current.value.refs;
675
676
60.6k
    refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
677
60.6k
    pblock->used = pstack->current; /* set attrs */
678
60.6k
    pblock->used.value.refs = pstack->bot;
679
60.6k
    r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
680
60.6k
}
681
682
/*
683
 * Free the entire contents of a stack, including the bottom block.
684
 * The client must still call ref_stack_free.  Note that after calling
685
 * ref_stack_release, the stack is no longer usable.
686
 */
687
void
688
ref_stack_release(ref_stack_t *pstack)
689
0
{
690
0
    gs_ref_memory_t *mem = pstack->memory;
691
692
0
    ref_stack_clear(pstack);
693
    /* Free the parameter structure. */
694
0
    gs_free_object((gs_memory_t *)mem, pstack->params,
695
0
                   "ref_stack_release(stack.params)");
696
    /* Free the original (bottom) block. */
697
0
    gs_free_ref_array(mem, &pstack->current, "ref_stack_release");
698
0
}
699
700
/*
701
 * Release a stack and then free the ref_stack object.
702
 */
703
void
704
ref_stack_free(ref_stack_t *pstack)
705
0
{
706
0
    gs_memory_t *mem = (gs_memory_t *)pstack->memory;
707
708
0
    ref_stack_release(pstack);
709
0
    gs_free_object(mem, pstack, "ref_stack_free");
710
0
}
711
712
/* ------ Internal routines ------ */
713
714
/* Initialize the guards and body of a stack block. */
715
static void
716
init_block(ref_stack_t *pstack, const ref *psb, uint used)
717
345k
{
718
345k
    ref_stack_params_t *params = pstack->params;
719
345k
    ref *brefs = psb->value.refs;
720
345k
    uint i;
721
345k
    ref *p;
722
723
345k
    for (i = params->bot_guard, p = brefs + stack_block_refs;
724
3.61M
         i != 0; i--, p++
725
345k
        )
726
3.27M
        ref_assign(p, &params->guard_value);
727
    /* The top guard elements will never be read, */
728
    /* but we need to initialize them for the sake of the GC. */
729
    /* We can use refset_null for this, because even though it uses */
730
    /* make_null_new and stack elements must not be marked new, */
731
    /* these slots will never actually be read or written. */
732
345k
    if (params->top_guard) {
733
335k
        ref *top = brefs + r_size(psb);
734
335k
        int top_guard = params->top_guard;
735
736
335k
        refset_null_new(top - top_guard, top_guard, 0);
737
335k
    } {
738
345k
        ref_stack_block *const pblock = (ref_stack_block *) brefs;
739
740
345k
        pblock->used = *psb;
741
345k
        pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
742
345k
        r_set_size(&pblock->used, 0);
743
345k
    }
744
345k
}