/src/ghostpdl/psi/istack.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2023 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Manager for expandable stacks of refs */ |
18 | | #include "memory_.h" |
19 | | #include "ghost.h" |
20 | | #include "gsstruct.h" |
21 | | #include "gsutil.h" |
22 | | #include "ierrors.h" |
23 | | #include "ialloc.h" |
24 | | #include "istack.h" |
25 | | #include "istkparm.h" |
26 | | #include "istruct.h" /* for RELOC_REF_VAR */ |
27 | | #include "iutil.h" |
28 | | #include "ivmspace.h" /* for local/global test */ |
29 | | #include "store.h" |
30 | | #include "icstate.h" |
31 | | #include "iname.h" |
32 | | #include "dstack.h" |
33 | | #include "idict.h" |
34 | | |
35 | | /* Forward references */ |
36 | | static void init_block(ref_stack_t *pstack, const ref *pblock_array, |
37 | | uint used); |
38 | | static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add); |
39 | | |
40 | | /* GC descriptors and procedures */ |
41 | | private_st_ref_stack_params(); |
static
CLEAR_MARKS_PROC(ref_stack_clear_marks)
{
    ref_stack_t *const sptr = vptr;

    /* Drop the GC mark attribute from the ref that names the current
     * (top) block, so a subsequent collection sees the stack unmarked. */
    r_clear_attrs(&sptr->current, l_mark);
}
/* Enumerate the GC-traceable members of a ref_stack_t:
 * case 0 yields the ref for the current (top) block,
 * case 1 yields the params structure; anything else ends enumeration. */
static
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
case 0: ENUM_RETURN_REF(&sptr->current);
case 1: return ENUM_OBJ(sptr->params);
ENUM_PTRS_END
/* Relocate the cached stack pointers (p, bot, top) after the GC moves
 * the refs array that sptr->current points at. */
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
{
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
    /* * align_packed_per_ref, but it need not be a multiple of */
    /* sizeof(ref). Therefore, we must do the adjustments using */
    /* ref_packed pointers rather than ref pointers. */
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;	/* pre-relocation base */
    long reloc;

    RELOC_REF_VAR(sptr->current);
    r_clear_attrs(&sptr->current, l_mark);
    /* reloc = (old base) - (new base), in ref_packed units. */
    reloc = bot - (ref_packed *) sptr->current.value.refs;
#define RELOC_P(p)\
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
    RELOC_P(p);
    RELOC_P(bot);
    RELOC_P(top);
#undef RELOC_P
    RELOC_OBJ_VAR(sptr->params);
} RELOC_PTRS_END
74 | | /* Structure type for a ref_stack. */ |
75 | | public_st_ref_stack(); |
76 | | |
/* Initialize a stack.
 * pblock_array is a previously allocated ref array that becomes the
 * bottom block.  bot_guard/top_guard reserve that many refs below/above
 * the usable body; the bottom guards are filled with *pguard_value, or
 * with an invalid ref when pguard_value is NULL.  If params is NULL a
 * ref_stack_params_t is allocated from mem.  Returns 0, or a negative
 * code if the params allocation fails. */
int
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
               uint bot_guard, uint top_guard, const ref *pguard_value,
               gs_ref_memory_t *mem, ref_stack_params_t *params)
{
    uint size = r_size(pblock_array);
    /* Usable refs per block: total minus the block header and guards. */
    uint avail = size - (stack_block_refs + bot_guard + top_guard);
    ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
    s_ptr body = (s_ptr)(pblock + 1);

    if (params == 0) {
        params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
                                 &st_ref_stack_params,
                                 "ref_stack_alloc(stack.params)");
        if (params == 0)
            return_error(-1);	/* avoid binding in any error codes */
    }

    pstack->bot = body + bot_guard;
    pstack->p = pstack->bot - 1;	/* empty stack: p sits just below bot */
    pstack->top = pstack->p + avail;
    pstack->current = *pblock_array;
    pstack->extension_size = 0;
    pstack->extension_used = 0;

    make_int(&pstack->max_stack, avail);
    pstack->requested = 0;
    pstack->margin = 0;
    pstack->body_size = avail;

    pstack->params = params;
    pstack->memory = mem;

    params->bot_guard = bot_guard;
    params->top_guard = top_guard;
    params->block_size = size;
    params->data_size = avail;
    if (pguard_value != 0)
        params->guard_value = *pguard_value;
    else
        make_tav(&params->guard_value, t__invalid, 0, intval, 0);
    params->underflow_error = -1;
    params->overflow_error = -1;
    params->allow_expansion = true;
    init_block(pstack, pblock_array, 0);
    refset_null_new(pstack->bot, avail, 0);	/* GC-safe nulls in the body */
    make_empty_array(&pblock->next, 0);	/* bottom block has no successor */
    return 0;
}
127 | | |
/* Set whether a stack is allowed to expand. The initial value is true. */
void
ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
{
    pstack->params->allow_expansion = expand;
}
134 | | |
/* Set the error codes for under- and overflow. The initial values are -1.
 * These codes are what ref_stack_pop_block / ref_stack_push_block return
 * when the stack under- or overflows. */
void
ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error,
                          int overflow_error)
{
    pstack->params->underflow_error = underflow_error;
    pstack->params->overflow_error = overflow_error;
}
143 | | |
/* Set the maximum number of elements allowed on a stack.
 * nmax == -1 means 'no limit'.  Otherwise nmax is clamped upward to the
 * current element count, downward to max_uint / sizeof(ref), and (when
 * expansion is disallowed) to the capacity of the single current block. */
int
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
{
    long nmin;

    /* Bypass checking if we're setting the maximum to -1 'no limits' */
    if (nmax == -1) {
        pstack->max_stack.value.intval = nmax;
        return 0;
    }

    /* check how many items we already have on the stack, don't allow
     * a maximum less than this.
     */
    nmin = ref_stack_count_inline(pstack);

    if (nmax < nmin)
        nmax = nmin;
    if (nmax > max_uint / sizeof(ref))
        nmax = max_uint / sizeof(ref);
    if (!pstack->params->allow_expansion) {
        uint ncur = pstack->body_size;

        if (nmax > ncur)
            nmax = ncur;
    }
    pstack->max_stack.value.intval = nmax;
    return 0;
}
174 | | |
/*
 * Set the margin between the limit and the top of the stack.
 * Note that this may require allocating a block.
 */
int
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
{
    const ref_stack_params_t *params = pstack->params;
    uint data_size = params->data_size;

    if (margin <= pstack->margin) {
        /* Shrinking the margin: slots being handed back above 'top' must
         * be re-nulled so the GC never sees stale refs there. */
        refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
    } else {
        if (margin > data_size >> 1)	/* margin may use at most half a block */
            return_error(gs_error_rangecheck);
        /* NOTE(review): pstack->top - pstack->p is a (signed) pointer
         * difference compared with the unsigned 'margin'; this assumes
         * p <= top on entry — confirm callers guarantee it. */
        if (pstack->top - pstack->p < margin) {
            /* Not enough free room in the top block: start a new block,
             * keeping 'keep' elements and spilling the rest. */
            uint used = pstack->p + 1 - pstack->bot;
            uint keep = data_size - margin;
            int code = ref_stack_push_block(pstack, keep, used - keep);

            if (code < 0)
                return code;
        }
    }
    pstack->margin = margin;
    pstack->body_size = data_size - margin;
    pstack->top = pstack->bot + pstack->body_size - 1;
    return 0;
}
204 | | |
/* Return the number of elements on a stack. */
uint
ref_stack_count(const ref_stack_t *pstack)
{
    /* Out-of-line wrapper over the inline counter (see istack.h),
     * for callers that need a function pointer or want to avoid
     * the inline expansion. */
    return ref_stack_count_inline(pstack);
}
211 | | |
/*
 * Return a pointer to a given element from the stack, counting from
 * 0 as the top element. If the index is out of range, return 0.
 */
ref *
ref_stack_index(const ref_stack_t *pstack, long idx)
{
    ref_stack_block *pblock;
    uint used = pstack->p + 1 - pstack->bot;	/* elements in the top block */

    if (idx < 0)
        return NULL;
    if (idx < used)		/* common case: element is in the top block */
        return pstack->p - (uint) idx;
    /* Walk down the chain of older blocks until idx falls inside one. */
    pblock = (ref_stack_block *) pstack->current.value.refs;
    do {
        pblock = (ref_stack_block *) pblock->next.value.refs;
        if (pblock == 0)
            return NULL;	/* walked past the bottom: index out of range */
        idx -= used;
        used = r_size(&pblock->used);
    } while (idx >= used);
    /* Within this block, element 0 of 'idx' counting is at the block's top. */
    return pblock->used.value.refs + (used - 1 - (uint) idx);
}
236 | | |
237 | | /* |
238 | | * Count the number of elements down to and including the first mark. |
239 | | * If no mark is found, return 0. |
240 | | */ |
241 | | uint |
242 | | ref_stack_counttomark(const ref_stack_t *pstack) |
243 | 1.17G | { |
244 | 1.17G | uint scanned = 0; |
245 | 1.17G | ref_stack_enum_t rsenum; |
246 | | |
247 | 1.17G | ref_stack_enum_begin(&rsenum, pstack); |
248 | 1.18G | do { |
249 | 1.18G | uint count = rsenum.size; |
250 | 1.18G | const ref *p = rsenum.ptr + count - 1; |
251 | | |
252 | 8.68G | for (; count; count--, p--) |
253 | 8.67G | if (r_has_type(p, t_mark)) |
254 | 1.17G | return scanned + (rsenum.size - count + 1); |
255 | 4.73M | scanned += rsenum.size; |
256 | 4.73M | } while (ref_stack_enum_next(&rsenum)); |
257 | 253 | return 0; |
258 | 1.17G | } |
259 | | |
/*
 * Do the store check for storing 'count' elements of a stack, starting
 * 'skip' elements below the top, into an array. Return 0 or gs_error_invalidaccess.
 */
int
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
                      uint skip)
{
    uint space = r_space(parray);

    /* Storing into local VM is always legal; for other spaces every
     * stored ref must itself be storable there (e.g. global VM must
     * not end up pointing at local objects). */
    if (space != avm_local) {
        uint left = count, pass = skip;
        ref_stack_enum_t rsenum;

        ref_stack_enum_begin(&rsenum, pstack);
        do {
            ref *ptr = rsenum.ptr;
            uint size = rsenum.size;

            if (size <= pass)
                pass -= size;	/* whole block lies inside 'skip': ignore it */
            else {
                int code;

                if (pass != 0)
                    size -= pass, pass = 0;	/* partial skip within this block */
                ptr += size;
                if (size > left)
                    size = left;	/* clamp to the remaining region */
                left -= size;
                code = refs_check_space(ptr - size, size, space);
                if (code < 0)
                    return code;
                if (left == 0)
                    break;
            }
        } while (ref_stack_enum_next(&rsenum));
    }
    return 0;
}
300 | | |
/*
 * Copy the executable array 'sarr' into the pre-allocated destination
 * array 'darr', sanitizing the contents: operators are replaced by a
 * name of the form "--opname--" (looked up in systemdict first), and
 * nested executable arrays are recursively copied, limited to depth 50.
 * Non-executable arrays and all other refs are copied as-is.
 * Returns 0 or a negative error code.
 */
int
ref_stack_array_sanitize(i_ctx_t *i_ctx_p, ref *sarr, ref *darr, int depth)
{
    int i, code;
    ref obj, arr2;
    ref *pobj2;
    gs_memory_t *mem = (gs_memory_t *)idmemory->current;

    if (!r_is_array(sarr) || !r_has_type(darr, t_array))
        return_error(gs_error_typecheck);

    for (i = 0; i < r_size(sarr); i++) {
        code = array_get(mem, sarr, i, &obj);
        if (code < 0)
            make_null(&obj);	/* unreadable element: store a null instead */
        switch(r_type(&obj)) {
          case t_operator:
          {
            int index = op_index(&obj);

            if (index > 0 && index < op_def_count) {
                /* oname + 1 skips the length byte of the operator name. */
                const byte *data = (const byte *)(op_index_def(index)->oname + 1);
                if (dict_find_string(systemdict, (const char *)data, &pobj2) <= 0) {
                    /* Not in systemdict: synthesize "--name--".
                     * Size: 2 + strlen(data) + 2 + NUL = strlen + 5. */
                    byte *s = gs_alloc_bytes(mem, strlen((char *)data) + 5, "ref_stack_array_sanitize");
                    if (s) {
                        s[0] = '\0';
                        strcpy((char *)s, "--");
                        strcpy((char *)s + 2, (char *)data);
                        strcpy((char *)s + strlen((char *)data) + 2, "--");
                    }
                    else {
                        /* Allocation failed: fall back to the bare name. */
                        s = (byte *)data;
                    }
                    code = name_ref(imemory, s, strlen((char *)s), &obj, 1);
                    if (code < 0) make_null(&obj);
                    if (s != data)
                        gs_free_object(mem, s, "ref_stack_array_sanitize");
                }
            }
            else {
                /* Out-of-range operator index: store a null. */
                make_null(&obj);
            }
            ref_assign(darr->value.refs + i, &obj);
            break;
          }
          case t_array:
          case t_shortarray:
          case t_mixedarray:
          {
            int attrs = r_type_attrs(&obj) & (a_write | a_read | a_execute | a_executable);
            /* We only want to copy executable arrays */
            if (attrs & (a_execute | a_executable)) {
                if (++depth > 50) {
                    /* Recursion limit: treat as an error so the slot
                     * below becomes a null. */
                    code = gs_error_limitcheck;
                }
                else {
                    code = ialloc_ref_array(&arr2, attrs, r_size(&obj), "ref_stack_array_sanitize");
                }
                if (code < 0) {
                    make_null(&arr2);
                }
                else {
                    code = ref_stack_array_sanitize(i_ctx_p, &obj, &arr2, depth);
                    if (code < 0) {
                        ifree_ref_array(&arr2, "ref_stack_array_sanitize");
                        return code;
                    }
                }
                ref_assign(darr->value.refs + i, &arr2);
            }
            else {
                ref_assign(darr->value.refs + i, &obj);
            }
            break;
          }
          default:
            ref_assign(darr->value.refs + i, &obj);
        }
    }
    return 0;
}
382 | | |
383 | | |
/*
 * Store the top 'count' elements of a stack, starting 'skip' elements below
 * the top, into an array, with or without store/undo checking. age=-1 for
 * no check, 0 for old, 1 for new. May return gs_error_rangecheck or
 * gs_error_invalidaccess.
 */
#undef idmemory /****** NOTA BENE ******/
int
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
                uint skip, int age, bool check, gs_dual_memory_t *idmemory,
                client_name_t cname)
{
    uint left, pass;
    ref *to;
    ref_stack_enum_t rsenum;

    if (count > ref_stack_count(pstack) || count > r_size(parray))
        return_error(gs_error_rangecheck);
    if (check) {
        int code = ref_stack_store_check(pstack, parray, count, skip);

        if (code < 0)
            return code;
    }
    /* Copy from the stack top downward; 'to' runs backward from the end
     * of the destination region so the order in parray matches the stack. */
    to = parray->value.refs + count;
    left = count, pass = skip;
    ref_stack_enum_begin(&rsenum, pstack);
    do {
        ref *from = rsenum.ptr;
        uint size = rsenum.size;

        if (size <= pass)
            pass -= size;	/* whole block lies inside 'skip': ignore it */
        else {
            if (pass != 0)
                size -= pass, pass = 0;	/* partial skip within this block */
            from += size;
            if (size > left)
                size = left;	/* clamp to the remaining region */
            left -= size;
            /* 'age' selects the store/undo discipline used per slot. */
            switch (age) {
            case -1:		/* not an array */
                while (size--) {
                    from--, to--;
                    ref_assign(to, from);
                }
                break;
            case 0:		/* old array */
                while (size--) {
                    from--, to--;
                    ref_assign_old(parray, to, from, cname);
                }
                break;
            case 1:		/* new array */
                while (size--) {
                    from--, to--;
                    ref_assign_new(to, from);
                }
                break;
            }
            if (left == 0)
                break;
        }
    } while (ref_stack_enum_next(&rsenum));
    r_set_size(parray, count);
    return 0;
}
451 | | |
452 | | /* |
453 | | * Pop the top N elements off a stack. |
454 | | * The number must not exceed the number of elements in use. |
455 | | */ |
456 | | void |
457 | | ref_stack_pop(ref_stack_t *pstack, uint count) |
458 | 1.76G | { |
459 | 1.76G | uint used; |
460 | | |
461 | 1.77G | while ((used = pstack->p + 1 - pstack->bot) <= count && |
462 | 1.77G | pstack->extension_used > 0) { |
463 | 5.06M | count -= used; |
464 | 5.06M | pstack->p = pstack->bot - 1; |
465 | 5.06M | ref_stack_pop_block(pstack); |
466 | 5.06M | } |
467 | 1.76G | pstack->p -= count; |
468 | 1.76G | } |
469 | | |
/* Pop the top block off a stack. May return underflow_error.
 * Merges the top block's contents with the next block when they fit in
 * one block, otherwise shuffles elements to maximize contiguous space. */
int
ref_stack_pop_block(ref_stack_t *pstack)
{
    s_ptr bot = pstack->bot;
    uint count = pstack->p + 1 - bot;	/* live elements in the top block */
    ref_stack_block *pcur =
        (ref_stack_block *) pstack->current.value.refs;
    ref_stack_block *pnext =
        (ref_stack_block *) pcur->next.value.refs;
    uint used;
    ref *body;
    ref next;

    if (pnext == 0)
        return_error(pstack->params->underflow_error);	/* no older block */
    used = r_size(&pnext->used);
    body = (ref *) (pnext + 1) + pstack->params->bot_guard;
    next = pcur->next;
    /*
     * If the contents of the two blocks won't fit in a single block, we
     * move up the used part of the top block, and copy up as much of
     * the contents of the next block under it as will fit. If the
     * contents of both blocks fit in a single block, we copy the used
     * part of the top block to the top of the next block, and free the
     * top block.
     */
    if (used + count > pstack->body_size) {
        /*
         * The contents of the two blocks won't fit into a single block.
         * On the assumption that we're recovering from a local stack
         * underflow and need to increase the number of contiguous
         * elements available, move up the used part of the top block, and
         * copy up as much of the contents of the next block under it as
         * will fit.
         */
        uint moved = pstack->body_size - count;
        uint left;

        if (moved == 0)
            return_error(gs_error_Fatal);	/* top block is already full */
        memmove(bot + moved, bot, count * sizeof(ref));
        left = used - moved;
        memcpy(bot, body + left, moved * sizeof(ref));
        refset_null_new(body + left, moved, 0);	/* GC-safe nulls in vacated slots */
        r_dec_size(&pnext->used, moved);
        pstack->p = pstack->top;
        pstack->extension_used -= moved;
    } else {
        /*
         * The contents of the two blocks will fit into a single block.
         * Copy the used part of the top block to the top of the next
         * block, and free the top block.
         */
        memcpy(body + used, bot, count * sizeof(ref));
        pstack->bot = bot = body;
        pstack->top = bot + pstack->body_size - 1;
        gs_free_ref_array(pstack->memory, &pstack->current,
                          "ref_stack_pop_block");
        pstack->current = next;	/* the next block becomes the top block */
        pstack->p = bot + (used + count - 1);
        pstack->extension_size -= pstack->body_size;
        pstack->extension_used -= used;
    }
    return 0;
}
536 | | |
/*
 * Extend a stack to recover from an overflow condition.
 * May return overflow_error or gs_error_VMerror.
 */
int
ref_stack_extend(ref_stack_t *pstack, uint request)
{
    uint keep = (pstack->top - pstack->bot + 1) / 3;	/* keep ~1/3 of the block on top */
    uint count = pstack->p - pstack->bot + 1;
    const ref_stack_params_t *params = pstack->params;

    if (request > params->data_size)
        return_error(params->overflow_error);	/* can never fit in one block */
    /* NOTE(review): this assumes request <= body_size; when a margin is
     * in effect body_size < data_size and the subtraction below could
     * wrap for large requests — confirm callers never hit that case. */
    if (keep + request > pstack->body_size)
        keep = pstack->body_size - request;
    if (keep > count)
        keep = count;		/* required by ref_stack_push_block */
    return ref_stack_push_block(pstack, keep, request);
}
556 | | |
/*
 * Push N empty slots onto a stack. These slots are not initialized:
 * the caller must immediately fill them. May return overflow_error
 * (if max_stack would be exceeded, or the stack has no allocator)
 * or gs_error_VMerror.
 */
int
ref_stack_push(ref_stack_t *pstack, uint count)
{
    /* Don't bother to pre-check for overflow: we must be able to */
    /* back out in the case of a VMerror anyway, and */
    /* ref_stack_push_block will make the check itself. */
    uint needed = count;
    uint added;

    for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
        int code;

        pstack->p = pstack->top;	/* claim the rest of this block first */
        code = ref_stack_push_block(pstack,
                                    (pstack->top - pstack->bot + 1) / 3,
                                    added);
        if (code < 0) {
            /* Back out: release the slots claimed in earlier passes. */
            ref_stack_pop(pstack, count - needed + added);
            pstack->requested = count;	/* record for later retry/overflow handling */
            return code;
        }
    }
    pstack->p += needed;
    return 0;
}
589 | | |
/*
 * Push a block onto the stack, specifying how many elements of the current
 * top block should remain in the top block and also how many elements we
 * are trying to add. Requires keep <= count. May return overflow_error or
 * gs_error_VMerror.
 */
static int
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
{
    const ref_stack_params_t *params = pstack->params;
    uint count = pstack->p - pstack->bot + 1;	/* live elements in the top block */
    uint move = count - keep;	/* elements left behind in the old block */
    ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
    ref next;
    ref_stack_block *pnext;
    ref *body;
    int code;

    if (keep > count)
        return_error(gs_error_Fatal);	/* contract violation by the caller */
    /* Check for overflowing the maximum size, */
    /* or expansion not allowed. */
    /* Or specifically allowing unlimited expansion */
    if (pstack->max_stack.value.intval > 0) {
        if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
            pstack->max_stack.value.intval ||
            !params->allow_expansion
            )
            return_error(params->overflow_error);
    }
    code = gs_alloc_ref_array(pstack->memory, &next, 0,
                              params->block_size, "ref_stack_push_block");
    if (code < 0)
        return code;
    pnext = (ref_stack_block *) next.value.refs;
    body = (ref *) (pnext + 1);
    /* Copy the top keep elements into the new block, */
    /* and make the new block the top block. */
    init_block(pstack, &next, keep);
    body += params->bot_guard;
    memcpy(body, pstack->bot + move, keep * sizeof(ref));
    /* Clear the elements above the top of the new block. */
    refset_null_new(body + keep, params->data_size - keep, 0);
    /* Clear the elements above the top of the old block. */
    refset_null_new(pstack->bot + move, keep, 0);
    pnext->next = pstack->current;	/* chain old top below the new block */
    pcur->used.value.refs = pstack->bot;
    r_set_size(&pcur->used, move);	/* record how much the old block keeps */
    pstack->current = next;
    pstack->bot = body;
    pstack->top = pstack->bot + pstack->body_size - 1;
    pstack->p = pstack->bot + keep - 1;
    pstack->extension_size += pstack->body_size;
    pstack->extension_used += move;
    return 0;
}
646 | | |
647 | | /* Begin enumerating the blocks of a stack. */ |
648 | | void |
649 | | ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack) |
650 | 1.46G | { |
651 | 1.46G | prse->block = (ref_stack_block *)pstack->current.value.refs; |
652 | 1.46G | prse->ptr = pstack->bot; |
653 | 1.46G | prse->size = pstack->p + 1 - pstack->bot; |
654 | 1.46G | } |
655 | | |
656 | | bool |
657 | | ref_stack_enum_next(ref_stack_enum_t *prse) |
658 | 15.5M | { |
659 | 15.5M | ref_stack_block *block = |
660 | 15.5M | prse->block = (ref_stack_block *)prse->block->next.value.refs; |
661 | | |
662 | 15.5M | if (block == 0) |
663 | 10.3M | return false; |
664 | 5.19M | prse->ptr = block->used.value.refs; |
665 | 5.19M | prse->size = r_size(&block->used); |
666 | 5.19M | return true; |
667 | 15.5M | } |
668 | | |
/* Clean up a stack for garbage collection: null out the unused slots
 * above p, and record the in-use region of the top block in its header
 * so the GC traces exactly the live elements. */
void
ref_stack_cleanup(ref_stack_t *pstack)
{
    ref_stack_block *pblock =
        (ref_stack_block *) pstack->current.value.refs;

    refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
    pblock->used = pstack->current;	/* set attrs */
    pblock->used.value.refs = pstack->bot;
    r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
}
681 | | |
/*
 * Free the entire contents of a stack, including the bottom block.
 * The client must still call ref_stack_free. Note that after calling
 * ref_stack_release, the stack is no longer usable.
 */
void
ref_stack_release(ref_stack_t *pstack)
{
    gs_ref_memory_t *mem = pstack->memory;

    ref_stack_clear(pstack);	/* empty the stack first (declared in istack.h) */
    /* Free the parameter structure. */
    gs_free_object((gs_memory_t *)mem, pstack->params,
                   "ref_stack_release(stack.params)");
    /* Free the original (bottom) block. */
    gs_free_ref_array(mem, &pstack->current, "ref_stack_release");
}
699 | | |
/*
 * Release a stack and then free the ref_stack object.
 */
void
ref_stack_free(ref_stack_t *pstack)
{
    /* Save the allocator before releasing: pstack must not be read
     * after gs_free_object below. */
    gs_memory_t *mem = (gs_memory_t *)pstack->memory;

    ref_stack_release(pstack);
    gs_free_object(mem, pstack, "ref_stack_free");
}
711 | | |
712 | | /* ------ Internal routines ------ */ |
713 | | |
/* Initialize the guards and body of a stack block.
 * psb is the ref naming the block's refs array.
 * NOTE(review): the 'used' parameter is not referenced in this body —
 * the used size is set to 0 here and adjusted by the caller; confirm
 * before relying on it. */
static void
init_block(ref_stack_t *pstack, const ref *psb, uint used)
{
    ref_stack_params_t *params = pstack->params;
    ref *brefs = psb->value.refs;
    uint i;
    ref *p;

    /* Fill the bottom guard slots with the configured guard value. */
    for (i = params->bot_guard, p = brefs + stack_block_refs;
         i != 0; i--, p++
        )
        ref_assign(p, &params->guard_value);
    /* The top guard elements will never be read, */
    /* but we need to initialize them for the sake of the GC. */
    /* We can use refset_null for this, because even though it uses */
    /* make_null_new and stack elements must not be marked new, */
    /* these slots will never actually be read or written. */
    if (params->top_guard) {
        ref *top = brefs + r_size(psb);
        int top_guard = params->top_guard;

        refset_null_new(top - top_guard, top_guard, 0);
    } {
        ref_stack_block *const pblock = (ref_stack_block *) brefs;

        /* Set up the block header: the 'used' ref describes the body
         * region, initially empty. */
        pblock->used = *psb;
        pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
        r_set_size(&pblock->used, 0);
    }
}