/src/ghostpdl/psi/istack.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2021 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 1305 Grant Avenue - Suite 200, Novato, |
13 | | CA 94945, U.S.A., +1(415)492-9861, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Manager for expandable stacks of refs */ |
18 | | #include "memory_.h" |
19 | | #include "ghost.h" |
20 | | #include "gsstruct.h" |
21 | | #include "gsutil.h" |
22 | | #include "ierrors.h" |
23 | | #include "ialloc.h" |
24 | | #include "istack.h" |
25 | | #include "istkparm.h" |
26 | | #include "istruct.h" /* for RELOC_REF_VAR */ |
27 | | #include "iutil.h" |
28 | | #include "ivmspace.h" /* for local/global test */ |
29 | | #include "store.h" |
30 | | #include "icstate.h" |
31 | | #include "iname.h" |
32 | | #include "dstack.h" |
33 | | #include "idict.h" |
34 | | |
35 | | /* Forward references */ |
36 | | static void init_block(ref_stack_t *pstack, const ref *pblock_array, |
37 | | uint used); |
38 | | static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add); |
39 | | |
40 | | /* GC descriptors and procedures */ |
41 | | private_st_ref_stack_params(); |
/*
 * GC clear-marks procedure for a ref_stack_t: clear the l_mark attribute
 * on the ref that designates the stack's current (top) block.  vptr is
 * handed in by the GC machinery as a ref_stack_t *.
 */
static
CLEAR_MARKS_PROC(ref_stack_clear_marks)
{
    ref_stack_t *const sptr = vptr;

    r_clear_attrs(&sptr->current, l_mark);
}
/*
 * GC pointer enumeration for a ref_stack_t.  The stack has exactly two
 * GC-visible members: the 'current' block ref (case 0) and the params
 * struct pointer (case 1).
 */
static
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
case 0: ENUM_RETURN_REF(&sptr->current);
case 1: return ENUM_OBJ(sptr->params);
ENUM_PTRS_END
/*
 * GC relocation for a ref_stack_t: after 'current' is relocated, shift
 * the cached p/bot/top pointers by the same distance the block moved.
 */
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
{
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
    /* * align_packed_per_ref, but it need not be a multiple of */
    /* sizeof(ref).  Therefore, we must do the adjustments using */
    /* ref_packed pointers rather than ref pointers. */
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;   /* pre-reloc base */
    long reloc;

    RELOC_REF_VAR(sptr->current);
    r_clear_attrs(&sptr->current, l_mark);
    /* Distance (in ref_packed units) the block array moved down. */
    reloc = bot - (ref_packed *) sptr->current.value.refs;
#define RELOC_P(p)\
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
    RELOC_P(p);
    RELOC_P(bot);
    RELOC_P(top);
#undef RELOC_P
    RELOC_OBJ_VAR(sptr->params);
} RELOC_PTRS_END
74 | | /* Structure type for a ref_stack. */ |
75 | | public_st_ref_stack(); |
76 | | |
77 | | /* Initialize a stack. */ |
/*
 * Initialize a stack over the block array designated by pblock_array.
 * bot_guard/top_guard are the number of guard slots reserved below and
 * above the usable body; pguard_value, if non-NULL, is the value stored
 * in the bottom guard slots.  If params is NULL a fresh
 * ref_stack_params_t is allocated from mem.  Returns 0, or a negative
 * error code if the params allocation fails.
 */
int
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
               uint bot_guard, uint top_guard, const ref *pguard_value,
               gs_ref_memory_t *mem, ref_stack_params_t *params)
{
    uint size = r_size(pblock_array);
    /* Usable refs per block = total minus block header and both guards. */
    uint avail = size - (stack_block_refs + bot_guard + top_guard);
    ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
    s_ptr body = (s_ptr)(pblock + 1);

    if (params == 0) {
        params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
                                 &st_ref_stack_params,
                                 "ref_stack_alloc(stack.params)");
        if (params == 0)
            return_error(-1);	/* avoid binding in any error codes */
    }

    pstack->bot = body + bot_guard;
    pstack->p = pstack->bot - 1;	/* empty: p sits one slot below bot */
    pstack->top = pstack->p + avail;
    pstack->current = *pblock_array;
    pstack->extension_size = 0;
    pstack->extension_used = 0;

    /* Default limit: exactly one block's worth of elements. */
    make_int(&pstack->max_stack, avail);
    pstack->requested = 0;
    pstack->margin = 0;
    pstack->body_size = avail;

    pstack->params = params;
    pstack->memory = mem;

    params->bot_guard = bot_guard;
    params->top_guard = top_guard;
    params->block_size = size;
    params->data_size = avail;
    if (pguard_value != 0)
        params->guard_value = *pguard_value;
    else
        make_tav(&params->guard_value, t__invalid, 0, intval, 0);
    params->underflow_error = -1;
    params->overflow_error = -1;
    params->allow_expansion = true;
    init_block(pstack, pblock_array, 0);
    /* Clear the body so the GC never sees uninitialized refs. */
    refset_null_new(pstack->bot, avail, 0);
    make_empty_array(&pblock->next, 0);
    return 0;
}
127 | | |
128 | | /* Set whether a stack is allowed to expand. The initial value is true. */ |
129 | | void |
130 | | ref_stack_allow_expansion(ref_stack_t *pstack, bool expand) |
131 | 683 | { |
132 | 683 | pstack->params->allow_expansion = expand; |
133 | 683 | } |
134 | | |
135 | | /* Set the error codes for under- and overflow. The initial values are -1. */ |
136 | | void |
137 | | ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error, |
138 | | int overflow_error) |
139 | 2.04k | { |
140 | 2.04k | pstack->params->underflow_error = underflow_error; |
141 | 2.04k | pstack->params->overflow_error = overflow_error; |
142 | 2.04k | } |
143 | | |
144 | | /* Set the maximum number of elements allowed on a stack. */ |
/*
 * Set the maximum number of elements allowed on a stack.  nmax == -1
 * means "no limit".  Otherwise nmax is clamped so it is never smaller
 * than the current element count, never larger than max_uint refs, and
 * (for non-expandable stacks) never larger than one block's body.
 * Always returns 0.
 */
int
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
{
    long nmin;

    /* Bypass checking if we're setting the maximum to -1 'no limits' */
    if (nmax == -1) {
        pstack->max_stack.value.intval = nmax;
        return 0;
    }

    /* check how many items we already have on the stack, don't allow
     * a maximum less than this.
     */
    nmin = ref_stack_count_inline(pstack);

    if (nmax < nmin)
        nmax = nmin;
    if (nmax > max_uint / sizeof(ref))
        nmax = max_uint / sizeof(ref);
    if (!pstack->params->allow_expansion) {
        /* Non-expandable stacks can never hold more than one block. */
        uint ncur = pstack->body_size;

        if (nmax > ncur)
            nmax = ncur;
    }
    pstack->max_stack.value.intval = nmax;
    return 0;
}
174 | | |
175 | | /* |
176 | | * Set the margin between the limit and the top of the stack. |
177 | | * Note that this may require allocating a block. |
178 | | */ |
int
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
{
    const ref_stack_params_t *params = pstack->params;
    uint data_size = params->data_size;

    if (margin <= pstack->margin) {
        /* Shrinking the margin: the freed slots above top must be
           re-nulled so the GC never sees stale contents. */
        refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
    } else {
        /* A margin larger than half a block would leave too little
           usable space. */
        if (margin > data_size >> 1)
            return_error(gs_error_rangecheck);
        if (pstack->top - pstack->p < margin) {
            /* Not enough free room in the top block: push a new block,
               keeping only what still fits below the new margin. */
            uint used = pstack->p + 1 - pstack->bot;
            uint keep = data_size - margin;
            int code = ref_stack_push_block(pstack, keep, used - keep);

            if (code < 0)
                return code;
        }
    }
    pstack->margin = margin;
    pstack->body_size = data_size - margin;
    pstack->top = pstack->bot + pstack->body_size - 1;
    return 0;
}
204 | | |
205 | | /* Return the number of elements on a stack. */ |
206 | | uint |
207 | | ref_stack_count(const ref_stack_t *pstack) |
208 | 834k | { |
209 | 834k | return ref_stack_count_inline(pstack); |
210 | 834k | } |
211 | | |
212 | | /* |
213 | | * Return a pointer to a given element from the stack, counting from |
214 | | * 0 as the top element. If the index is out of range, return 0. |
215 | | */ |
ref *
ref_stack_index(const ref_stack_t *pstack, long idx)
{
    ref_stack_block *pblock;
    uint used = pstack->p + 1 - pstack->bot;

    if (idx < 0)
        return NULL;
    if (idx < used)		/* common case: element is in the top block */
        return pstack->p - (uint) idx;
    /* Walk down the chain of older blocks until the remaining index
       falls inside one of them. */
    pblock = (ref_stack_block *) pstack->current.value.refs;
    do {
        pblock = (ref_stack_block *) pblock->next.value.refs;
        if (pblock == 0)
            return NULL;	/* idx >= total element count */
        idx -= used;
        used = r_size(&pblock->used);
    } while (idx >= used);
    /* Within a block, element 0 of 'used' is the deepest, so count back
       from the block's top. */
    return pblock->used.value.refs + (used - 1 - (uint) idx);
}
236 | | |
237 | | /* |
238 | | * Count the number of elements down to and including the first mark. |
239 | | * If no mark is found, return 0. |
240 | | */ |
uint
ref_stack_counttomark(const ref_stack_t *pstack)
{
    uint scanned = 0;
    ref_stack_enum_t rsenum;

    ref_stack_enum_begin(&rsenum, pstack);
    do {
        uint count = rsenum.size;
        const ref *p = rsenum.ptr + count - 1;	/* top of this block */

        /* Scan this block from its top downward. */
        for (; count; count--, p--)
            if (r_has_type(p, t_mark))
                /* Result includes the mark itself. */
                return scanned + (rsenum.size - count + 1);
        scanned += rsenum.size;
    } while (ref_stack_enum_next(&rsenum));
    return 0;		/* no mark anywhere on the stack */
}
259 | | |
260 | | /* |
261 | | * Do the store check for storing 'count' elements of a stack, starting |
262 | | * 'skip' elements below the top, into an array. Return 0 or gs_error_invalidaccess. |
263 | | */ |
int
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
                      uint skip)
{
    uint space = r_space(parray);

    if (space != avm_local) {
        /* Storing into global VM: every stored ref must itself be
           storable in 'space'.  Walk the blocks top-down, skipping the
           first 'skip' elements, checking the next 'count'. */
        uint left = count, pass = skip;
        ref_stack_enum_t rsenum;

        ref_stack_enum_begin(&rsenum, pstack);
        do {
            ref *ptr = rsenum.ptr;
            uint size = rsenum.size;

            if (size <= pass)
                pass -= size;	/* whole block is within the skip region */
            else {
                int code;

                if (pass != 0)
                    size -= pass, pass = 0;	/* partial skip in this block */
                ptr += size;
                if (size > left)
                    size = left;
                left -= size;
                code = refs_check_space(ptr - size, size, space);
                if (code < 0)
                    return code;
                if (left == 0)
                    break;
            }
        } while (ref_stack_enum_next(&rsenum));
    }
    return 0;
}
300 | | |
/*
 * Copy sarr into darr, replacing contents that must not leak as-is:
 * operators become names of the form --opname-- (or null if they can't
 * be resolved), and executable sub-arrays are recursively sanitized
 * into fresh arrays.  Everything else is copied unchanged.
 * NOTE(review): darr is assumed to be at least r_size(sarr) elements
 * and writable -- only its type is checked here; confirm at callers.
 */
int
ref_stack_array_sanitize(i_ctx_t *i_ctx_p, ref *sarr, ref *darr)
{
    int i, code;
    ref obj, arr2;
    ref *pobj2;
    gs_memory_t *mem = (gs_memory_t *)idmemory->current;

    if (!r_is_array(sarr) || !r_has_type(darr, t_array))
        return_error(gs_error_typecheck);

    for (i = 0; i < r_size(sarr); i++) {
        code = array_get(mem, sarr, i, &obj);
        if (code < 0)
            make_null(&obj);	/* unreadable element: store null */
        switch(r_type(&obj)) {
          case t_operator:
            {
                int index = op_index(&obj);

                if (index > 0 && index < op_def_count) {
                    const byte *data = (const byte *)(op_index_def(index)->oname + 1);
                    if (dict_find_string(systemdict, (const char *)data, &pobj2) <= 0) {
                        /* Operator not in systemdict: synthesize the name
                           "--<opname>--" (strlen + 4 chars + NUL = +5). */
                        byte *s = gs_alloc_bytes(mem, strlen((char *)data) + 5, "ref_stack_array_sanitize");
                        if (s) {
                            s[0] = '\0';
                            strcpy((char *)s, "--");
                            strcpy((char *)s + 2, (char *)data);
                            strcpy((char *)s + strlen((char *)data) + 2, "--");
                        }
                        else {
                            /* Allocation failed: fall back to the bare name. */
                            s = (byte *)data;
                        }
                        code = name_ref(imemory, s, strlen((char *)s), &obj, 1);
                        if (code < 0) make_null(&obj);
                        if (s != data)
                            gs_free_object(mem, s, "ref_stack_array_sanitize");
                    }
                }
                else {
                    /* Out-of-range operator index: store null. */
                    make_null(&obj);
                }
                ref_assign(darr->value.refs + i, &obj);
                break;
            }
          case t_array:
          case t_shortarray:
          case t_mixedarray:
            {
                int attrs = r_type_attrs(&obj) & (a_write | a_read | a_execute | a_executable);
                /* We only want to copy executable arrays */
                if (attrs & (a_execute | a_executable)) {
                    code = ialloc_ref_array(&arr2, attrs, r_size(&obj), "ref_stack_array_sanitize");
                    if (code < 0) {
                        make_null(&arr2);
                    }
                    else {
                        /* Recurse into the sub-array. */
                        code = ref_stack_array_sanitize(i_ctx_p, &obj, &arr2);
                        if (code < 0) {
                            ifree_ref_array(&arr2, "ref_stack_array_sanitize");
                            return code;
                        }
                    }
                    ref_assign(darr->value.refs + i, &arr2);
                }
                else {
                    ref_assign(darr->value.refs + i, &obj);
                }
                break;
            }
          default:
            ref_assign(darr->value.refs + i, &obj);
        }
    }
    return 0;
}
377 | | |
378 | | |
379 | | /* |
380 | | * Store the top 'count' elements of a stack, starting 'skip' elements below |
381 | | * the top, into an array, with or without store/undo checking. age=-1 for |
382 | | * no check, 0 for old, 1 for new. May return gs_error_rangecheck or |
383 | | * gs_error_invalidaccess. |
384 | | */ |
385 | | #undef idmemory /****** NOTA BENE ******/ |
int
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
                uint skip, int age, bool check, gs_dual_memory_t *idmemory,
                client_name_t cname)
{
    uint left, pass;
    ref *to;
    ref_stack_enum_t rsenum;

    if (count > ref_stack_count(pstack) || count > r_size(parray))
        return_error(gs_error_rangecheck);
    if (check) {
        int code = ref_stack_store_check(pstack, parray, count, skip);

        if (code < 0)
            return code;
    }
    /* Copy top-down: 'to' starts one past the last destination slot. */
    to = parray->value.refs + count;
    left = count, pass = skip;
    ref_stack_enum_begin(&rsenum, pstack);
    do {
        ref *from = rsenum.ptr;
        uint size = rsenum.size;

        if (size <= pass)
            pass -= size;	/* whole block within the skip region */
        else {
            if (pass != 0)
                size -= pass, pass = 0;	/* partial skip in this block */
            from += size;	/* copy downward from the block's top */
            if (size > left)
                size = left;
            left -= size;
            /* 'age' selects the store discipline for the destination. */
            switch (age) {
            case -1:		/* not an array */
                while (size--) {
                    from--, to--;
                    ref_assign(to, from);
                }
                break;
            case 0:		/* old array */
                while (size--) {
                    from--, to--;
                    ref_assign_old(parray, to, from, cname);
                }
                break;
            case 1:		/* new array */
                while (size--) {
                    from--, to--;
                    ref_assign_new(to, from);
                }
                break;
            }
            if (left == 0)
                break;
        }
    } while (ref_stack_enum_next(&rsenum));
    r_set_size(parray, count);
    return 0;
}
446 | | |
447 | | /* |
448 | | * Pop the top N elements off a stack. |
449 | | * The number must not exceed the number of elements in use. |
450 | | */ |
451 | | void |
452 | | ref_stack_pop(ref_stack_t *pstack, uint count) |
453 | 9.41M | { |
454 | 9.41M | uint used; |
455 | | |
456 | 9.43M | while ((used = pstack->p + 1 - pstack->bot) <= count && |
457 | 9.43M | pstack->extension_used > 0) { |
458 | 20.0k | count -= used; |
459 | 20.0k | pstack->p = pstack->bot - 1; |
460 | 20.0k | ref_stack_pop_block(pstack); |
461 | 20.0k | } |
462 | 9.41M | pstack->p -= count; |
463 | 9.41M | } |
464 | | |
465 | | /* Pop the top block off a stack. May return underflow_error. */ |
int
ref_stack_pop_block(ref_stack_t *pstack)
{
    s_ptr bot = pstack->bot;
    uint count = pstack->p + 1 - bot;	/* live refs in the top block */
    ref_stack_block *pcur =
        (ref_stack_block *) pstack->current.value.refs;
    ref_stack_block *pnext =
        (ref_stack_block *) pcur->next.value.refs;
    uint used;
    ref *body;
    ref next;

    if (pnext == 0)
        return_error(pstack->params->underflow_error);
    used = r_size(&pnext->used);
    body = (ref *) (pnext + 1) + pstack->params->bot_guard;
    next = pcur->next;
    /*
     * If the contents of the two blocks won't fit in a single block, we
     * move up the used part of the top block, and copy up as much of
     * the contents of the next block under it as will fit.  If the
     * contents of both blocks fit in a single block, we copy the used
     * part of the top block to the top of the next block, and free the
     * top block.
     */
    if (used + count > pstack->body_size) {
        /*
         * The contents of the two blocks won't fit into a single block.
         * On the assumption that we're recovering from a local stack
         * underflow and need to increase the number of contiguous
         * elements available, move up the used part of the top block, and
         * copy up as much of the contents of the next block under it as
         * will fit.
         */
        uint moved = pstack->body_size - count;
        uint left;

        if (moved == 0)
            return_error(gs_error_Fatal);
        /* Slide the top block's live refs up to make room below them. */
        memmove(bot + moved, bot, count * sizeof(ref));
        left = used - moved;
        memcpy(bot, body + left, moved * sizeof(ref));
        /* Null out the vacated slots so the GC never sees stale refs. */
        refset_null_new(body + left, moved, 0);
        r_dec_size(&pnext->used, moved);
        pstack->p = pstack->top;
        pstack->extension_used -= moved;
    } else {
        /*
         * The contents of the two blocks will fit into a single block.
         * Copy the used part of the top block to the top of the next
         * block, and free the top block.
         */
        memcpy(body + used, bot, count * sizeof(ref));
        pstack->bot = bot = body;
        pstack->top = bot + pstack->body_size - 1;
        gs_free_ref_array(pstack->memory, &pstack->current,
                          "ref_stack_pop_block");
        pstack->current = next;
        pstack->p = bot + (used + count - 1);
        pstack->extension_size -= pstack->body_size;
        pstack->extension_used -= used;
    }
    return 0;
}
531 | | |
532 | | /* |
533 | | * Extend a stack to recover from an overflow condition. |
534 | | * May return overflow_error or gs_error_VMerror. |
535 | | */ |
536 | | int |
537 | | ref_stack_extend(ref_stack_t *pstack, uint request) |
538 | 20.9k | { |
539 | 20.9k | uint keep = (pstack->top - pstack->bot + 1) / 3; |
540 | 20.9k | uint count = pstack->p - pstack->bot + 1; |
541 | 20.9k | const ref_stack_params_t *params = pstack->params; |
542 | | |
543 | 20.9k | if (request > params->data_size) |
544 | 0 | return_error(params->overflow_error); |
545 | 20.9k | if (keep + request > pstack->body_size) |
546 | 0 | keep = pstack->body_size - request; |
547 | 20.9k | if (keep > count) |
548 | 0 | keep = count; /* required by ref_stack_push_block */ |
549 | 20.9k | return ref_stack_push_block(pstack, keep, request); |
550 | 20.9k | } |
551 | | |
552 | | /* |
553 | | * Push N empty slots onto a stack. These slots are not initialized: |
554 | | * the caller must immediately fill them. May return overflow_error |
555 | | * (if max_stack would be exceeded, or the stack has no allocator) |
556 | | * or gs_error_VMerror. |
557 | | */ |
int
ref_stack_push(ref_stack_t *pstack, uint count)
{
    /* Don't bother to pre-check for overflow: we must be able to */
    /* back out in the case of a VMerror anyway, and */
    /* ref_stack_push_block will make the check itself. */
    uint needed = count;
    uint added;

    /* Consume the free space of the top block, pushing new blocks until
       the remaining 'needed' slots fit. */
    for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
        int code;

        pstack->p = pstack->top;	/* claim the rest of this block */
        code = ref_stack_push_block(pstack,
                                    (pstack->top - pstack->bot + 1) / 3,
                                    added);
        if (code < 0) {
            /* Back out: pop the slots already claimed in earlier passes. */
            ref_stack_pop(pstack, count - needed + added);
            pstack->requested = count;
            return code;
        }
    }
    pstack->p += needed;
    return 0;
}
584 | | |
585 | | /* |
586 | | * Push a block onto the stack, specifying how many elements of the current |
587 | | * top block should remain in the top block and also how many elements we |
588 | | * are trying to add. Requires keep <= count. May return overflow_error or |
589 | | * gs_error_VMerror. |
590 | | */ |
static int
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
{
    const ref_stack_params_t *params = pstack->params;
    uint count = pstack->p - pstack->bot + 1;	/* live refs in top block */
    uint move = count - keep;	/* refs left behind in the old block */
    ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
    ref next;
    ref_stack_block *pnext;
    ref *body;
    int code;

    if (keep > count)
        return_error(gs_error_Fatal);
    /* Check for overflowing the maximum size, */
    /* or expansion not allowed. */
    /* Or specifically allowing unlimited expansion */
    if (pstack->max_stack.value.intval > 0) {
        if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
            pstack->max_stack.value.intval ||
            !params->allow_expansion
            )
            return_error(params->overflow_error);
    }
    code = gs_alloc_ref_array(pstack->memory, &next, 0,
                              params->block_size, "ref_stack_push_block");
    if (code < 0)
        return code;
    pnext = (ref_stack_block *) next.value.refs;
    body = (ref *) (pnext + 1);
    /* Copy the top keep elements into the new block, */
    /* and make the new block the top block. */
    init_block(pstack, &next, keep);
    body += params->bot_guard;
    memcpy(body, pstack->bot + move, keep * sizeof(ref));
    /* Clear the elements above the top of the new block. */
    refset_null_new(body + keep, params->data_size - keep, 0);
    /* Clear the elements above the top of the old block. */
    refset_null_new(pstack->bot + move, keep, 0);
    pnext->next = pstack->current;	/* chain old block below the new one */
    pcur->used.value.refs = pstack->bot;
    r_set_size(&pcur->used, move);
    pstack->current = next;
    pstack->bot = body;
    pstack->top = pstack->bot + pstack->body_size - 1;
    pstack->p = pstack->bot + keep - 1;
    pstack->extension_size += pstack->body_size;
    pstack->extension_used += move;
    return 0;
}
641 | | |
642 | | /* Begin enumerating the blocks of a stack. */ |
643 | | void |
644 | | ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack) |
645 | 5.84M | { |
646 | 5.84M | prse->block = (ref_stack_block *)pstack->current.value.refs; |
647 | 5.84M | prse->ptr = pstack->bot; |
648 | 5.84M | prse->size = pstack->p + 1 - pstack->bot; |
649 | 5.84M | } |
650 | | |
651 | | bool |
652 | | ref_stack_enum_next(ref_stack_enum_t *prse) |
653 | 62.9k | { |
654 | 62.9k | ref_stack_block *block = |
655 | 62.9k | prse->block = (ref_stack_block *)prse->block->next.value.refs; |
656 | | |
657 | 62.9k | if (block == 0) |
658 | 41.6k | return false; |
659 | 21.3k | prse->ptr = block->used.value.refs; |
660 | 21.3k | prse->size = r_size(&block->used); |
661 | 21.3k | return true; |
662 | 62.9k | } |
663 | | |
664 | | /* Clean up a stack for garbage collection. */ |
void
ref_stack_cleanup(ref_stack_t *pstack)
{
    ref_stack_block *pblock =
        (ref_stack_block *) pstack->current.value.refs;

    /* Null the unused slots above p so the GC sees no stale refs. */
    refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
    pblock->used = pstack->current;	/* set attrs */
    pblock->used.value.refs = pstack->bot;
    r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
}
676 | | |
677 | | /* |
678 | | * Free the entire contents of a stack, including the bottom block. |
679 | | * The client must still call ref_stack_free. Note that after calling |
680 | | * ref_stack_release, the stack is no longer usable. |
681 | | */ |
682 | | void |
683 | | ref_stack_release(ref_stack_t *pstack) |
684 | 0 | { |
685 | 0 | gs_ref_memory_t *mem = pstack->memory; |
686 | |
|
687 | 0 | ref_stack_clear(pstack); |
688 | | /* Free the parameter structure. */ |
689 | 0 | gs_free_object((gs_memory_t *)mem, pstack->params, |
690 | 0 | "ref_stack_release(stack.params)"); |
691 | | /* Free the original (bottom) block. */ |
692 | 0 | gs_free_ref_array(mem, &pstack->current, "ref_stack_release"); |
693 | 0 | } |
694 | | |
695 | | /* |
696 | | * Release a stack and then free the ref_stack object. |
697 | | */ |
698 | | void |
699 | | ref_stack_free(ref_stack_t *pstack) |
700 | 0 | { |
701 | 0 | gs_memory_t *mem = (gs_memory_t *)pstack->memory; |
702 | |
|
703 | 0 | ref_stack_release(pstack); |
704 | 0 | gs_free_object(mem, pstack, "ref_stack_free"); |
705 | 0 | } |
706 | | |
707 | | /* ------ Internal routines ------ */ |
708 | | |
709 | | /* Initialize the guards and body of a stack block. */ |
static void
init_block(ref_stack_t *pstack, const ref *psb, uint used)
{
    ref_stack_params_t *params = pstack->params;
    ref *brefs = psb->value.refs;
    uint i;
    ref *p;

    /* Fill the bottom guard slots (just after the block header) with the
       configured guard value. */
    for (i = params->bot_guard, p = brefs + stack_block_refs;
         i != 0; i--, p++
        )
        ref_assign(p, &params->guard_value);
    /* The top guard elements will never be read, */
    /* but we need to initialize them for the sake of the GC. */
    /* We can use refset_null for this, because even though it uses */
    /* make_null_new and stack elements must not be marked new, */
    /* these slots will never actually be read or written. */
    if (params->top_guard) {
        ref *top = brefs + r_size(psb);
        int top_guard = params->top_guard;

        refset_null_new(top - top_guard, top_guard, 0);
    } {
        /* Record this block's in-use span: starts empty (size 0) just
           above the bottom guard.  NOTE(review): the 'used' parameter is
           not consulted here -- callers set the final size later. */
        ref_stack_block *const pblock = (ref_stack_block *) brefs;

        pblock->used = *psb;
        pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
        r_set_size(&pblock->used, 0);
    }
}
730 | | |
731 | 22.4k | refset_null_new(top - top_guard, top_guard, 0); |
732 | 22.4k | } { |
733 | 23.1k | ref_stack_block *const pblock = (ref_stack_block *) brefs; |
734 | | |
735 | 23.1k | pblock->used = *psb; |
736 | 23.1k | pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard; |
737 | 23.1k | r_set_size(&pblock->used, 0); |
738 | 23.1k | } |
739 | 23.1k | } |