/src/ghostpdl/psi/istack.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2021 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 1305 Grant Avenue - Suite 200, Novato, |
13 | | CA 94945, U.S.A., +1(415)492-9861, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Manager for expandable stacks of refs */ |
18 | | #include "memory_.h" |
19 | | #include "ghost.h" |
20 | | #include "gsstruct.h" |
21 | | #include "gsutil.h" |
22 | | #include "ierrors.h" |
23 | | #include "ialloc.h" |
24 | | #include "istack.h" |
25 | | #include "istkparm.h" |
26 | | #include "istruct.h" /* for RELOC_REF_VAR */ |
27 | | #include "iutil.h" |
28 | | #include "ivmspace.h" /* for local/global test */ |
29 | | #include "store.h" |
30 | | #include "icstate.h" |
31 | | #include "iname.h" |
32 | | #include "dstack.h" |
33 | | #include "idict.h" |
34 | | |
35 | | /* Forward references */ |
36 | | static void init_block(ref_stack_t *pstack, const ref *pblock_array, |
37 | | uint used); |
38 | | static int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add); |
39 | | |
40 | | /* GC descriptors and procedures */ |
41 | | private_st_ref_stack_params(); |
/* GC clear-marks procedure: clear the mark attribute on the ref that
 * designates the current (top) block of the stack. */
static
CLEAR_MARKS_PROC(ref_stack_clear_marks)
{
    ref_stack_t *const sptr = vptr;

    r_clear_attrs(&sptr->current, l_mark);
}
/* GC enumeration procedure: the stack has exactly two GC-visible
 * pointers, the ref for the current block and the params struct. */
static
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
case 0: ENUM_RETURN_REF(&sptr->current);
case 1: return ENUM_OBJ(sptr->params);
ENUM_PTRS_END
/* GC relocation procedure: after the current block moves, rebase the
 * p/bot/top pointers by the same displacement. */
static RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
{
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
    /* * align_packed_per_ref, but it need not be a multiple of */
    /* sizeof(ref). Therefore, we must do the adjustments using */
    /* ref_packed pointers rather than ref pointers. */
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;
    long reloc;

    RELOC_REF_VAR(sptr->current);
    r_clear_attrs(&sptr->current, l_mark);
    /* Displacement (in ref_packed units) the block moved by. */
    reloc = bot - (ref_packed *) sptr->current.value.refs;
#define RELOC_P(p)\
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
    RELOC_P(p);
    RELOC_P(bot);
    RELOC_P(top);
#undef RELOC_P
    RELOC_OBJ_VAR(sptr->params);
} RELOC_PTRS_END
74 | | /* Structure type for a ref_stack. */ |
75 | | public_st_ref_stack(); |
76 | | |
77 | | /* Initialize a stack. */ |
int
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
               uint bot_guard, uint top_guard, const ref *pguard_value,
               gs_ref_memory_t *mem, ref_stack_params_t *params)
{
    uint size = r_size(pblock_array);
    /* Usable data refs per block: total size minus the block header
     * and both guard regions. */
    uint avail = size - (stack_block_refs + bot_guard + top_guard);
    ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
    s_ptr body = (s_ptr)(pblock + 1);

    /* Allocate a params struct if the caller did not supply one. */
    if (params == 0) {
        params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
                                 &st_ref_stack_params,
                                 "ref_stack_alloc(stack.params)");
        if (params == 0)
            return_error(-1);   /* avoid binding in any error codes */
    }

    pstack->bot = body + bot_guard;
    pstack->p = pstack->bot - 1;        /* empty stack: p sits below bot */
    pstack->top = pstack->p + avail;
    pstack->current = *pblock_array;
    pstack->extension_size = 0;
    pstack->extension_used = 0;

    make_int(&pstack->max_stack, avail);
    pstack->requested = 0;
    pstack->margin = 0;
    pstack->body_size = avail;

    pstack->params = params;
    pstack->memory = mem;

    params->bot_guard = bot_guard;
    params->top_guard = top_guard;
    params->block_size = size;
    params->data_size = avail;
    if (pguard_value != 0)
        params->guard_value = *pguard_value;
    else
        make_tav(&params->guard_value, t__invalid, 0, intval, 0);
    /* Error codes and expansion policy get real values later via
     * ref_stack_set_error_codes / ref_stack_allow_expansion. */
    params->underflow_error = -1;
    params->overflow_error = -1;
    params->allow_expansion = true;
    init_block(pstack, pblock_array, 0);
    refset_null_new(pstack->bot, avail, 0);
    make_empty_array(&pblock->next, 0);
    return 0;
}
127 | | |
128 | | /* Set whether a stack is allowed to expand. The initial value is true. */ |
129 | | void |
130 | | ref_stack_allow_expansion(ref_stack_t *pstack, bool expand) |
131 | 89.2k | { |
132 | 89.2k | pstack->params->allow_expansion = expand; |
133 | 89.2k | } |
134 | | |
135 | | /* Set the error codes for under- and overflow. The initial values are -1. */ |
136 | | void |
137 | | ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error, |
138 | | int overflow_error) |
139 | 267k | { |
140 | 267k | pstack->params->underflow_error = underflow_error; |
141 | 267k | pstack->params->overflow_error = overflow_error; |
142 | 267k | } |
143 | | |
144 | | /* Set the maximum number of elements allowed on a stack. */ |
int
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
{
    long nmin;

    /* Bypass checking if we're setting the maximum to -1 'no limits' */
    if (nmax == -1) {
        pstack->max_stack.value.intval = nmax;
        return 0;
    }

    /* check how many items we already have on the stack, don't allow
     * a maximum less than this.
     */
    nmin = ref_stack_count_inline(pstack);

    if (nmax < nmin)
        nmax = nmin;
    /* Clamp so that nmax refs never exceed max_uint bytes. */
    if (nmax > max_uint / sizeof(ref))
        nmax = max_uint / sizeof(ref);
    if (!pstack->params->allow_expansion) {
        uint ncur = pstack->body_size;

        /* A non-expandable stack can never exceed its single block. */
        if (nmax > ncur)
            nmax = ncur;
    }
    pstack->max_stack.value.intval = nmax;
    return 0;
}
174 | | |
175 | | /* |
176 | | * Set the margin between the limit and the top of the stack. |
177 | | * Note that this may require allocating a block. |
178 | | */ |
int
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
{
    const ref_stack_params_t *params = pstack->params;
    uint data_size = params->data_size;

    if (margin <= pstack->margin) {
        /* Shrinking the margin: just null out the newly exposed slots
         * above the (new, higher) top. */
        refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
    } else {
        /* Margins larger than half a block are rejected. */
        if (margin > data_size >> 1)
            return_error(gs_error_rangecheck);
        if (pstack->top - pstack->p < margin) {
            /* Not enough free space left in this block: push a new
             * block, keeping as many of the current elements as will
             * fit under the margin. */
            uint used = pstack->p + 1 - pstack->bot;
            uint keep = data_size - margin;
            int code = ref_stack_push_block(pstack, keep, used - keep);

            if (code < 0)
                return code;
        }
    }
    pstack->margin = margin;
    pstack->body_size = data_size - margin;
    pstack->top = pstack->bot + pstack->body_size - 1;
    return 0;
}
204 | | |
205 | | /* Return the number of elements on a stack. */ |
206 | | uint |
207 | | ref_stack_count(const ref_stack_t *pstack) |
208 | 113M | { |
209 | 113M | return ref_stack_count_inline(pstack); |
210 | 113M | } |
211 | | |
212 | | /* |
213 | | * Return a pointer to a given element from the stack, counting from |
214 | | * 0 as the top element. If the index is out of range, return 0. |
215 | | */ |
ref *
ref_stack_index(const ref_stack_t *pstack, long idx)
{
    ref_stack_block *pblock;
    uint used = pstack->p + 1 - pstack->bot;    /* refs in the top block */

    if (idx < 0)
        return NULL;
    if (idx < used)             /* common case: element is in the top block */
        return pstack->p - (uint) idx;
    /* Walk down the chain of older (lower) blocks. */
    pblock = (ref_stack_block *) pstack->current.value.refs;
    do {
        pblock = (ref_stack_block *) pblock->next.value.refs;
        if (pblock == 0)
            return NULL;        /* ran off the bottom of the stack */
        idx -= used;
        used = r_size(&pblock->used);
    } while (idx >= used);
    /* Within a block, higher addresses are nearer the stack top. */
    return pblock->used.value.refs + (used - 1 - (uint) idx);
}
236 | | |
237 | | /* |
238 | | * Count the number of elements down to and including the first mark. |
239 | | * If no mark is found, return 0. |
240 | | */ |
uint
ref_stack_counttomark(const ref_stack_t *pstack)
{
    uint scanned = 0;
    ref_stack_enum_t rsenum;

    ref_stack_enum_begin(&rsenum, pstack);
    do {
        uint count = rsenum.size;
        const ref *p = rsenum.ptr + count - 1;  /* topmost ref of this block */

        /* Scan this block from its top downward, looking for a mark. */
        for (; count; count--, p--)
            if (r_has_type(p, t_mark))
                /* Count includes the mark itself. */
                return scanned + (rsenum.size - count + 1);
        scanned += rsenum.size;
    } while (ref_stack_enum_next(&rsenum));
    return 0;                   /* no mark anywhere on the stack */
}
259 | | |
260 | | /* |
261 | | * Do the store check for storing 'count' elements of a stack, starting |
262 | | * 'skip' elements below the top, into an array. Return 0 or gs_error_invalidaccess. |
263 | | */ |
int
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
                      uint skip)
{
    uint space = r_space(parray);

    /* Storing into local VM never requires a check. */
    if (space != avm_local) {
        uint left = count, pass = skip;
        ref_stack_enum_t rsenum;

        /* Enumerate blocks top-down; 'pass' counts elements still to be
         * skipped, 'left' counts elements still to be checked. */
        ref_stack_enum_begin(&rsenum, pstack);
        do {
            ref *ptr = rsenum.ptr;
            uint size = rsenum.size;

            if (size <= pass)
                pass -= size;   /* this entire block is above the region */
            else {
                int code;

                if (pass != 0)
                    size -= pass, pass = 0;
                ptr += size;
                if (size > left)
                    size = left;
                left -= size;
                /* Check that each ref may be stored into 'space'. */
                code = refs_check_space(ptr - size, size, space);
                if (code < 0)
                    return code;
                if (left == 0)
                    break;
            }
        } while (ref_stack_enum_next(&rsenum));
    }
    return 0;
}
300 | | |
/*
 * Copy the array 'sarr' element-by-element into 'darr', sanitizing it:
 * operator refs whose name cannot be found in systemdict are replaced
 * with executable names of the form --opname--, operators with an
 * out-of-range index become null, and executable (sub-)arrays are
 * recursively deep-copied and sanitized.  All other objects are copied
 * as-is.  Returns 0 or a negative error code.
 */
int
ref_stack_array_sanitize(i_ctx_t *i_ctx_p, ref *sarr, ref *darr)
{
    int i, code;
    ref obj, arr2;
    ref *pobj2;
    gs_memory_t *mem = (gs_memory_t *)idmemory->current;

    if (!r_is_array(sarr) || !r_has_type(darr, t_array))
        return_error(gs_error_typecheck);

    for (i = 0; i < r_size(sarr); i++) {
        code = array_get(mem, sarr, i, &obj);
        if (code < 0)
            make_null(&obj);    /* unreadable element: store a null instead */
        switch(r_type(&obj)) {
          case t_operator:
            {
                int index = op_index(&obj);

                if (index > 0 && index < op_def_count) {
                    const byte *data = (const byte *)(op_index_def(index)->oname + 1);
                    if (dict_find_string(systemdict, (const char *)data, &pobj2) <= 0) {
                        /* Not in systemdict: synthesize the name "--opname--". */
                        byte *s = gs_alloc_bytes(mem, strlen((char *)data) + 5, "ref_stack_array_sanitize");
                        if (s) {
                            s[0] = '\0';
                            strcpy((char *)s, "--");
                            strcpy((char *)s + 2, (char *)data);
                            strcpy((char *)s + strlen((char *)data) + 2, "--");
                        }
                        else {
                            /* Allocation failed: fall back to the bare
                             * operator name (not freed below). */
                            s = (byte *)data;
                        }
                        code = name_ref(imemory, s, strlen((char *)s), &obj, 1);
                        if (code < 0) make_null(&obj);
                        if (s != data)
                            gs_free_object(mem, s, "ref_stack_array_sanitize");
                    }
                }
                else {
                    /* Invalid operator index: store a null. */
                    make_null(&obj);
                }
                ref_assign(darr->value.refs + i, &obj);
                break;
            }
          case t_array:
          case t_shortarray:
          case t_mixedarray:
            {
                int attrs = r_type_attrs(&obj) & (a_write | a_read | a_execute | a_executable);
                /* We only want to copy executable arrays */
                if (attrs & (a_execute | a_executable)) {
                    code = ialloc_ref_array(&arr2, attrs, r_size(&obj), "ref_stack_array_sanitize");
                    if (code < 0) {
                        make_null(&arr2);
                    }
                    else {
                        /* Recursively sanitize the sub-array. */
                        code = ref_stack_array_sanitize(i_ctx_p, &obj, &arr2);
                        if (code < 0) {
                            ifree_ref_array(&arr2, "ref_stack_array_sanitize");
                            return code;
                        }
                    }
                    ref_assign(darr->value.refs + i, &arr2);
                }
                else {
                    ref_assign(darr->value.refs + i, &obj);
                }
                break;
            }
          default:
            ref_assign(darr->value.refs + i, &obj);
        }
    }
    return 0;
}
377 | | |
378 | | |
379 | | /* |
380 | | * Store the top 'count' elements of a stack, starting 'skip' elements below |
381 | | * the top, into an array, with or without store/undo checking. age=-1 for |
382 | | * no check, 0 for old, 1 for new. May return gs_error_rangecheck or |
383 | | * gs_error_invalidaccess. |
384 | | */ |
385 | | #undef idmemory /****** NOTA BENE ******/ |
int
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
                uint skip, int age, bool check, gs_dual_memory_t *idmemory,
                client_name_t cname)
{
    uint left, pass;
    ref *to;
    ref_stack_enum_t rsenum;

    if (count > ref_stack_count(pstack) || count > r_size(parray))
        return_error(gs_error_rangecheck);
    if (check) {
        /* Verify the destination VM space can hold all the refs. */
        int code = ref_stack_store_check(pstack, parray, count, skip);

        if (code < 0)
            return code;
    }
    /* Copy top-down; 'to' starts one past the last destination slot. */
    to = parray->value.refs + count;
    left = count, pass = skip;
    ref_stack_enum_begin(&rsenum, pstack);
    do {
        ref *from = rsenum.ptr;
        uint size = rsenum.size;

        if (size <= pass)
            pass -= size;       /* this whole block is skipped */
        else {
            if (pass != 0)
                size -= pass, pass = 0;
            from += size;
            if (size > left)
                size = left;
            left -= size;
            /* 'age' selects the store discipline; see function comment. */
            switch (age) {
            case -1:            /* not an array */
                while (size--) {
                    from--, to--;
                    ref_assign(to, from);
                }
                break;
            case 0:             /* old array */
                while (size--) {
                    from--, to--;
                    ref_assign_old(parray, to, from, cname);
                }
                break;
            case 1:             /* new array */
                while (size--) {
                    from--, to--;
                    ref_assign_new(to, from);
                }
                break;
            }
            if (left == 0)
                break;
        }
    } while (ref_stack_enum_next(&rsenum));
    r_set_size(parray, count);
    return 0;
}
446 | | |
447 | | /* |
448 | | * Pop the top N elements off a stack. |
449 | | * The number must not exceed the number of elements in use. |
450 | | */ |
void
ref_stack_pop(ref_stack_t *pstack, uint count)
{
    uint used;

    /* While the top block doesn't contain enough elements, empty it and
     * drop down to the next block.  NOTE(review): the return value of
     * ref_stack_pop_block is ignored here — presumably extension_used > 0
     * guarantees a next block exists so it cannot underflow; verify. */
    while ((used = pstack->p + 1 - pstack->bot) <= count &&
           pstack->extension_used > 0) {
        count -= used;
        pstack->p = pstack->bot - 1;
        ref_stack_pop_block(pstack);
    }
    pstack->p -= count;
}
464 | | |
465 | | /* Pop the top block off a stack. May return underflow_error. */ |
int
ref_stack_pop_block(ref_stack_t *pstack)
{
    s_ptr bot = pstack->bot;
    uint count = pstack->p + 1 - bot;   /* refs still in the top block */
    ref_stack_block *pcur =
        (ref_stack_block *) pstack->current.value.refs;
    ref_stack_block *pnext =
        (ref_stack_block *) pcur->next.value.refs;
    uint used;
    ref *body;
    ref next;

    if (pnext == 0)
        return_error(pstack->params->underflow_error);
    used = r_size(&pnext->used);
    body = (ref *) (pnext + 1) + pstack->params->bot_guard;
    next = pcur->next;
    /*
     * If the contents of the two blocks won't fit in a single block, we
     * move up the used part of the top block, and copy up as much of
     * the contents of the next block under it as will fit.  If the
     * contents of both blocks fit in a single block, we copy the used
     * part of the top block to the top of the next block, and free the
     * top block.
     */
    if (used + count > pstack->body_size) {
        /*
         * The contents of the two blocks won't fit into a single block.
         * On the assumption that we're recovering from a local stack
         * underflow and need to increase the number of contiguous
         * elements available, move up the used part of the top block, and
         * copy up as much of the contents of the next block under it as
         * will fit.
         */
        uint moved = pstack->body_size - count;
        uint left;

        if (moved == 0)
            return_error(gs_error_Fatal);
        /* Regions may overlap, so memmove (not memcpy) here. */
        memmove(bot + moved, bot, count * sizeof(ref));
        left = used - moved;
        memcpy(bot, body + left, moved * sizeof(ref));
        /* Null out the vacated slots in the lower block for the GC. */
        refset_null_new(body + left, moved, 0);
        r_dec_size(&pnext->used, moved);
        pstack->p = pstack->top;
        pstack->extension_used -= moved;
    } else {
        /*
         * The contents of the two blocks will fit into a single block.
         * Copy the used part of the top block to the top of the next
         * block, and free the top block.
         */
        memcpy(body + used, bot, count * sizeof(ref));
        pstack->bot = bot = body;
        pstack->top = bot + pstack->body_size - 1;
        gs_free_ref_array(pstack->memory, &pstack->current,
                          "ref_stack_pop_block");
        pstack->current = next;
        pstack->p = bot + (used + count - 1);
        pstack->extension_size -= pstack->body_size;
        pstack->extension_used -= used;
    }
    return 0;
}
531 | | |
532 | | /* |
533 | | * Extend a stack to recover from an overflow condition. |
534 | | * May return overflow_error or gs_error_VMerror. |
535 | | */ |
536 | | int |
537 | | ref_stack_extend(ref_stack_t *pstack, uint request) |
538 | 2.66M | { |
539 | 2.66M | uint keep = (pstack->top - pstack->bot + 1) / 3; |
540 | 2.66M | uint count = pstack->p - pstack->bot + 1; |
541 | 2.66M | const ref_stack_params_t *params = pstack->params; |
542 | | |
543 | 2.66M | if (request > params->data_size) |
544 | 0 | return_error(params->overflow_error); |
545 | 2.66M | if (keep + request > pstack->body_size) |
546 | 2 | keep = pstack->body_size - request; |
547 | 2.66M | if (keep > count) |
548 | 0 | keep = count; /* required by ref_stack_push_block */ |
549 | 2.66M | return ref_stack_push_block(pstack, keep, request); |
550 | 2.66M | } |
551 | | |
552 | | /* |
553 | | * Push N empty slots onto a stack. These slots are not initialized: |
554 | | * the caller must immediately fill them. May return overflow_error |
555 | | * (if max_stack would be exceeded, or the stack has no allocator) |
556 | | * or gs_error_VMerror. |
557 | | */ |
int
ref_stack_push(ref_stack_t *pstack, uint count)
{
    /* Don't bother to pre-check for overflow: we must be able to */
    /* back out in the case of a VMerror anyway, and */
    /* ref_stack_push_block will make the check itself. */
    uint needed = count;
    uint added;

    /* Claim the free space in the current block, then chain on new
     * blocks until all 'count' slots are available. */
    for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
        int code;

        pstack->p = pstack->top;        /* take the rest of this block */
        code = ref_stack_push_block(pstack,
                                    (pstack->top - pstack->bot + 1) / 3,
                                    added);
        if (code < 0) {
            /* Back out. */
            ref_stack_pop(pstack, count - needed + added);
            pstack->requested = count;
            return code;
        }
    }
    pstack->p += needed;
    return 0;
}
584 | | |
585 | | /* |
586 | | * Push a block onto the stack, specifying how many elements of the current |
587 | | * top block should remain in the top block and also how many elements we |
588 | | * are trying to add. Requires keep <= count. May return overflow_error or |
589 | | * gs_error_VMerror. |
590 | | */ |
static int
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
{
    const ref_stack_params_t *params = pstack->params;
    uint count = pstack->p - pstack->bot + 1;   /* refs in current block */
    uint move = count - keep;                   /* refs left behind */
    ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
    ref next;
    ref_stack_block *pnext;
    ref *body;
    int code;

    if (keep > count)
        return_error(gs_error_Fatal);
    /* Check for overflowing the maximum size, */
    /* or expansion not allowed. */
    /* Or specifically allowing unlimited expansion */
    if (pstack->max_stack.value.intval > 0) {
        if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
            pstack->max_stack.value.intval ||
            !params->allow_expansion
            )
            return_error(params->overflow_error);
    }
    code = gs_alloc_ref_array(pstack->memory, &next, 0,
                              params->block_size, "ref_stack_push_block");
    if (code < 0)
        return code;
    pnext = (ref_stack_block *) next.value.refs;
    body = (ref *) (pnext + 1);
    /* Copy the top keep elements into the new block, */
    /* and make the new block the top block. */
    init_block(pstack, &next, keep);
    body += params->bot_guard;
    memcpy(body, pstack->bot + move, keep * sizeof(ref));
    /* Clear the elements above the top of the new block. */
    refset_null_new(body + keep, params->data_size - keep, 0);
    /* Clear the elements above the top of the old block. */
    refset_null_new(pstack->bot + move, keep, 0);
    /* Link the old block beneath the new one and record how many
     * refs remain in it. */
    pnext->next = pstack->current;
    pcur->used.value.refs = pstack->bot;
    r_set_size(&pcur->used, move);
    pstack->current = next;
    pstack->bot = body;
    pstack->top = pstack->bot + pstack->body_size - 1;
    pstack->p = pstack->bot + keep - 1;
    pstack->extension_size += pstack->body_size;
    pstack->extension_used += move;
    return 0;
}
641 | | |
642 | | /* Begin enumerating the blocks of a stack. */ |
643 | | void |
644 | | ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack) |
645 | 779M | { |
646 | 779M | prse->block = (ref_stack_block *)pstack->current.value.refs; |
647 | 779M | prse->ptr = pstack->bot; |
648 | 779M | prse->size = pstack->p + 1 - pstack->bot; |
649 | 779M | } |
650 | | |
651 | | bool |
652 | | ref_stack_enum_next(ref_stack_enum_t *prse) |
653 | 6.94M | { |
654 | 6.94M | ref_stack_block *block = |
655 | 6.94M | prse->block = (ref_stack_block *)prse->block->next.value.refs; |
656 | | |
657 | 6.94M | if (block == 0) |
658 | 4.09M | return false; |
659 | 2.84M | prse->ptr = block->used.value.refs; |
660 | 2.84M | prse->size = r_size(&block->used); |
661 | 2.84M | return true; |
662 | 6.94M | } |
663 | | |
664 | | /* Clean up a stack for garbage collection. */ |
665 | | void |
666 | | ref_stack_cleanup(ref_stack_t *pstack) |
667 | 547k | { |
668 | 547k | ref_stack_block *pblock = |
669 | 547k | (ref_stack_block *) pstack->current.value.refs; |
670 | | |
671 | 547k | refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0); |
672 | 547k | pblock->used = pstack->current; /* set attrs */ |
673 | 547k | pblock->used.value.refs = pstack->bot; |
674 | 547k | r_set_size(&pblock->used, pstack->p + 1 - pstack->bot); |
675 | 547k | } |
676 | | |
677 | | /* |
678 | | * Free the entire contents of a stack, including the bottom block. |
679 | | * The client must still call ref_stack_free. Note that after calling |
680 | | * ref_stack_release, the stack is no longer usable. |
681 | | */ |
682 | | void |
683 | | ref_stack_release(ref_stack_t *pstack) |
684 | 0 | { |
685 | 0 | gs_ref_memory_t *mem = pstack->memory; |
686 | |
|
687 | 0 | ref_stack_clear(pstack); |
688 | | /* Free the parameter structure. */ |
689 | 0 | gs_free_object((gs_memory_t *)mem, pstack->params, |
690 | 0 | "ref_stack_release(stack.params)"); |
691 | | /* Free the original (bottom) block. */ |
692 | 0 | gs_free_ref_array(mem, &pstack->current, "ref_stack_release"); |
693 | 0 | } |
694 | | |
695 | | /* |
696 | | * Release a stack and then free the ref_stack object. |
697 | | */ |
698 | | void |
699 | | ref_stack_free(ref_stack_t *pstack) |
700 | 0 | { |
701 | 0 | gs_memory_t *mem = (gs_memory_t *)pstack->memory; |
702 | |
|
703 | 0 | ref_stack_release(pstack); |
704 | 0 | gs_free_object(mem, pstack, "ref_stack_free"); |
705 | 0 | } |
706 | | |
707 | | /* ------ Internal routines ------ */ |
708 | | |
709 | | /* Initialize the guards and body of a stack block. */ |
/* Initialize the guards and header of a stack block.
 * NOTE(review): the 'used' parameter is never referenced in this body;
 * the block's used size is always set to 0 here. */
static void
init_block(ref_stack_t *pstack, const ref *psb, uint used)
{
    ref_stack_params_t *params = pstack->params;
    ref *brefs = psb->value.refs;
    uint i;
    ref *p;

    /* Fill the bottom guard region with the configured guard value. */
    for (i = params->bot_guard, p = brefs + stack_block_refs;
         i != 0; i--, p++
        )
        ref_assign(p, &params->guard_value);
    /* The top guard elements will never be read, */
    /* but we need to initialize them for the sake of the GC. */
    /* We can use refset_null for this, because even though it uses */
    /* make_null_new and stack elements must not be marked new, */
    /* these slots will never actually be read or written. */
    if (params->top_guard) {
        ref *top = brefs + r_size(psb);
        int top_guard = params->top_guard;

        refset_null_new(top - top_guard, top_guard, 0);
    } {
        /* Set up the block header: 'used' inherits the array's attrs,
         * points at the data area, and starts with size 0. */
        ref_stack_block *const pblock = (ref_stack_block *) brefs;

        pblock->used = *psb;
        pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
        r_set_size(&pblock->used, 0);
    }
}
731 | 2.94M | refset_null_new(top - top_guard, top_guard, 0); |
732 | 2.94M | } { |
733 | 3.03M | ref_stack_block *const pblock = (ref_stack_block *) brefs; |
734 | | |
735 | 3.03M | pblock->used = *psb; |
736 | 3.03M | pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard; |
737 | 3.03M | r_set_size(&pblock->used, 0); |
738 | 3.03M | } |
739 | 3.03M | } |