/src/ghostpdl/psi/interp.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2024 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Ghostscript language interpreter */ |
18 | | #include "memory_.h" |
19 | | #include "string_.h" |
20 | | #include "ghost.h" |
21 | | #include "gsstruct.h" /* for iastruct.h */ |
22 | | #include "gserrors.h" /* for gpcheck.h */ |
23 | | #include "stream.h" |
24 | | #include "ierrors.h" |
25 | | #include "estack.h" |
26 | | #include "ialloc.h" |
27 | | #include "iastruct.h" |
28 | | #include "icontext.h" |
29 | | #include "icremap.h" |
30 | | #include "idebug.h" |
31 | | #include "igstate.h" /* for handling gs_error_Remap_Color */ |
32 | | #include "inamedef.h" |
33 | | #include "iname.h" /* for the_name_table */ |
34 | | #include "interp.h" |
35 | | #include "ipacked.h" |
36 | | #include "ostack.h" /* must precede iscan.h */ |
37 | | #include "strimpl.h" /* for sfilter.h */ |
38 | | #include "sfilter.h" /* for iscan.h */ |
39 | | #include "iscan.h" |
40 | | #include "iddict.h" |
41 | | #include "isave.h" |
42 | | #include "istack.h" |
43 | | #include "itoken.h" |
44 | | #include "iutil.h" /* for array_get */ |
45 | | #include "ivmspace.h" |
46 | | #include "iinit.h" |
47 | | #include "dstack.h" |
48 | | #include "files.h" /* for file_check_read */ |
49 | | #include "oper.h" |
50 | | #include "store.h" |
51 | | #include "gpcheck.h" |
52 | | #define FORCE_ASSERT_CHECKING 1 |
53 | | #define DEBUG_TRACE_PS_OPERATORS 1 |
54 | | #include "assert_.h" |
55 | | |
56 | | /* |
57 | | * We may or may not optimize the handling of the special fast operators |
58 | | * in packed arrays. If we do this, they run much faster when packed, but |
59 | | * slightly slower when not packed. |
60 | | */ |
61 | | #define PACKED_SPECIAL_OPS 1 |
62 | | |
63 | | /* |
64 | | * Pseudo-operators (procedures of type t_oparray) record |
65 | | * the operand and dictionary stack pointers, and restore them if an error |
66 | | * occurs during the execution of the procedure and if the procedure hasn't |
67 | | * (net) decreased the depth of the stack. While this obviously doesn't |
68 | | * do all the work of restoring the state if a pseudo-operator gets an |
69 | | * error, it's a big help. The only downside is that pseudo-operators run |
70 | | * a little slower. |
71 | | */ |
72 | | |
73 | | /* GC descriptors for stacks */ |
74 | | extern_st(st_ref_stack); |
75 | | public_st_dict_stack(); |
76 | | public_st_exec_stack(); |
77 | | public_st_op_stack(); |
78 | | |
79 | | /* |
80 | | * Apply an operator. When debugging, we route all operator calls |
81 | | * through a procedure. |
82 | | */ |
83 | | #if defined(DEBUG_TRACE_PS_OPERATORS) || defined(DEBUG) |
84 | 32.2G | #define call_operator(proc, p) (*call_operator_fn)(proc, p) |
85 | | static int |
86 | | do_call_operator(op_proc_t op_proc, i_ctx_t *i_ctx_p) |
87 | 32.2G | { |
88 | 32.2G | int code; |
89 | 32.2G | assert(e_stack.p >= e_stack.bot - 1 && e_stack.p < e_stack.top + 1); |
90 | 32.2G | assert(o_stack.p >= o_stack.bot - 1 && o_stack.p < o_stack.top + 1); |
91 | 32.2G | code = op_proc(i_ctx_p); |
92 | 32.2G | if (gs_debug_c(gs_debug_flag_validate_clumps)) |
93 | 0 | ivalidate_clean_spaces(i_ctx_p); |
94 | 32.2G | assert(e_stack.p >= e_stack.bot - 1 && e_stack.p < e_stack.top + 1); |
95 | 32.2G | assert(o_stack.p >= o_stack.bot - 1 && o_stack.p < o_stack.top + 1); |
96 | 32.2G | return code; /* A good place for a conditional breakpoint. */ |
97 | 32.2G | } |
/*
 * Tracing variant of do_call_operator: logs the operator name (and,
 * when SHOW_STACK_DEPTHS is defined, the e-/o-stack depths) on the '!'
 * debug channel before and after the call, then delegates to
 * do_call_operator for the actual dispatch and assertions.
 */
static int
do_call_operator_verbose(op_proc_t op_proc, i_ctx_t *i_ctx_p)
{
    int code;

#ifndef SHOW_STACK_DEPTHS
    if_debug1m('!', imemory, "[!]operator %s\n", op_get_name_string(op_proc));
#else
    if_debug3m('!', imemory, "[!][es=%d os=%d]operator %s\n",
               esp-i_ctx_p->exec_stack.stack.bot,
               osp-i_ctx_p->op_stack.stack.bot,
               op_get_name_string(op_proc));
#endif
    code = do_call_operator(op_proc, i_ctx_p);
    /* Report negative (error) codes on the same debug channel. */
    if (code < 0)
        if_debug1m('!', imemory, "[!] error: %d\n", code);
#if defined(SHOW_STACK_DEPTHS)
    if_debug2m('!', imemory, "[!][es=%d os=%d]\n",
               esp-i_ctx_p->exec_stack.stack.bot,
               osp-i_ctx_p->op_stack.stack.bot);
#endif
    /* NOTE(review): do_call_operator already performs this validation;
     * repeating it here appears redundant but is harmless — confirm. */
    if (gs_debug_c(gs_debug_flag_validate_clumps))
        ivalidate_clean_spaces(i_ctx_p);
    return code;                /* A good place for a conditional breakpoint. */
}
123 | | #else |
124 | | # define call_operator(proc, p) ((*(proc))(p)) |
125 | | #endif |
126 | | |
127 | | /* Define debugging statistics (not threadsafe as uses globals) */ |
/* #define COLLECT_STATS_INTERP */
129 | | |
130 | | #ifdef COLLECT_STATS_INTERP |
131 | | struct stats_interp_s { |
132 | | long top; |
133 | | long lit, lit_array, exec_array, exec_operator, exec_name; |
134 | | long x_add, x_def, x_dup, x_exch, x_if, x_ifelse, |
135 | | x_index, x_pop, x_roll, x_sub; |
136 | | long find_name, name_lit, name_proc, name_oparray, name_operator; |
137 | | long p_full, p_exec_operator, p_exec_oparray, p_exec_non_x_operator, |
138 | | p_integer, p_lit_name, p_exec_name; |
139 | | long p_find_name, p_name_lit, p_name_proc; |
140 | | } stats_interp; |
141 | | # define INCR(v) (++(stats_interp.v)) |
142 | | #else |
143 | 288G | # define INCR(v) DO_NOTHING |
144 | | #endif |
145 | | |
146 | | /* Forward references */ |
147 | | static int estack_underflow(i_ctx_t *); |
148 | | static int interp(i_ctx_t **, const ref *, ref *); |
149 | | static int interp_exit(i_ctx_t *); |
150 | | static int zforceinterp_exit(i_ctx_t *i_ctx_p); |
151 | | static void set_gc_signal(i_ctx_t *, int); |
152 | | static int copy_stack(i_ctx_t *, const ref_stack_t *, int skip, ref *); |
153 | | static int oparray_pop(i_ctx_t *); |
154 | | static int oparray_cleanup(i_ctx_t *); |
155 | | static int zerrorexec(i_ctx_t *); |
156 | | static int zfinderrorobject(i_ctx_t *); |
157 | | static int errorexec_pop(i_ctx_t *); |
158 | | static int errorexec_cleanup(i_ctx_t *); |
159 | | static int zsetstackprotect(i_ctx_t *); |
160 | | static int zcurrentstackprotect(i_ctx_t *); |
161 | | static int zactonuel(i_ctx_t *); |
162 | | |
163 | | /* Stack sizes */ |
164 | | |
165 | | /* The maximum stack sizes may all be set in the makefile. */ |
166 | | |
167 | | /* |
168 | | * Define the initial maximum size of the operand stack (MaxOpStack |
169 | | * user parameter). |
170 | | */ |
171 | | #ifndef MAX_OSTACK |
172 | 162k | # define MAX_OSTACK 800 |
173 | | #endif |
174 | | /* |
175 | | * The minimum block size for extending the operand stack is the larger of: |
176 | | * - the maximum number of parameters to an operator |
177 | | * (currently setcolorscreen, with 12 parameters); |
178 | | * - the maximum number of values pushed by an operator |
179 | | * (currently setcolortransfer, which calls zcolor_remap_one 4 times |
180 | | * and therefore pushes 16 values). |
181 | | */ |
182 | | #define MIN_BLOCK_OSTACK 16 |
183 | | const int gs_interp_max_op_num_args = MIN_BLOCK_OSTACK; /* for iinit.c */ |
184 | | |
185 | | /* |
186 | | * Define the initial maximum size of the execution stack (MaxExecStack |
187 | | * user parameter). |
188 | | */ |
189 | | #ifndef MAX_ESTACK |
190 | 162k | # define MAX_ESTACK 5000 |
191 | | #endif |
192 | | /* |
193 | | * The minimum block size for extending the execution stack is the largest |
194 | | * size of a contiguous block surrounding an e-stack mark. (At least, |
195 | | * that's what the minimum value would be if we supported multi-block |
196 | | * estacks, which we currently don't.) Currently, the largest such block is |
197 | | * the one created for text processing, which is 8 (snumpush) slots. |
198 | | */ |
199 | 166 | #define MIN_BLOCK_ESTACK 8 |
200 | | /* |
201 | | * If we get an e-stack overflow, we need to cut it back far enough to |
202 | | * have some headroom for executing the error procedure. |
203 | | */ |
204 | 32 | #define ES_HEADROOM 20 |
205 | | |
206 | | /* |
207 | | * Define the initial maximum size of the dictionary stack (MaxDictStack |
208 | | * user parameter). Again, this is also currently the block size for |
209 | | * extending the d-stack. |
210 | | */ |
211 | | #ifndef MAX_DSTACK |
212 | 162k | # define MAX_DSTACK 20 |
213 | | #endif |
214 | | /* |
215 | | * The minimum block size for extending the dictionary stack is the number |
216 | | * of permanent entries on the dictionary stack, currently 3. |
217 | | */ |
218 | | #define MIN_BLOCK_DSTACK 3 |
219 | | |
220 | | /* See estack.h for a description of the execution stack. */ |
221 | | |
222 | | /* The logic for managing icount and iref below assumes that */ |
223 | | /* there are no control operators which pop and then push */ |
224 | | /* information on the execution stack. */ |
225 | | |
226 | | /* Stacks */ |
227 | | extern_st(st_ref_stack); |
228 | 487k | #define OS_GUARD_UNDER 10 |
229 | 487k | #define OS_GUARD_OVER 10 |
230 | | #define OS_REFS_SIZE(body_size)\ |
231 | 324k | (stack_block_refs + OS_GUARD_UNDER + (body_size) + OS_GUARD_OVER) |
232 | | |
233 | 487k | #define ES_GUARD_UNDER 1 |
234 | 487k | #define ES_GUARD_OVER 10 |
235 | | #define ES_REFS_SIZE(body_size)\ |
236 | 324k | (stack_block_refs + ES_GUARD_UNDER + (body_size) + ES_GUARD_OVER) |
237 | | |
238 | | #define DS_REFS_SIZE(body_size)\ |
239 | 162k | (stack_block_refs + (body_size)) |
240 | | |
241 | | /* Extended types. The interpreter may replace the type of operators */ |
242 | | /* in procedures with these, to speed up the interpretation loop. */ |
243 | | /****** NOTE: If you add or change entries in this list, */ |
244 | | /****** you must change the three dispatches in the interpreter loop. */ |
245 | | /* The operator procedures are declared in opextern.h. */ |
/* First pseudo-type index used for the hard-coded "special" operators. */
#define tx_op t_next_index
typedef enum {
    tx_op_add = tx_op,
    tx_op_def,
    tx_op_dup,
    tx_op_exch,
    tx_op_if,
    tx_op_ifelse,
    tx_op_index,
    tx_op_pop,
    tx_op_roll,
    tx_op_sub,
    tx_next_op
} special_op_types;

/* Number of special (extended-type) operators defined above. */
#define num_special_ops ((int)tx_next_op - tx_op)
const int gs_interp_num_special_ops = num_special_ops;  /* for iinit.c */
/* First type index available after the special-operator pseudo-types. */
const int tx_next_index = tx_next_op;
264 | | |
265 | | /* |
266 | | * NOTE: if the size of either table below ever exceeds 15 real entries, it |
267 | | * will have to be split. |
268 | | */ |
269 | | /* Define the extended-type operators per the list above. */ |
/*
 * Table of the ten "special" operators that the interpreter dispatches
 * by extended type (tx_op_*) rather than through the normal operator
 * mechanism.  The order of entries must match the special_op_types enum.
 * The leading digit in each name string is the operand count.
 */
const op_def interp1_op_defs[] = {
    /*
     * The very first entry, which corresponds to operator index 0,
     * must not contain an actual operator.
     */
    op_def_begin_dict("systemdict"),
    {"2add", zadd},
    {"2def", zdef},
    {"1dup", zdup},
    {"2exch", zexch},
    {"2if", zif},
    {"3ifelse", zifelse},
    {"1index", zindex},
    {"1pop", zpop},
    {"2roll", zroll},
    {"2sub", zsub},
    op_def_end(0)
};
288 | | /* Define the internal interpreter operators. */ |
/*
 * Internal interpreter operators.  Names beginning with '.' are
 * internal-but-visible; names beginning with '%' mark e-stack control
 * entries that the interpreter recognizes specially.
 */
const op_def interp2_op_defs[] = {
    {"0.currentstackprotect", zcurrentstackprotect},
    {"1.setstackprotect", zsetstackprotect},
    {"2.errorexec", zerrorexec},
    {"0.finderrorobject", zfinderrorobject},
    {"0%interp_exit", interp_exit},
    {"0.forceinterp_exit", zforceinterp_exit},
    {"0%oparray_pop", oparray_pop},
    {"0%errorexec_pop", errorexec_pop},
    {"0.actonuel", zactonuel},
    op_def_end(0)
};
301 | | |
302 | | #define make_null_proc(pref)\ |
303 | 18.1k | make_empty_const_array(pref, a_executable + a_readonly) |
304 | | |
305 | | /* Initialize the interpreter. */ |
306 | | int |
307 | | gs_interp_init(i_ctx_t **pi_ctx_p, const ref *psystem_dict, |
308 | | gs_dual_memory_t *dmem) |
309 | 162k | { |
310 | | /* Create and initialize a context state. */ |
311 | 162k | gs_context_state_t *pcst = 0; |
312 | 162k | int code = context_state_alloc(&pcst, psystem_dict, dmem); |
313 | 162k | if (code >= 0) { |
314 | 162k | code = context_state_load(pcst); |
315 | 162k | if (code < 0) { |
316 | 0 | context_state_free(pcst); |
317 | 0 | pcst = NULL; |
318 | 0 | } |
319 | 162k | } |
320 | | |
321 | 162k | if (code < 0) |
322 | 0 | lprintf1("Fatal error %d in gs_interp_init!\n", code); |
323 | 162k | *pi_ctx_p = pcst; |
324 | | |
325 | 162k | return code; |
326 | 162k | } |
327 | | /* |
328 | | * Create initial stacks for the interpreter. |
329 | | * We export this for creating new contexts. |
330 | | */ |
/*
 * Allocate and initialize the three interpreter stacks (operand,
 * execution, dictionary) for a context.  A single ref array is
 * allocated from stable memory and carved into three consecutive
 * regions, one per stack, by advancing stk.value.refs.
 * Returns 0 on success or a negative error code.
 */
int
gs_interp_alloc_stacks(gs_ref_memory_t *mem, gs_context_state_t * pcst)
{
    int code;
    gs_ref_memory_t *smem =
        (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)mem);
    ref stk;

#define REFS_SIZE_OSTACK OS_REFS_SIZE(MAX_OSTACK)
#define REFS_SIZE_ESTACK ES_REFS_SIZE(MAX_ESTACK)
#define REFS_SIZE_DSTACK DS_REFS_SIZE(MAX_DSTACK)
    /* One allocation covers all three stacks. */
    code = gs_alloc_ref_array(smem, &stk, 0,
                              REFS_SIZE_OSTACK + REFS_SIZE_ESTACK +
                              REFS_SIZE_DSTACK, "gs_interp_alloc_stacks");
    if (code < 0)
        return code;

    /* NOTE(review): the error returns below leave the ref array
     * allocated — presumably the caller's context teardown reclaims
     * it; confirm before relying on these paths. */

    {   /* Operand stack: guarded above and below, expandable. */
        ref_stack_t *pos = &pcst->op_stack.stack;

        r_set_size(&stk, REFS_SIZE_OSTACK);
        code = ref_stack_init(pos, &stk, OS_GUARD_UNDER, OS_GUARD_OVER, NULL,
                              smem, NULL);
        if (code < 0)
            return code;
        ref_stack_set_error_codes(pos, gs_error_stackunderflow, gs_error_stackoverflow);
        ref_stack_set_max_count(pos, MAX_OSTACK);
        stk.value.refs += REFS_SIZE_OSTACK;     /* advance to next region */
    }

    {   /* Execution stack: underflow guard slots dispatch to estack_underflow. */
        ref_stack_t *pes = &pcst->exec_stack.stack;
        ref euop;

        r_set_size(&stk, REFS_SIZE_ESTACK);
        make_oper(&euop, 0, estack_underflow);
        code = ref_stack_init(pes, &stk, ES_GUARD_UNDER, ES_GUARD_OVER, &euop,
                              smem, NULL);
        if (code < 0)
            return code;
        ref_stack_set_error_codes(pes, gs_error_ExecStackUnderflow,
                                  gs_error_execstackoverflow);
        /**************** E-STACK EXPANSION IS NYI. ****************/
        ref_stack_allow_expansion(pes, false);
        ref_stack_set_max_count(pes, MAX_ESTACK);
        stk.value.refs += REFS_SIZE_ESTACK;     /* advance to next region */
    }

    {   /* Dictionary stack: no guard slots. */
        ref_stack_t *pds = &pcst->dict_stack.stack;

        r_set_size(&stk, REFS_SIZE_DSTACK);
        code = ref_stack_init(pds, &stk, 0, 0, NULL, smem, NULL);
        if (code < 0)
            return code;
        ref_stack_set_error_codes(pds, gs_error_dictstackunderflow,
                                  gs_error_dictstackoverflow);
        ref_stack_set_max_count(pds, MAX_DSTACK);
    }

#undef REFS_SIZE_OSTACK
#undef REFS_SIZE_ESTACK
#undef REFS_SIZE_DSTACK
    return 0;
}
396 | | /* |
397 | | * Free the stacks when destroying a context. This is the inverse of |
398 | | * create_stacks. |
399 | | */ |
/*
 * Release the three interpreter stacks of a context.  This is the
 * inverse of gs_interp_alloc_stacks; the stacks are released in the
 * reverse of their allocation order (dict, exec, op).
 */
void
gs_interp_free_stacks(gs_ref_memory_t * smem, gs_context_state_t * pcst)
{
    /* Free the stacks in inverse order of allocation. */
    ref_stack_release(&pcst->dict_stack.stack);
    ref_stack_release(&pcst->exec_stack.stack);
    ref_stack_release(&pcst->op_stack.stack);
}
/*
 * Reset the interpreter stacks: clear the operand and execution stacks,
 * plant an interp_exit operator as the sole e-stack entry so execution
 * terminates cleanly, and cut the dictionary stack back to its
 * permanent entries.
 */
void
gs_interp_reset(i_ctx_t *i_ctx_p)
{   /* Reset the stacks. */
    ref_stack_clear(&o_stack);
    ref_stack_clear(&e_stack);
    esp++;
    make_oper(esp, 0, interp_exit);
    ref_stack_pop_to(&d_stack, min_dstack_size);
    dict_set_top();
}
418 | | /* Report an e-stack block underflow. The bottom guard slots of */ |
419 | | /* e-stack blocks contain a pointer to this procedure. */ |
/*
 * Report an e-stack block underflow.  The bottom guard slots of
 * e-stack blocks contain an operator that dispatches here; it simply
 * returns the corresponding error code.
 */
static int
estack_underflow(i_ctx_t *i_ctx_p)
{
    return gs_error_ExecStackUnderflow;
}
425 | | |
426 | | /* |
427 | | * Create an operator during initialization. |
428 | | * If operator is hard-coded into the interpreter, |
429 | | * assign it a special type and index. |
430 | | */ |
431 | | void |
432 | | gs_interp_make_oper(ref * opref, op_proc_t proc, int idx) |
433 | 106M | { |
434 | 106M | int i; |
435 | | |
436 | 1.15G | for (i = num_special_ops; i > 0 && proc != interp1_op_defs[i].proc; --i) |
437 | 1.04G | DO_NOTHING; |
438 | 106M | if (i > 0) |
439 | 3.24M | make_tasv(opref, tx_op + (i - 1), a_executable, i, opproc, proc); |
440 | 103M | else |
441 | 103M | make_tasv(opref, t_operator, a_executable, idx, opproc, proc); |
442 | 106M | } |
443 | | |
444 | | /* |
445 | | * Call the garbage collector, updating the context pointer properly. |
446 | | */ |
/*
 * Call the garbage collector, updating the context pointer properly.
 * The context pointer is registered as a GC root for the duration of
 * the collection because the context itself may be relocated.
 * 'space' selects which VM space(s) to reclaim (see the reclaim proc).
 */
int
interp_reclaim(i_ctx_t **pi_ctx_p, int space)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;
    gs_gc_root_t ctx_root, *r = &ctx_root;
    int code;

#ifdef DEBUG
    /* Allow GC to be disabled entirely for debugging. */
    if (gs_debug_c(gs_debug_flag_gc_disable))
        return 0;
#endif

    gs_register_struct_root(imemory_system, &r,
                            (void **)pi_ctx_p, "interp_reclaim(pi_ctx_p)");
    code = (*idmemory->reclaim)(idmemory, space);
    i_ctx_p = *pi_ctx_p;        /* may have moved */
    gs_unregister_root(imemory_system, r, "interp_reclaim(pi_ctx_p)");
    return code;
}
466 | | |
467 | | /* |
468 | | * Invoke the interpreter. If execution completes normally, return 0. |
469 | | * If an error occurs, the action depends on user_errors as follows: |
470 | | * user_errors < 0: always return an error code. |
471 | | * user_errors >= 0: let the PostScript machinery handle all errors. |
472 | | * (This will eventually result in a fatal error if no 'stopped' |
473 | | * is active.) |
474 | | * In case of a quit or a fatal error, also store the exit code. |
475 | | * Set *perror_object to null or the error object. |
476 | | */ |
477 | | static int gs_call_interp(i_ctx_t **, ref *, int, int *, ref *); |
/*
 * Public entry point for running the interpreter on an object.
 * Registers perror_object as a GC root around the call (the error
 * object may otherwise be relocated by a collection), delegates to
 * gs_call_interp, and clears the per-context GC signal on the way out.
 */
int
gs_interpret(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, int *pexit_code,
             ref * perror_object)
{
    i_ctx_t *i_ctx_p = *pi_ctx_p;
    gs_gc_root_t error_root, *r = &error_root;
    int code;

    gs_register_ref_root(imemory_system, &r,
                         (void **)&perror_object, "gs_interpret");
    code = gs_call_interp(pi_ctx_p, pref, user_errors, pexit_code,
                          perror_object);
    i_ctx_p = *pi_ctx_p;        /* the context may have moved during a GC */
    gs_unregister_root(imemory_system, &error_root, "gs_interpret");
    /* Avoid a dangling reference to the lib context GC signal. */
    set_gc_signal(i_ctx_p, 0);
    return code;
}
/*
 * Core interpreter driver and error-recovery loop.  Runs interp() on
 * epref; on return, classifies the error code:
 *   - terminal codes (Fatal, Quit, InterpreterExit, NeedInput,
 *     interrupt) are returned to the caller;
 *   - VMreclaim triggers a GC and re-enters the loop;
 *   - stack over/underflows are recovered where possible by extending
 *     or popping stack blocks, then re-entering at 'again';
 *   - all other errors are routed to the PostScript error handler
 *     found in gserrordict/errordict (unless user_errors < 0), with
 *     the offending object pushed on the operand stack first.
 */
static int
gs_call_interp(i_ctx_t **pi_ctx_p, ref * pref, int user_errors,
               int *pexit_code, ref * perror_object)
{
    ref *epref = pref;          /* object to execute on (re)entry */
    ref doref;                  /* holds the re-execution object across 'again' */
    ref *perrordict;
    ref error_name;
    int code, ccode;
    ref saref;                  /* snapshot of an overflowed stack */
    i_ctx_t *i_ctx_p = *pi_ctx_p;
    int *gc_signal = &imemory_system->gs_lib_ctx->gcsignal;

    *pexit_code = 0;
    *gc_signal = 0;

    /* This avoids a valgrind error */
    doref.tas.type_attrs = error_name.tas.type_attrs = saref.tas.type_attrs = 0;

    ialloc_reset_requested(idmemory);
again:
    /* Avoid a dangling error object that might get traced by a future GC. */
    make_null(perror_object);
    o_stack.requested = e_stack.requested = d_stack.requested = 0;
    while (*gc_signal) {        /* Some routine below triggered a GC. */
        gs_gc_root_t epref_root, *r = &epref_root;

        *gc_signal = 0;
        /* Make sure that doref will get relocated properly if */
        /* a garbage collection happens with epref == &doref. */
        gs_register_ref_root(imemory_system, &r,
                             (void **)&epref, "gs_call_interp(epref)");
        code = interp_reclaim(pi_ctx_p, -1);
        i_ctx_p = *pi_ctx_p;
        gs_unregister_root(imemory_system, &epref_root,
                           "gs_call_interp(epref)");
        if (code < 0)
            return code;
    }
    code = interp(pi_ctx_p, epref, perror_object);
    i_ctx_p = *pi_ctx_p;
    /* Pick up an error object the interpreter stashed in the context. */
    if (!r_has_type(&i_ctx_p->error_object, t__invalid)) {
        *perror_object = i_ctx_p->error_object;
        make_t(&i_ctx_p->error_object, t__invalid);
    }
    /* Prevent a dangling reference to the GC signal in ticks_left */
    /* in the frame of interp, but be prepared to do a GC if */
    /* an allocation in this routine asks for it. */
    *gc_signal = 0;
    set_gc_signal(i_ctx_p, 1);
    if (esp < esbot)            /* popped guard entry */
        esp = esbot;
    /* First dispatch: codes with fixed handling. */
    switch (code) {
        case gs_error_Fatal:
            *pexit_code = 255;
            return code;
        case gs_error_Quit:
            /* quit leaves <error_object> <exit_code> on the o-stack. */
            *perror_object = osp[-1];
            *pexit_code = code = osp->value.intval;
            osp -= 2;
            return
                (code == 0 ? gs_error_Quit :
                 code < 0 && code > -100 ? code : gs_error_Fatal);
        case gs_error_InterpreterExit:
            return 0;
        case gs_error_ExecStackUnderflow:
/****** WRONG -- must keep mark blocks intact ******/
            ref_stack_pop_block(&e_stack);
            doref = *perror_object;
            epref = &doref;
            goto again;
        case gs_error_VMreclaim:
            /* Do the GC and continue. */
            /* We ignore the return value here, if it fails here
             * we'll call it again having jumped to the "again" label.
             * Where, assuming it fails again, we'll handle the error.
             */
            (void)interp_reclaim(pi_ctx_p,
                                 (osp->value.intval == 2 ?
                                  avm_global : avm_local));
            i_ctx_p = *pi_ctx_p;
            /* Re-enter via a pop to discard the vmreclaim operand. */
            make_oper(&doref, 0, zpop);
            epref = &doref;
            goto again;
        case gs_error_NeedInput:
        case gs_error_interrupt:
            return code;
    }
    /* Adjust osp in case of operand stack underflow */
    if (osp < osbot - 1)
        osp = osbot - 1;
    /* We have to handle stack over/underflow specially, because */
    /* we might be able to recover by adding or removing a block. */
    switch (code) {
        case gs_error_dictstackoverflow:
            /* We don't have to handle this specially: */
            /* The only places that could generate it */
            /* use check_dstack, which does a ref_stack_extend, */
            /* so if we get this error, it's a real one. */
            if (osp >= ostop) {
                if ((ccode = ref_stack_extend(&o_stack, 1)) < 0)
                    return ccode;
            }
            /* Skip system dictionaries for CET 20-02-02 */
            ccode = copy_stack(i_ctx_p, &d_stack, min_dstack_size, &saref);
            if (ccode < 0)
                return ccode;
            ref_stack_pop_to(&d_stack, min_dstack_size);
            dict_set_top();
            *++osp = saref;     /* push the d-stack snapshot for the handler */
            break;
        case gs_error_dictstackunderflow:
            if (ref_stack_pop_block(&d_stack) >= 0) {
                dict_set_top();
                doref = *perror_object;
                epref = &doref;
                goto again;
            }
            break;
        case gs_error_execstackoverflow:
            /* We don't have to handle this specially: */
            /* The only places that could generate it */
            /* use check_estack, which does a ref_stack_extend, */
            /* so if we get this error, it's a real one. */
            if (osp >= ostop) {
                if ((ccode = ref_stack_extend(&o_stack, 1)) < 0)
                    return ccode;
            }
            ccode = copy_stack(i_ctx_p, &e_stack, 0, &saref);
            if (ccode < 0)
                return ccode;
            {
                uint count = ref_stack_count(&e_stack);
                uint limit = ref_stack_max_count(&e_stack) - ES_HEADROOM;

                if (count > limit) {
                    /*
                     * If there is an e-stack mark within MIN_BLOCK_ESTACK of
                     * the new top, cut the stack back to remove the mark.
                     */
                    int skip = count - limit;
                    int i;

                    for (i = skip; i < skip + MIN_BLOCK_ESTACK; ++i) {
                        const ref *ep = ref_stack_index(&e_stack, i);

                        if (ep == NULL)
                            continue;

                        /* Executable nulls serve as e-stack marks. */
                        if (r_has_type_attrs(ep, t_null, a_executable)) {
                            skip = i + 1;
                            break;
                        }
                    }
                    pop_estack(i_ctx_p, skip);
                }
            }
            *++osp = saref;     /* push the e-stack snapshot for the handler */
            break;
        case gs_error_stackoverflow:
            if (ref_stack_extend(&o_stack, o_stack.requested) >= 0) {   /* We can't just re-execute the object, because */
                /* it might be a procedure being pushed as a */
                /* literal. We check for this case specially. */
                doref = *perror_object;
                if (r_is_proc(&doref)) {
                    *++osp = doref;
                    make_null_proc(&doref);
                }
                epref = &doref;
                goto again;
            }
            /* Could not extend: snapshot and clear the o-stack instead. */
            ccode = copy_stack(i_ctx_p, &o_stack, 0, &saref);
            if (ccode < 0)
                return ccode;
            ref_stack_clear(&o_stack);
            *++osp = saref;
            break;
        case gs_error_stackunderflow:
            if (ref_stack_pop_block(&o_stack) >= 0) {
                doref = *perror_object;
                epref = &doref;
                goto again;
            }
            break;
    }
    if (user_errors < 0)
        return code;
    if (gs_errorname(i_ctx_p, code, &error_name) < 0)
        return code;            /* out-of-range error code! */

    /* We refer to gserrordict first, which is not accessible to Postcript jobs
     * If we're running with SAFERERRORS all the handlers are copied to gserrordict
     * so we'll always find the default one. If not SAFERERRORS, only gs specific
     * errors are in gserrordict.
     */
    if ((dict_find_string(systemdict, "gserrordict", &perrordict) <= 0 ||
         !r_has_type(perrordict, t_dictionary) ||
         dict_find(perrordict, &error_name, &epref) <= 0) &&
        (dict_find_string(systemdict, "errordict", &perrordict) <= 0 ||
         !r_has_type(perrordict, t_dictionary) ||
         dict_find(perrordict, &error_name, &epref) <= 0))
        return code;            /* error name not in errordict??? */

    if (code == gs_error_execstackoverflow
        && obj_eq(imemory, &doref, epref)) {
        /* This strongly suggests we're in an error handler that
           calls itself infinitely, so Postscript is done, return
           to the caller.
         */
        ref_stack_clear(&e_stack);
        *pexit_code = gs_error_execstackoverflow;
        return_error(gs_error_execstackoverflow);
    }

    if (!r_is_proc(epref)){
        *pexit_code = gs_error_Fatal;
        return_error(gs_error_Fatal);
    }

    doref = *epref;
    epref = &doref;
    /* Push the error object on the operand stack if appropriate. */
    if (!GS_ERROR_IS_INTERRUPT(code)) {
        byte buf[260], *bufptr;
        uint rlen;
        /* Replace the error object if within an oparray or .errorexec. */
        osp++;
        if (osp >= ostop) {
            *pexit_code = gs_error_Fatal;
            return_error(gs_error_Fatal);
        }
        *osp = *perror_object;
        errorexec_find(i_ctx_p, osp);

        if (!r_has_type(osp, t_string) && !r_has_type(osp, t_name)) {
            /* Render the error object as text (offset 2 leaves room
             * for the "--" decoration added below). */
            code = obj_cvs(imemory, osp, buf + 2, 256, &rlen, (const byte **)&bufptr);
            if (code < 0) {
                const char *unknownstr = "--unknown--";
                rlen = strlen(unknownstr);
                memcpy(buf, unknownstr, rlen);
                bufptr = buf;
            }
            else {
                ref *tobj;
                bufptr[rlen] = '\0';
                /* Only pass a name object if the operator doesn't exist in systemdict
                 * i.e. it's an internal operator we have hidden
                 */
                code = dict_find_string(systemdict, (const char *)bufptr, &tobj);
                if (code <= 0) {
                    /* Hidden operator: decorate the name as --name--. */
                    buf[0] = buf[1] = buf[rlen + 2] = buf[rlen + 3] = '-';
                    rlen += 4;
                    bufptr = buf;
                }
                else {
                    bufptr = NULL;  /* visible operator: keep the object itself */
                }
            }
            if (bufptr) {
                code = name_ref(imemory, buf, rlen, osp, 1);
                if (code < 0)
                    make_null(osp);
            }
        }
    }
    goto again;                 /* run the PostScript error handler */
}
/*
 * Operator %interp_exit: terminates the innermost interpreter
 * invocation by returning the InterpreterExit code, which
 * gs_call_interp maps to a normal (0) return.
 */
static int
interp_exit(i_ctx_t *i_ctx_p)
{
    return gs_error_InterpreterExit;
}
768 | | |
769 | | /* Only used (currently) with language switching: |
770 | | * allows the PS interpreter to co-exist with the |
771 | | * PJL interpreter. |
772 | | */ |
/* Only used (currently) with language switching:
 * allows the PS interpreter to co-exist with the
 * PJL interpreter.
 *
 * Expects a file object on the operand stack; records the stream
 * position (for UEL handling), resets the stream, and — if the
 * library is configured to act on UEL — resets the interpreter and
 * exits via gs_error_Quit with an InterpreterExit code pushed.
 */
static int
zforceinterp_exit(i_ctx_t *i_ctx_p)
{
    os_ptr op = osp;
    stream *s;

    check_file(s, op);          /* type-checks op and binds s; may return an error */
    i_ctx_p->uel_position = stell(s)-1;
    /* resetfile */
    if (file_is_valid(s, op))
        sreset(s);

    /* If we are not configured to act on UEL, there is nothing more to do. */
    if (!gs_lib_ctx_get_act_on_uel((gs_memory_t *)(i_ctx_p->memory.current)))
        return 0;

    gs_interp_reset(i_ctx_p);
    /* gs_interp_reset() actually leaves the op stack one entry below
     * the bottom of the stack, and that can cause problems depending
     * on the interpreter state at the end of the job.
     * So push a null object, and the return code before continuing.
     */
    push(2);
    op = osp;
    make_null(op - 1);
    make_int(op, gs_error_InterpreterExit);
    return_error(gs_error_Quit);
}
800 | | |
801 | | /* Set the GC signal for all VMs. */ |
802 | | static void |
803 | | set_gc_signal(i_ctx_t *i_ctx_p, int value) |
804 | 239M | { |
805 | 239M | gs_memory_gc_status_t stat; |
806 | 239M | int i; |
807 | | |
808 | 1.19G | for (i = 0; i < countof(idmemory->spaces_indexed); i++) { |
809 | 956M | gs_ref_memory_t *mem = idmemory->spaces_indexed[i]; |
810 | 956M | gs_ref_memory_t *mem_stable; |
811 | | |
812 | 956M | if (mem == 0) |
813 | 239M | continue; |
814 | 1.19G | for (;; mem = mem_stable) { |
815 | 1.19G | mem_stable = (gs_ref_memory_t *) |
816 | 1.19G | gs_memory_stable((gs_memory_t *)mem); |
817 | 1.19G | gs_memory_gc_status(mem, &stat); |
818 | 1.19G | stat.signal_value = value; |
819 | 1.19G | gs_memory_set_gc_status(mem, &stat); |
820 | 1.19G | if (mem_stable == mem) |
821 | 717M | break; |
822 | 1.19G | } |
823 | 717M | } |
824 | 239M | } |
825 | | |
826 | | /* Create a printable string ref (or null) from an arbitrary ref. |
827 | | * For the purpose this is used here, it cannot fail, any |
828 | | * error in the process results in a null object, instead |
829 | | * of the string. |
830 | | */ |
831 | | static void obj_cvs_ref(i_ctx_t *i_ctx_p, const ref *in, ref *out) |
832 | 2.35k | { |
833 | 2.35k | uint rlen; |
834 | 2.35k | int code; |
835 | 2.35k | byte sbuf[65], *buf = sbuf; |
836 | 2.35k | uint len = sizeof(sbuf) - 1; |
837 | | |
838 | 2.35k | code = obj_cvs(imemory, in, buf, len, &rlen, NULL); |
839 | 2.35k | if (code == gs_error_rangecheck) { |
840 | 0 | len = rlen; |
841 | 0 | buf = gs_alloc_bytes(imemory, len + 1, "obj_cvs_ref"); |
842 | 0 | if (!buf) |
843 | 0 | code = -1; |
844 | 0 | else |
845 | 0 | code = obj_cvs(imemory, in, buf, len, &rlen, NULL); |
846 | 0 | } |
847 | 2.35k | if (code < 0) { |
848 | 0 | make_null(out); |
849 | 0 | } |
850 | 2.35k | else { |
851 | 2.35k | buf[rlen] = '\0'; |
852 | 2.35k | code = string_to_ref((const char *)buf, out, iimemory, "obj_cvs_ref"); |
853 | 2.35k | if (code < 0) |
854 | 2.35k | make_null(out); |
855 | 2.35k | } |
856 | 2.35k | if (buf != sbuf) |
857 | 0 | gs_free_object(imemory, buf, "obj_cvs_ref"); |
858 | 2.35k | return; |
859 | 2.35k | } |
860 | | |
861 | | /* Copy top elements of an overflowed stack into a (local) array. */ |
862 | | /* Adobe copies only 500 top elements, we copy up to 65535 top elements */ |
863 | | /* for better debugging, PLRM compliance, and backward compatibility. */ |
864 | | static int |
865 | | copy_stack(i_ctx_t *i_ctx_p, const ref_stack_t * pstack, int skip, ref * arr) |
866 | 266 | { |
867 | 266 | uint size = ref_stack_count(pstack) - skip; |
868 | 266 | uint save_space = ialloc_space(idmemory); |
869 | 266 | int code, i; |
870 | 266 | ref *safety, *safe; |
871 | | |
872 | 266 | if (size > 65535) |
873 | 230 | size = 65535; |
874 | 266 | ialloc_set_space(idmemory, avm_local); |
875 | 266 | code = ialloc_ref_array(arr, a_all, size, "copy_stack"); |
876 | 266 | if (code >= 0) |
877 | 266 | code = ref_stack_store(pstack, arr, size, 0, 1, true, idmemory, |
878 | 266 | "copy_stack"); |
879 | | /* If we are copying the exec stack, try to replace any oparrays with |
880 | | * the operator that references them |
881 | | * We also replace any internal objects (t_struct and t_astruct) with |
882 | | * string representations, since these can contain references to objects |
883 | | * with uncertain lifespans, it is safer not to risk them persisting. |
884 | | * Since we basically did this later on for the error handler, it isn't |
885 | | * a significant speed hit. |
886 | | */ |
887 | 266 | if (pstack == &e_stack) { |
888 | 159k | for (i = 0; i < size; i++) { |
889 | 159k | if (errorexec_find(i_ctx_p, &arr->value.refs[i]) < 0) |
890 | 159k | make_null(&arr->value.refs[i]); |
891 | 159k | else if (r_has_type(&arr->value.refs[i], t_struct) |
892 | 159k | || r_has_type(&arr->value.refs[i], t_astruct)) { |
893 | 2.35k | ref r; |
894 | 2.35k | obj_cvs_ref(i_ctx_p, (const ref *)&arr->value.refs[i], &r); |
895 | 2.35k | ref_assign(&arr->value.refs[i], &r); |
896 | 2.35k | } |
897 | 159k | } |
898 | 32 | } |
899 | 266 | if (pstack == &o_stack && dict_find_string(systemdict, "SAFETY", &safety) > 0 && |
900 | 266 | dict_find_string(safety, "safe", &safe) > 0 && r_has_type(safe, t_boolean) && |
901 | 266 | safe->value.boolval == true) { |
902 | 0 | code = ref_stack_array_sanitize(i_ctx_p, arr, arr, 0); |
903 | 0 | if (code < 0) |
904 | 0 | return code; |
905 | 0 | } |
906 | 266 | ialloc_set_space(idmemory, save_space); |
907 | 266 | return code; |
908 | 266 | } |
909 | | |
910 | | /* Get the name corresponding to an error number. */ |
911 | | int |
912 | | gs_errorname(i_ctx_t *i_ctx_p, int code, ref * perror_name) |
913 | 110M | { |
914 | 110M | ref *perrordict, *pErrorNames; |
915 | | |
916 | 110M | if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 || |
917 | 110M | dict_find_string(systemdict, "ErrorNames", &pErrorNames) <= 0 |
918 | 110M | ) |
919 | 0 | return_error(gs_error_undefined); /* errordict or ErrorNames not found?! */ |
920 | 110M | return array_get(imemory, pErrorNames, (long)(-code - 1), perror_name); |
921 | 110M | } |
922 | | |
923 | | /* Store an error string in $error.errorinfo. */ |
924 | | /* This routine is here because of the proximity to the error handler. */ |
925 | | int |
926 | | gs_errorinfo_put_string(i_ctx_t *i_ctx_p, const char *str) |
927 | 0 | { |
928 | 0 | ref rstr; |
929 | 0 | ref *pderror; |
930 | 0 | int code = string_to_ref(str, &rstr, iimemory, "gs_errorinfo_put_string"); |
931 | |
|
932 | 0 | if (code < 0) |
933 | 0 | return code; |
934 | 0 | if (dict_find_string(systemdict, "$error", &pderror) <= 0 || |
935 | 0 | !r_has_type(pderror, t_dictionary) || |
936 | 0 | idict_put_string(pderror, "errorinfo", &rstr) < 0 |
937 | 0 | ) |
938 | 0 | return_error(gs_error_Fatal); |
939 | 0 | return 0; |
940 | 0 | } |
941 | | |
942 | | /* Main interpreter. */ |
943 | | /* If execution terminates normally, return gs_error_InterpreterExit. */ |
944 | | /* If an error occurs, leave the current object in *perror_object */ |
945 | | /* and return a (negative) error code. */ |
946 | | static int |
947 | | interp(/* lgtm [cpp/use-of-goto] */ |
948 | | i_ctx_t **pi_ctx_p /* context for execution, updated if resched */, |
949 | | const ref * pref /* object to interpret */, |
950 | | ref * perror_object) |
951 | 118M | { |
952 | 118M | i_ctx_t *i_ctx_p = *pi_ctx_p; |
953 | | /* |
954 | | * Note that iref may actually be either a ref * or a ref_packed *. |
955 | | * Certain DEC compilers assume that a ref * is ref-aligned even if it |
956 | | * is cast to a short *, and generate code on this assumption, leading |
957 | | * to "unaligned access" errors. For this reason, we declare |
958 | | * iref_packed, and use a macro to cast it to the more aligned type |
959 | | * where necessary (which is almost everywhere it is used). This may |
960 | | * lead to compiler warnings about "cast increases alignment |
961 | | * requirements", but this is less harmful than expensive traps at run |
962 | | * time. |
963 | | */ |
964 | 118M | register const ref_packed *iref_packed = (const ref_packed *)pref; |
965 | | /* |
966 | | * To make matters worse, some versions of gcc/egcs have a bug that |
967 | | * leads them to assume that if iref_packed is EVER cast to a ref *, |
968 | | * it is ALWAYS ref-aligned. We detect this in stdpre.h and provide |
969 | | * the following workaround: |
970 | | */ |
971 | | #ifdef ALIGNMENT_ALIASING_BUG |
972 | | const ref *iref_temp; |
973 | | # define IREF (iref_temp = (const ref *)iref_packed, iref_temp) |
974 | | #else |
975 | 15.0G | # define IREF ((const ref *)iref_packed) |
976 | 118M | #endif |
977 | 41.8G | #define SET_IREF(rp) (iref_packed = (const ref_packed *)(rp)) |
978 | 118M | register int icount = 0; /* # of consecutive tokens at iref */ |
979 | 118M | register os_ptr iosp = osp; /* private copy of osp */ |
980 | 118M | register es_ptr iesp = esp; /* private copy of esp */ |
981 | 118M | int code; |
982 | 118M | ref token; /* token read from file or string, */ |
983 | | /* must be declared in this scope */ |
984 | 118M | ref *pvalue; |
985 | 118M | ref refnull; |
986 | 118M | uint opindex; /* needed for oparrays */ |
987 | 118M | os_ptr whichp; |
988 | | |
989 | | /* |
990 | | * We have to make the error information into a struct; |
991 | | * otherwise, the Watcom compiler will assign it to registers |
992 | | * strictly on the basis of textual frequency. |
993 | | * We also have to use ref_assign_inline everywhere, and |
994 | | * avoid direct assignments of refs, so that esi and edi |
995 | | * will remain available on Intel processors. |
996 | | */ |
997 | 118M | struct interp_error_s { |
998 | 118M | int code; |
999 | 118M | int line; |
1000 | 118M | const ref *obj; |
1001 | 118M | ref full; |
1002 | 118M | } ierror; |
1003 | | |
1004 | | /* |
1005 | | * Get a pointer to the name table so that we can use the |
1006 | | * inline version of name_index_ref. |
1007 | | */ |
1008 | 118M | const name_table *const int_nt = imemory->gs_lib_ctx->gs_name_table; |
1009 | | |
1010 | 118M | #define set_error(ecode)\ |
1011 | 118M | { ierror.code = ecode; ierror.line = __LINE__; } |
1012 | 118M | #define return_with_error(ecode, objp)\ |
1013 | 118M | { set_error(ecode); ierror.obj = objp; goto rwe; } |
1014 | 118M | #define return_with_error_iref(ecode)\ |
1015 | 118M | { set_error(ecode); goto rwei; } |
1016 | 118M | #define return_with_code_iref()\ |
1017 | 118M | { ierror.line = __LINE__; goto rweci; } |
1018 | 118M | #define return_with_stackoverflow(objp)\ |
1019 | 118M | { o_stack.requested = 1; return_with_error(gs_error_stackoverflow, objp); } |
1020 | 118M | #define return_with_stackoverflow_iref()\ |
1021 | 118M | { o_stack.requested = 1; return_with_error_iref(gs_error_stackoverflow); } |
1022 | | /* |
1023 | | * If control reaches the special operators (x_add, etc.) as a result of |
1024 | | * interpreting an executable name, iref points to the name, not the |
1025 | | * operator, so the name rather than the operator becomes the error object, |
1026 | | * which is wrong. We detect and handle this case explicitly when an error |
1027 | | * occurs, so as not to slow down the non-error case. |
1028 | | */ |
1029 | 118M | #define return_with_error_tx_op(err_code)\ |
1030 | 118M | { if (r_has_type(IREF, t_name)) {\ |
1031 | 345 | return_with_error(err_code, pvalue);\ |
1032 | 8.68k | } else {\ |
1033 | 8.68k | return_with_error_iref(err_code);\ |
1034 | 0 | }\ |
1035 | 9.02k | } |
1036 | | |
1037 | 118M | int *ticks_left = &imemory_system->gs_lib_ctx->gcsignal; |
1038 | | |
1039 | 118M | #if defined(DEBUG_TRACE_PS_OPERATORS) || defined(DEBUG) |
1040 | 118M | int (*call_operator_fn)(op_proc_t, i_ctx_t *) = do_call_operator; |
1041 | | |
1042 | 118M | if (gs_debug_c('!')) |
1043 | 0 | call_operator_fn = do_call_operator_verbose; |
1044 | 118M | #endif |
1045 | | |
1046 | 118M | *ticks_left = i_ctx_p->time_slice_ticks; |
1047 | | |
1048 | 118M | make_null(&ierror.full); |
1049 | 118M | ierror.obj = &ierror.full; |
1050 | 118M | make_null(&refnull); |
1051 | 118M | refnull.value.intval = 0; |
1052 | 118M | pvalue = &refnull; |
1053 | | |
1054 | | /* |
1055 | | * If we exceed the VMThreshold, set *ticks_left to -100 |
1056 | | * to alert the interpreter that we need to garbage collect. |
1057 | | */ |
1058 | 118M | set_gc_signal(i_ctx_p, -100); |
1059 | | |
1060 | 118M | esfile_clear_cache(); |
1061 | | /* |
1062 | | * From here on, if icount > 0, iref and icount correspond |
1063 | | * to the top entry on the execution stack: icount is the count |
1064 | | * of sequential entries remaining AFTER the current one. |
1065 | | */ |
1066 | 118M | #define IREF_NEXT(ip)\ |
1067 | 25.8G | ((const ref_packed *)((const ref *)(ip) + 1)) |
1068 | 118M | #define IREF_NEXT_EITHER(ip)\ |
1069 | 19.2G | ( r_is_packed(ip) ? (ip) + 1 : IREF_NEXT(ip) ) |
1070 | 118M | #define store_state(ep)\ |
1071 | 7.74G | ( icount > 0 ? (ep->value.const_refs = IREF + 1, r_set_size(ep, icount)) : 0 ) |
1072 | 118M | #define store_state_short(ep)\ |
1073 | 10.9G | ( icount > 0 ? (ep->value.packed = iref_packed + 1, r_set_size(ep, icount)) : 0 ) |
1074 | 118M | #define store_state_either(ep)\ |
1075 | 4.44G | ( icount > 0 ? (ep->value.packed = IREF_NEXT_EITHER(iref_packed), r_set_size(ep, icount)) : 0 ) |
1076 | 118M | #define next()\ |
1077 | 28.9G | if ( --icount > 0 ) { iref_packed = IREF_NEXT(iref_packed); goto top; } else goto out |
1078 | 118M | #define next_short()\ |
1079 | 28.2G | if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\ |
1080 | 28.2G | ++iref_packed; goto top |
1081 | 118M | #define next_either()\ |
1082 | 22.8G | if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\ |
1083 | 22.8G | iref_packed = IREF_NEXT_EITHER(iref_packed); goto top |
1084 | | |
1085 | | #if !PACKED_SPECIAL_OPS |
1086 | | # undef next_either |
1087 | | # define next_either() next() |
1088 | | # undef store_state_either |
1089 | | # define store_state_either(ep) store_state(ep) |
1090 | | #endif |
1091 | | |
1092 | | /* We want to recognize executable arrays here, */ |
1093 | | /* so we push the argument on the estack and enter */ |
1094 | | /* the loop at the bottom. */ |
1095 | 118M | if (iesp >= estop) |
1096 | 118M | return_with_error(gs_error_execstackoverflow, pref); |
1097 | 118M | ++iesp; |
1098 | 118M | ref_assign_inline(iesp, pref); |
1099 | 118M | goto bot; |
1100 | 106G | top: |
1101 | | /* |
1102 | | * This is the top of the interpreter loop. |
1103 | | * iref points to the ref being interpreted. |
1104 | | * Note that this might be an element of a packed array, |
1105 | | * not a real ref: we carefully arranged the first 16 bits of |
1106 | | * a ref and of a packed array element so they could be distinguished |
1107 | | * from each other. (See ghost.h and packed.h for more detail.) |
1108 | | */ |
1109 | 106G | INCR(top); |
1110 | | #ifdef DEBUG |
1111 | | /* Do a little validation on the top o-stack entry. */ |
1112 | | if (iosp >= osbot && |
1113 | | (r_type(iosp) == t__invalid || r_type(iosp) >= tx_next_op) |
1114 | | ) { |
1115 | | mlprintf(imemory, "Invalid value on o-stack!\n"); |
1116 | | return_with_error_iref(gs_error_Fatal); |
1117 | | } |
1118 | | if (gs_debug['I'] || |
1119 | | (gs_debug['i'] && |
1120 | | (r_is_packed(iref_packed) ? |
1121 | | r_packed_is_name(iref_packed) : |
1122 | | r_has_type(IREF, t_name))) |
1123 | | ) { |
1124 | | os_ptr save_osp = osp; /* avoid side-effects */ |
1125 | | es_ptr save_esp = esp; |
1126 | | |
1127 | | osp = iosp; |
1128 | | esp = iesp; |
1129 | | dmlprintf5(imemory, "d%u,e%u<%u>"PRI_INTPTR"(%d): ", |
1130 | | ref_stack_count(&d_stack), ref_stack_count(&e_stack), |
1131 | | ref_stack_count(&o_stack), (intptr_t)IREF, icount); |
1132 | | debug_print_ref(imemory, IREF); |
1133 | | if (iosp >= osbot) { |
1134 | | dmputs(imemory, " // "); |
1135 | | debug_print_ref(imemory, iosp); |
1136 | | } |
1137 | | dmputc(imemory, '\n'); |
1138 | | osp = save_osp; |
1139 | | esp = save_esp; |
1140 | | dmflush(imemory); |
1141 | | } |
1142 | | #endif |
1143 | | /* Objects that have attributes (arrays, dictionaries, files, and strings) */ |
1144 | | /* use lit and exec; other objects use plain and plain_exec. */ |
1145 | 106G | #define lit(t) type_xe_value(t, a_execute) |
1146 | 106G | #define exec(t) type_xe_value(t, a_execute + a_executable) |
1147 | 106G | #define nox(t) type_xe_value(t, 0) |
1148 | 106G | #define nox_exec(t) type_xe_value(t, a_executable) |
1149 | 106G | #define plain(t) type_xe_value(t, 0) |
1150 | 106G | #define plain_exec(t) type_xe_value(t, a_executable) |
1151 | | /* |
1152 | | * We have to populate enough cases of the switch statement to force |
1153 | | * some compilers to use a dispatch rather than a testing loop. |
1154 | | * What a nuisance! |
1155 | | */ |
1156 | 106G | switch (r_type_xe(iref_packed)) { |
1157 | | /* Access errors. */ |
1158 | 0 | #define cases_invalid()\ |
1159 | 0 | case plain(t__invalid): case plain_exec(t__invalid) |
1160 | 0 | cases_invalid(): |
1161 | 0 | return_with_error_iref(gs_error_Fatal); |
1162 | 0 | #define cases_nox()\ |
1163 | 0 | case nox_exec(t_array): case nox_exec(t_dictionary):\ |
1164 | 0 | case nox_exec(t_file): case nox_exec(t_string):\ |
1165 | 0 | case nox_exec(t_mixedarray): case nox_exec(t_shortarray) |
1166 | 0 | cases_nox(): |
1167 | 0 | return_with_error_iref(gs_error_invalidaccess); |
1168 | | /* |
1169 | | * Literal objects. We have to enumerate all the types. |
1170 | | * In fact, we have to include some extra plain_exec entries |
1171 | | * just to populate the switch. We break them up into groups |
1172 | | * to avoid overflowing some preprocessors. |
1173 | | */ |
1174 | 0 | #define cases_lit_1()\ |
1175 | 266M | case lit(t_array): case nox(t_array):\ |
1176 | 1.35G | case plain(t_boolean): case plain_exec(t_boolean):\ |
1177 | 2.02G | case lit(t_dictionary): case nox(t_dictionary) |
1178 | 0 | #define cases_lit_2()\ |
1179 | 2.02G | case lit(t_file): case nox(t_file):\ |
1180 | 2.02G | case plain(t_fontID): case plain_exec(t_fontID):\ |
1181 | 3.27G | case plain(t_integer): case plain_exec(t_integer):\ |
1182 | 3.27G | case plain(t_mark): case plain_exec(t_mark) |
1183 | 0 | #define cases_lit_3()\ |
1184 | 5.13G | case plain(t_name):\ |
1185 | 5.78G | case plain(t_null):\ |
1186 | 5.78G | case plain(t_oparray):\ |
1187 | 5.78G | case plain(t_operator) |
1188 | 0 | #define cases_lit_4()\ |
1189 | 5.96G | case plain(t_real): case plain_exec(t_real):\ |
1190 | 5.96G | case plain(t_save): case plain_exec(t_save):\ |
1191 | 6.33G | case lit(t_string): case nox(t_string) |
1192 | 0 | #define cases_lit_5()\ |
1193 | 8.23G | case lit(t_mixedarray): case nox(t_mixedarray):\ |
1194 | 8.23G | case lit(t_shortarray): case nox(t_shortarray):\ |
1195 | 8.23G | case plain(t_device): case plain_exec(t_device):\ |
1196 | 8.23G | case plain(t_struct): case plain_exec(t_struct):\ |
1197 | 8.23G | case plain(t_astruct): case plain_exec(t_astruct):\ |
1198 | 8.23G | case plain(t_pdfctx): case plain_exec(t_pdfctx) |
1199 | | /* Executable arrays are treated as literals in direct execution. */ |
1200 | 0 | #define cases_lit_array()\ |
1201 | 12.4G | case exec(t_array): case exec(t_mixedarray): case exec(t_shortarray) |
1202 | 6.42G | cases_lit_1(): |
1203 | 18.0G | cases_lit_2(): |
1204 | 20.7G | cases_lit_3(): |
1205 | 33.8G | cases_lit_4(): |
1206 | 33.8G | cases_lit_5(): |
1207 | 7.77G | INCR(lit); |
1208 | 7.77G | break; |
1209 | 12.4G | cases_lit_array(): |
1210 | 12.4G | INCR(lit_array); |
1211 | 12.4G | break; |
1212 | | /* Special operators. */ |
1213 | 124M | case plain_exec(tx_op_add): |
1214 | 584M | x_add: INCR(x_add); |
1215 | 584M | osp = iosp; /* sync o_stack */ |
1216 | 584M | if ((code = zop_add(i_ctx_p)) < 0) |
1217 | 584M | return_with_error_tx_op(code); |
1218 | 584M | iosp--; |
1219 | 584M | next_either(); |
1220 | 32.5M | case plain_exec(tx_op_def): |
1221 | 334M | x_def: INCR(x_def); |
1222 | 334M | osp = iosp; /* sync o_stack */ |
1223 | 334M | if ((code = zop_def(i_ctx_p)) < 0) |
1224 | 334M | return_with_error_tx_op(code); |
1225 | 334M | iosp -= 2; |
1226 | 334M | next_either(); |
1227 | 515M | case plain_exec(tx_op_dup): |
1228 | 5.68G | x_dup: INCR(x_dup); |
1229 | 5.68G | if (iosp < osbot) |
1230 | 5.68G | return_with_error_tx_op(gs_error_stackunderflow); |
1231 | 5.68G | if (iosp >= ostop) { |
1232 | 707 | o_stack.requested = 1; |
1233 | 707 | return_with_error_tx_op(gs_error_stackoverflow); |
1234 | 0 | } |
1235 | 5.68G | iosp++; |
1236 | 5.68G | ref_assign_inline(iosp, iosp - 1); |
1237 | 5.68G | next_either(); |
1238 | 908M | case plain_exec(tx_op_exch): |
1239 | 3.87G | x_exch: INCR(x_exch); |
1240 | 3.87G | if (iosp <= osbot) |
1241 | 3.87G | return_with_error_tx_op(gs_error_stackunderflow); |
1242 | 3.87G | ref_assign_inline(&token, iosp); |
1243 | 3.87G | ref_assign_inline(iosp, iosp - 1); |
1244 | 3.87G | ref_assign_inline(iosp - 1, &token); |
1245 | 3.87G | next_either(); |
1246 | 882M | case plain_exec(tx_op_if): |
1247 | 2.89G | x_if: INCR(x_if); |
1248 | 2.89G | if (!r_is_proc(iosp)) |
1249 | 2.89G | return_with_error_tx_op(check_proc_failed(iosp)); |
1250 | 2.89G | if (!r_has_type(iosp - 1, t_boolean)) |
1251 | 9 | return_with_error_tx_op((iosp <= osbot ? |
1252 | 2.89G | gs_error_stackunderflow : gs_error_typecheck)); |
1253 | 2.89G | if (!iosp[-1].value.boolval) { |
1254 | 2.30G | iosp -= 2; |
1255 | 2.30G | next_either(); |
1256 | 2.30G | } |
1257 | 585M | if (iesp >= estop) |
1258 | 585M | return_with_error_tx_op(gs_error_execstackoverflow); |
1259 | 585M | store_state_either(iesp); |
1260 | 585M | whichp = iosp; |
1261 | 585M | iosp -= 2; |
1262 | 585M | goto ifup; |
1263 | 875M | case plain_exec(tx_op_ifelse): |
1264 | 3.85G | x_ifelse: INCR(x_ifelse); |
1265 | 3.85G | if (!r_is_proc(iosp)) |
1266 | 3.85G | return_with_error_tx_op(check_proc_failed(iosp)); |
1267 | 3.85G | if (!r_is_proc(iosp - 1)) |
1268 | 3.85G | return_with_error_tx_op(check_proc_failed(iosp - 1)); |
1269 | 3.85G | if (!r_has_type(iosp - 2, t_boolean)) |
1270 | 22 | return_with_error_tx_op((iosp < osbot + 2 ? |
1271 | 3.85G | gs_error_stackunderflow : gs_error_typecheck)); |
1272 | 3.85G | if (iesp >= estop) |
1273 | 3.85G | return_with_error_tx_op(gs_error_execstackoverflow); |
1274 | 3.85G | store_state_either(iesp); |
1275 | 3.85G | whichp = (iosp[-2].value.boolval ? iosp - 1 : iosp); |
1276 | 3.85G | iosp -= 3; |
1277 | | /* Open code "up" for the array case(s) */ |
1278 | 4.44G | ifup:if ((icount = r_size(whichp) - 1) <= 0) { |
1279 | 1.26G | if (icount < 0) |
1280 | 0 | goto up; /* 0-element proc */ |
1281 | 1.26G | SET_IREF(whichp->value.refs); /* 1-element proc */ |
1282 | 1.26G | if (--(*ticks_left) > 0) |
1283 | 1.26G | goto top; |
1284 | 1.26G | } |
1285 | 3.17G | ++iesp; |
1286 | | /* Do a ref_assign, but also set iref. */ |
1287 | 3.17G | iesp->tas = whichp->tas; |
1288 | 3.17G | SET_IREF(iesp->value.refs = whichp->value.refs); |
1289 | 3.17G | if (--(*ticks_left) > 0) |
1290 | 3.17G | goto top; |
1291 | 301k | goto slice; |
1292 | 129M | case plain_exec(tx_op_index): |
1293 | 2.90G | x_index: INCR(x_index); |
1294 | 2.90G | osp = iosp; /* zindex references o_stack */ |
1295 | 2.90G | if ((code = zindex(i_ctx_p)) < 0) |
1296 | 2.90G | return_with_error_tx_op(code); |
1297 | 2.90G | next_either(); |
1298 | 1.30G | case plain_exec(tx_op_pop): |
1299 | 5.16G | x_pop: INCR(x_pop); |
1300 | 5.16G | if (iosp < osbot) |
1301 | 5.16G | return_with_error_tx_op(gs_error_stackunderflow); |
1302 | 5.16G | iosp--; |
1303 | 5.16G | next_either(); |
1304 | 434M | case plain_exec(tx_op_roll): |
1305 | 1.92G | x_roll: INCR(x_roll); |
1306 | 1.92G | osp = iosp; /* zroll references o_stack */ |
1307 | 1.92G | if ((code = zroll(i_ctx_p)) < 0) |
1308 | 1.92G | return_with_error_tx_op(code); |
1309 | 1.92G | iosp -= 2; |
1310 | 1.92G | next_either(); |
1311 | 53.9M | case plain_exec(tx_op_sub): |
1312 | 106M | x_sub: INCR(x_sub); |
1313 | 106M | osp = iosp; /* sync o_stack */ |
1314 | 106M | if ((code = zop_sub(i_ctx_p)) < 0) |
1315 | 106M | return_with_error_tx_op(code); |
1316 | 106M | iosp--; |
1317 | 106M | next_either(); |
1318 | | /* Executable types. */ |
1319 | 4.65M | case plain_exec(t_null): |
1320 | 4.65M | goto bot; |
1321 | 653M | case plain_exec(t_oparray): |
1322 | | /* Replace with the definition and go again. */ |
1323 | 653M | INCR(exec_array); |
1324 | 653M | opindex = op_index(IREF); |
1325 | 653M | pvalue = (ref *)IREF->value.const_refs; |
1326 | 1.45G | opst: /* Prepare to call a t_oparray procedure in *pvalue. */ |
1327 | 1.45G | store_state(iesp); |
1328 | 3.41G | oppr: /* Record the stack depths in case of failure. */ |
1329 | 3.41G | if (iesp >= estop - 4) |
1330 | 3.41G | return_with_error_iref(gs_error_execstackoverflow); |
1331 | 3.41G | iesp += 5; |
1332 | 3.41G | osp = iosp; /* ref_stack_count_inline needs this */ |
1333 | 3.41G | make_mark_estack(iesp - 4, es_other, oparray_cleanup); |
1334 | 3.41G | make_int(iesp - 3, opindex); /* for .errorexec effect */ |
1335 | 3.41G | make_int(iesp - 2, ref_stack_count_inline(&o_stack)); |
1336 | 3.41G | make_int(iesp - 1, ref_stack_count_inline(&d_stack)); |
1337 | 3.41G | make_op_estack(iesp, oparray_pop); |
1338 | 3.41G | goto pr; |
1339 | 454M | prst: /* Prepare to call the procedure (array) in *pvalue. */ |
1340 | 454M | store_state(iesp); |
1341 | 4.27G | pr: /* Call the array in *pvalue. State has been stored. */ |
1342 | | /* We want to do this check before assigning icount so icount is correct |
1343 | | * in the event of a gs_error_execstackoverflow |
1344 | | */ |
1345 | 4.27G | if (iesp >= estop) { |
1346 | 0 | return_with_error_iref(gs_error_execstackoverflow); |
1347 | 0 | } |
1348 | 4.27G | if ((icount = r_size(pvalue) - 1) <= 0) { |
1349 | 57.3M | if (icount < 0) |
1350 | 314k | goto up; /* 0-element proc */ |
1351 | 57.0M | SET_IREF(pvalue->value.refs); /* 1-element proc */ |
1352 | 57.0M | if (--(*ticks_left) > 0) |
1353 | 57.0M | goto top; |
1354 | 57.0M | } |
1355 | 4.21G | ++iesp; |
1356 | | /* Do a ref_assign, but also set iref. */ |
1357 | 4.21G | iesp->tas = pvalue->tas; |
1358 | 4.21G | SET_IREF(iesp->value.refs = pvalue->value.refs); |
1359 | 4.21G | if (--(*ticks_left) > 0) |
1360 | 4.21G | goto top; |
1361 | 49.1k | goto slice; |
1362 | 14.3G | case plain_exec(t_operator): |
1363 | 14.3G | INCR(exec_operator); |
1364 | 14.3G | if (--(*ticks_left) <= 0) { /* The following doesn't work, */ |
1365 | | /* and I can't figure out why. */ |
1366 | | /****** goto sst; ******/ |
1367 | 284k | } |
1368 | 14.3G | esp = iesp; /* save for operator */ |
1369 | 14.3G | osp = iosp; /* ditto */ |
1370 | | /* Operator routines take osp as an argument. */ |
1371 | | /* This is just a convenience, since they adjust */ |
1372 | | /* osp themselves to reflect the results. */ |
1373 | | /* Operators that (net) push information on the */ |
1374 | | /* operand stack must check for overflow: */ |
1375 | | /* this normally happens automatically through */ |
1376 | | /* the push macro (in oper.h). */ |
1377 | | /* Operators that do not typecheck their operands, */ |
1378 | | /* or take a variable number of arguments, */ |
1379 | | /* must check explicitly for stack underflow. */ |
1380 | | /* (See oper.h for more detail.) */ |
1381 | | /* Note that each case must set iosp = osp: */ |
1382 | | /* this is so we can switch on code without having to */ |
1383 | | /* store it and reload it (for dumb compilers). */ |
1384 | 14.3G | switch (code = call_operator(real_opproc(IREF), i_ctx_p)) { |
1385 | 6.87G | case 0: /* normal case */ |
1386 | 6.90G | case 1: /* alternative success case */ |
1387 | 6.90G | iosp = osp; |
1388 | 6.90G | next(); |
1389 | 3.69G | case o_push_estack: /* store the state and go to up */ |
1390 | 3.69G | store_state(iesp); |
1391 | 5.29G | opush:iosp = osp; |
1392 | 5.29G | iesp = esp; |
1393 | 5.29G | if (--(*ticks_left) > 0) |
1394 | 5.29G | goto up; |
1395 | 67.4k | goto slice; |
1396 | 3.70G | case o_pop_estack: /* just go to up */ |
1397 | 3.70G | opop:iosp = osp; |
1398 | 3.70G | if (esp == iesp) |
1399 | 3.81M | goto bot; |
1400 | 3.70G | iesp = esp; |
1401 | 3.70G | goto up; |
1402 | 0 | case gs_error_Remap_Color: |
1403 | 0 | oe_remap: store_state(iesp); |
1404 | 0 | remap: if (iesp + 2 >= estop) { |
1405 | 0 | esp = iesp; |
1406 | 0 | code = ref_stack_extend(&e_stack, 2); |
1407 | 0 | if (code < 0) |
1408 | 0 | return_with_error_iref(code); |
1409 | 0 | iesp = esp; |
1410 | 0 | } |
1411 | 0 | packed_get(imemory, iref_packed, iesp + 1); |
1412 | 0 | make_oper(iesp + 2, 0, |
1413 | 0 | r_ptr(&istate->remap_color_info, |
1414 | 0 | int_remap_color_info_t)->proc); |
1415 | 0 | iesp += 2; |
1416 | 0 | goto up; |
1417 | 14.3G | } |
1418 | 5.40M | iosp = osp; |
1419 | 5.40M | iesp = esp; |
1420 | 5.40M | return_with_code_iref(); |
1421 | 3.30G | case plain_exec(t_name): |
1422 | 3.30G | INCR(exec_name); |
1423 | 3.30G | pvalue = IREF->value.pname->pvalue; |
1424 | 3.30G | if (!pv_valid(pvalue)) { |
1425 | 3.07G | uint nidx = names_index(int_nt, IREF); |
1426 | 3.07G | uint htemp = 0; |
1427 | | |
1428 | 3.07G | INCR(find_name); |
1429 | 3.07G | if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) |
1430 | 3.07G | return_with_error_iref(gs_error_undefined); |
1431 | 3.07G | } |
1432 | | /* Dispatch on the type of the value. */ |
1433 | | /* Again, we have to over-populate the switch. */ |
1434 | 3.30G | switch (r_type_xe(pvalue)) { |
1435 | 0 | cases_invalid(): |
1436 | 0 | return_with_error_iref(gs_error_Fatal); |
1437 | 0 | cases_nox(): /* access errors */ |
1438 | 0 | return_with_error_iref(gs_error_invalidaccess); |
1439 | 858M | cases_lit_1(): |
1440 | 3.12G | cases_lit_2(): |
1441 | 3.12G | cases_lit_3(): |
1442 | 2.72G | cases_lit_4(): |
1443 | 5.46G | cases_lit_5(): |
1444 | 5.46G | INCR(name_lit); |
1445 | | /* Just push the value */ |
1446 | 5.46G | if (iosp >= ostop) |
1447 | 455M | return_with_stackoverflow(pvalue); |
1448 | 455M | ++iosp; |
1449 | 455M | ref_assign_inline(iosp, pvalue); |
1450 | 455M | next(); |
1451 | 60.2M | case exec(t_array): |
1452 | 398M | case exec(t_mixedarray): |
1453 | 454M | case exec(t_shortarray): |
1454 | 454M | INCR(name_proc); |
1455 | | /* This is an executable procedure, execute it. */ |
1456 | 454M | goto prst; |
1457 | 8.42M | case plain_exec(tx_op_add): |
1458 | 8.42M | goto x_add; |
1459 | 185M | case plain_exec(tx_op_def): |
1460 | 185M | goto x_def; |
1461 | 25.4M | case plain_exec(tx_op_dup): |
1462 | 25.4M | goto x_dup; |
1463 | 42.0M | case plain_exec(tx_op_exch): |
1464 | 42.0M | goto x_exch; |
1465 | 22.8M | case plain_exec(tx_op_if): |
1466 | 22.8M | goto x_if; |
1467 | 10.2M | case plain_exec(tx_op_ifelse): |
1468 | 10.2M | goto x_ifelse; |
1469 | 8.31M | case plain_exec(tx_op_index): |
1470 | 8.31M | goto x_index; |
1471 | 35.9M | case plain_exec(tx_op_pop): |
1472 | 35.9M | goto x_pop; |
1473 | 6.39M | case plain_exec(tx_op_roll): |
1474 | 6.39M | goto x_roll; |
1475 | 3.36M | case plain_exec(tx_op_sub): |
1476 | 3.36M | goto x_sub; |
1477 | 0 | case plain_exec(t_null): |
1478 | 0 | goto bot; |
1479 | 798M | case plain_exec(t_oparray): |
1480 | 798M | INCR(name_oparray); |
1481 | 798M | opindex = op_index(pvalue); |
1482 | 798M | pvalue = (ref *)pvalue->value.const_refs; |
1483 | 798M | goto opst; |
1484 | 1.24G | case plain_exec(t_operator): |
1485 | 1.24G | INCR(name_operator); |
1486 | 1.24G | { /* Shortcut for operators. */ |
1487 | | /* See above for the logic. */ |
1488 | 1.24G | if (--(*ticks_left) <= 0) { /* The following doesn't work, */ |
1489 | | /* and I can't figure out why. */ |
1490 | | /****** goto sst; ******/ |
1491 | 18.8k | } |
1492 | 1.24G | esp = iesp; |
1493 | 1.24G | osp = iosp; |
1494 | 1.24G | switch (code = call_operator(real_opproc(pvalue), |
1495 | 1.24G | i_ctx_p) |
1496 | 1.24G | ) { |
1497 | 1.19G | case 0: /* normal case */ |
1498 | 1.20G | case 1: /* alternative success case */ |
1499 | 1.20G | iosp = osp; |
1500 | 1.20G | next(); |
1501 | 33.8M | case o_push_estack: |
1502 | 33.8M | store_state(iesp); |
1503 | 33.8M | goto opush; |
1504 | 2.46M | case o_pop_estack: |
1505 | 2.46M | goto opop; |
1506 | 0 | case gs_error_Remap_Color: |
1507 | 0 | goto oe_remap; |
1508 | 1.24G | } |
1509 | 168k | iosp = osp; |
1510 | 168k | iesp = esp; |
1511 | 168k | return_with_error(code, pvalue); |
1512 | 0 | } |
1513 | 0 | case plain_exec(t_name): |
1514 | 0 | case exec(t_file): |
1515 | 0 | case exec(t_string): |
1516 | 770k | default: |
1517 | | /* Not a procedure, reinterpret it. */ |
1518 | 770k | store_state(iesp); |
1519 | 770k | icount = 0; |
1520 | 770k | SET_IREF(pvalue); |
1521 | 770k | goto top; |
1522 | 3.30G | } |
1523 | 2.09G | case exec(t_file): |
1524 | 2.09G | { /* Executable file. Read the next token and interpret it. */ |
1525 | 2.09G | stream *s; |
1526 | 2.09G | scanner_state sstate; |
1527 | | |
1528 | 2.09G | check_read_known_file(i_ctx_p, s, IREF, return_with_error_iref); |
1529 | 5.93G | rt: |
1530 | 5.93G | if (iosp >= ostop) /* check early */ |
1531 | 5.93G | return_with_stackoverflow_iref(); |
1532 | 5.93G | osp = iosp; /* gs_scan_token uses ostack */ |
1533 | 5.93G | gs_scanner_init_options(&sstate, IREF, i_ctx_p->scanner_options); |
1534 | 5.93G | again: |
1535 | 5.93G | code = gs_scan_token(i_ctx_p, &token, &sstate); |
1536 | 5.93G | iosp = osp; /* ditto */ |
1537 | 5.93G | switch (code) { |
1538 | 5.93G | case 0: /* read a token */ |
1539 | | /* It's worth checking for literals, which make up */ |
1540 | | /* the majority of input tokens, before storing the */ |
1541 | | /* state on the e-stack. Note that because of //, */ |
1542 | | /* the token may have *any* type and attributes. */ |
1543 | | /* Note also that executable arrays aren't executed */ |
1544 | | /* at the top level -- they're treated as literals. */ |
1545 | 5.93G | if (!r_has_attr(&token, a_executable) || |
1546 | 5.93G | r_is_array(&token) |
1547 | 5.93G | ) { /* If gs_scan_token used the o-stack, */ |
1548 | | /* we know we can do a push now; if not, */ |
1549 | | /* the pre-check is still valid. */ |
1550 | 3.83G | iosp++; |
1551 | 3.83G | ref_assign_inline(iosp, &token); |
1552 | | /* With a construct like /f currentfile def //f we can |
1553 | | end up here with IREF == &token which can go badly wrong, |
                     so find the current file we're interpreting on the estack
1555 | | and have IREF point to that ref, rather than "token" |
1556 | | */ |
1557 | 3.83G | if (IREF == &token) { |
1558 | 0 | ref *st; |
1559 | 0 | int code2 = z_current_file(i_ctx_p, &st); |
1560 | 0 | if (code2 < 0 || st == NULL) { |
1561 | 0 | ierror.code = gs_error_Fatal; |
1562 | 0 | goto rweci; |
1563 | 0 | } |
1564 | 0 | SET_IREF(st); |
1565 | 0 | } |
1566 | 3.83G | goto rt; |
1567 | 3.83G | } |
1568 | 2.09G | store_state(iesp); |
1569 | | /* Push the file on the e-stack */ |
1570 | 2.09G | if (iesp >= estop) |
1571 | 2.09G | return_with_error_iref(gs_error_execstackoverflow); |
1572 | 2.09G | esfile_set_cache(++iesp); |
1573 | 2.09G | ref_assign_inline(iesp, IREF); |
1574 | 2.09G | SET_IREF(&token); |
1575 | 2.09G | icount = 0; |
1576 | 2.09G | goto top; |
1577 | 307 | case gs_error_undefined: /* //name undefined */ |
1578 | 307 | gs_scanner_error_object(i_ctx_p, &sstate, &token); |
1579 | 307 | return_with_error(code, &token); |
1580 | 1.32M | case scan_EOF: /* end of file */ |
1581 | 1.32M | esfile_clear_cache(); |
1582 | 1.32M | goto bot; |
1583 | 26.3k | case scan_BOS: |
1584 | | /* Binary object sequences */ |
1585 | | /* ARE executed at the top level. */ |
1586 | 26.3k | store_state(iesp); |
1587 | | /* Push the file on the e-stack */ |
1588 | 26.3k | if (iesp >= estop) |
1589 | 26.3k | return_with_error_iref(gs_error_execstackoverflow); |
1590 | 26.3k | esfile_set_cache(++iesp); |
1591 | 26.3k | ref_assign_inline(iesp, IREF); |
1592 | 26.3k | pvalue = &token; |
1593 | 26.3k | goto pr; |
1594 | 2.25M | case scan_Refill: |
1595 | 2.25M | store_state(iesp); |
1596 | | /* iref may point into the exec stack; */ |
1597 | | /* save its referent now. */ |
1598 | 2.25M | ref_assign_inline(&token, IREF); |
1599 | | /* Push the file on the e-stack */ |
1600 | 2.25M | if (iesp >= estop) |
1601 | 2.25M | return_with_error_iref(gs_error_execstackoverflow); |
1602 | 2.25M | ++iesp; |
1603 | 2.25M | ref_assign_inline(iesp, &token); |
1604 | 2.25M | esp = iesp; |
1605 | 2.25M | osp = iosp; |
1606 | 2.25M | code = gs_scan_handle_refill(i_ctx_p, &sstate, true, |
1607 | 2.25M | ztokenexec_continue); |
1608 | 2.36M | scan_cont: |
1609 | 2.36M | iosp = osp; |
1610 | 2.36M | iesp = esp; |
1611 | 2.36M | switch (code) { |
1612 | 633k | case 0: |
1613 | 633k | iesp--; /* don't push the file */ |
1614 | 633k | goto again; /* stacks are unchanged */ |
1615 | 1.72M | case o_push_estack: |
1616 | 1.72M | esfile_clear_cache(); |
1617 | 1.72M | if (--(*ticks_left) > 0) |
1618 | 1.72M | goto up; |
1619 | 93 | goto slice; |
1620 | 2.36M | } |
1621 | | /* must be an error */ |
1622 | 804 | iesp--; /* don't push the file */ |
1623 | 804 | return_with_code_iref(); |
1624 | 0 | case scan_Comment: |
1625 | 104k | case scan_DSC_Comment: { |
1626 | | /* See scan_Refill above for comments. */ |
1627 | 104k | ref file_token; |
1628 | | |
1629 | 104k | store_state(iesp); |
1630 | 104k | ref_assign_inline(&file_token, IREF); |
1631 | 104k | if (iesp >= estop) |
1632 | 104k | return_with_error_iref(gs_error_execstackoverflow); |
1633 | 104k | ++iesp; |
1634 | 104k | ref_assign_inline(iesp, &file_token); |
1635 | 104k | esp = iesp; |
1636 | 104k | osp = iosp; |
1637 | 104k | code = ztoken_handle_comment(i_ctx_p, |
1638 | 104k | &sstate, &token, |
1639 | 104k | code, true, true, |
1640 | 104k | ztokenexec_continue); |
1641 | 104k | } |
1642 | 0 | goto scan_cont; |
1643 | 19.0k | default: /* error */ |
1644 | 19.0k | ref_assign_inline(&token, IREF); |
1645 | 19.0k | gs_scanner_error_object(i_ctx_p, &sstate, &token); |
1646 | 19.0k | return_with_error(code, &token); |
1647 | 5.93G | } |
1648 | 5.93G | } |
1649 | 974k | case exec(t_string): |
1650 | 974k | { /* Executable string. Read a token and interpret it. */ |
1651 | 974k | stream ss; |
1652 | 974k | scanner_state sstate; |
1653 | | |
1654 | 974k | s_init(&ss, NULL); |
1655 | 974k | sread_string(&ss, IREF->value.bytes, r_size(IREF)); |
1656 | 974k | gs_scanner_init_stream_options(&sstate, &ss, SCAN_FROM_STRING); |
1657 | 974k | osp = iosp; /* gs_scan_token uses ostack */ |
1658 | 974k | code = gs_scan_token(i_ctx_p, &token, &sstate); |
1659 | 974k | iosp = osp; /* ditto */ |
1660 | 974k | switch (code) { |
1661 | 974k | case 0: /* read a token */ |
1662 | 974k | case scan_BOS: /* binary object sequence */ |
1663 | 974k | store_state(iesp); |
1664 | | /* If the updated string isn't empty, push it back */ |
1665 | | /* on the e-stack. */ |
1666 | 974k | { |
1667 | | /* This is just the available buffer size, so |
1668 | | a signed int is plenty big |
1669 | | */ |
1670 | 974k | int size = sbufavailable(&ss); |
1671 | | |
1672 | 974k | if (size > 0) { |
1673 | 0 | if (iesp >= estop) |
1674 | 0 | return_with_error_iref(gs_error_execstackoverflow); |
1675 | 0 | ++iesp; |
1676 | 0 | iesp->tas.type_attrs = IREF->tas.type_attrs; |
1677 | 0 | iesp->value.const_bytes = sbufptr(&ss); |
1678 | 0 | r_set_size(iesp, size); |
1679 | 0 | } |
1680 | 974k | } |
1681 | 974k | if (code == 0) { |
1682 | 974k | SET_IREF(&token); |
1683 | 974k | icount = 0; |
1684 | 974k | goto top; |
1685 | 974k | } |
1686 | | /* Handle BOS specially */ |
1687 | 0 | pvalue = &token; |
1688 | 0 | goto pr; |
1689 | 4 | case scan_EOF: /* end of string */ |
1690 | 4 | goto bot; |
1691 | 0 | case scan_Refill: /* error */ |
1692 | 0 | code = gs_note_error(gs_error_syntaxerror); |
1693 | | /* fall through */ |
1694 | 0 | default: /* error */ |
1695 | 0 | ref_assign_inline(&token, IREF); |
1696 | 0 | gs_scanner_error_object(i_ctx_p, &sstate, &token); |
1697 | 0 | return_with_error(code, &token); |
1698 | 974k | } |
1699 | 974k | } |
1700 | | /* Handle packed arrays here by re-dispatching. */ |
1701 | | /* This also picks up some anomalous cases of non-packed arrays. */ |
1702 | 61.1G | default: |
1703 | 61.1G | { |
1704 | 61.1G | uint index; |
1705 | | |
1706 | 61.1G | switch (*iref_packed >> r_packed_type_shift) { |
1707 | 770k | case pt_full_ref: |
1708 | 770k | case pt_full_ref + 1: |
1709 | 770k | INCR(p_full); |
1710 | 770k | if (iosp >= ostop) |
1711 | 770k | return_with_stackoverflow_iref(); |
1712 | | /* We know this can't be an executable object */ |
1713 | | /* requiring special handling, so we just push it. */ |
1714 | 770k | ++iosp; |
1715 | | /* We know that refs are properly aligned: */ |
1716 | | /* see packed.h for details. */ |
1717 | 770k | ref_assign_inline(iosp, IREF); |
1718 | 770k | next(); |
1719 | 40.3G | case pt_executable_operator: |
1720 | 40.3G | index = *iref_packed & packed_value_mask; |
1721 | 40.3G | if (--(*ticks_left) <= 0) { /* The following doesn't work, */ |
1722 | | /* and I can't figure out why. */ |
1723 | | /****** goto sst_short; ******/ |
1724 | 1.72M | } |
1725 | 40.3G | if (!op_index_is_operator(index)) { |
1726 | 1.96G | INCR(p_exec_oparray); |
1727 | 1.96G | store_state_short(iesp); |
1728 | 1.96G | opindex = index; |
1729 | | /* Call the operator procedure. */ |
1730 | 1.96G | index -= op_def_count; |
1731 | 1.96G | pvalue = (ref *) |
1732 | 1.96G | (index < r_size(&i_ctx_p->op_array_table_global.table) ? |
1733 | 1.96G | i_ctx_p->op_array_table_global.table.value.const_refs + |
1734 | 1.96G | index : |
1735 | 1.96G | i_ctx_p->op_array_table_local.table.value.const_refs + |
1736 | 0 | (index - r_size(&i_ctx_p->op_array_table_global.table))); |
1737 | 1.96G | goto oppr; |
1738 | 1.96G | } |
1739 | 40.3G | INCR(p_exec_operator); |
1740 | | /* See the main plain_exec(t_operator) case */ |
1741 | | /* for details of what happens here. */ |
1742 | 38.4G | #if PACKED_SPECIAL_OPS |
1743 | | /* |
1744 | | * We arranged in iinit.c that the special ops |
1745 | | * have operator indices starting at 1. |
1746 | | * |
1747 | | * The (int) cast in the next line is required |
1748 | | * because some compilers don't allow arithmetic |
1749 | | * involving two different enumerated types. |
1750 | | */ |
1751 | 38.4G | # define case_xop(xop) case xop - (int)tx_op + 1 |
1752 | 38.4G | switch (index) { |
1753 | 451M | case_xop(tx_op_add):goto x_add; |
1754 | 116M | case_xop(tx_op_def):goto x_def; |
1755 | 5.14G | case_xop(tx_op_dup):goto x_dup; |
1756 | 2.92G | case_xop(tx_op_exch):goto x_exch; |
1757 | 1.98G | case_xop(tx_op_if):goto x_if; |
1758 | 2.97G | case_xop(tx_op_ifelse):goto x_ifelse; |
1759 | 2.77G | case_xop(tx_op_index):goto x_index; |
1760 | 3.81G | case_xop(tx_op_pop):goto x_pop; |
1761 | 1.48G | case_xop(tx_op_roll):goto x_roll; |
1762 | 49.5M | case_xop(tx_op_sub):goto x_sub; |
1763 | 0 | case 0: /* for dumb compilers */ |
1764 | 16.6G | default: |
1765 | 16.6G | ; |
1766 | 38.4G | } |
1767 | 16.6G | # undef case_xop |
1768 | 16.6G | #endif |
1769 | 38.4G | INCR(p_exec_non_x_operator); |
1770 | 16.6G | esp = iesp; |
1771 | 16.6G | osp = iosp; |
1772 | 16.6G | switch (code = call_operator(op_index_proc(index), i_ctx_p)) { |
1773 | 14.8G | case 0: |
1774 | 14.8G | case 1: |
1775 | 14.8G | iosp = osp; |
1776 | 14.8G | next_short(); |
1777 | 1.55G | case o_push_estack: |
1778 | 1.55G | store_state_short(iesp); |
1779 | 1.55G | goto opush; |
1780 | 138M | case o_pop_estack: |
1781 | 138M | iosp = osp; |
1782 | 138M | if (esp == iesp) { |
1783 | 5.63M | next_short(); |
1784 | 5.63M | } |
1785 | 132M | iesp = esp; |
1786 | 132M | goto up; |
1787 | 0 | case gs_error_Remap_Color: |
1788 | 0 | store_state_short(iesp); |
1789 | 0 | goto remap; |
1790 | 16.6G | } |
1791 | 107M | iosp = osp; |
1792 | 107M | iesp = esp; |
1793 | 107M | return_with_code_iref(); |
1794 | 10.6G | case pt_integer: |
1795 | 10.6G | INCR(p_integer); |
1796 | 10.6G | if (iosp >= ostop) |
1797 | 10.6G | return_with_stackoverflow_iref(); |
1798 | 10.6G | ++iosp; |
1799 | 10.6G | make_int(iosp, |
1800 | 10.6G | ((int)*iref_packed & packed_int_mask) + |
1801 | 10.6G | packed_min_intval); |
1802 | 10.6G | next_short(); |
1803 | 2.37G | case pt_literal_name: |
1804 | 2.37G | INCR(p_lit_name); |
1805 | 2.37G | { |
1806 | 2.37G | uint nidx = *iref_packed & packed_value_mask; |
1807 | | |
1808 | 2.37G | if (iosp >= ostop) |
1809 | 2.37G | return_with_stackoverflow_iref(); |
1810 | 2.37G | ++iosp; |
1811 | 2.37G | name_index_ref_inline(int_nt, nidx, iosp); |
1812 | 2.37G | next_short(); |
1813 | 2.37G | } |
1814 | 7.74G | case pt_executable_name: |
1815 | 7.74G | INCR(p_exec_name); |
1816 | 7.74G | { |
1817 | 7.74G | uint nidx = *iref_packed & packed_value_mask; |
1818 | | |
1819 | 7.74G | pvalue = name_index_ptr_inline(int_nt, nidx)->pvalue; |
1820 | 7.74G | if (!pv_valid(pvalue)) { |
1821 | 7.38G | uint htemp = 0; |
1822 | | |
1823 | 7.38G | INCR(p_find_name); |
1824 | 7.38G | if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) { |
1825 | 0 | names_index_ref(int_nt, nidx, &token); |
1826 | 0 | return_with_error(gs_error_undefined, &token); |
1827 | 0 | } |
1828 | 7.38G | } |
1829 | 7.74G | if (r_has_masked_attrs(pvalue, a_execute, a_execute + a_executable)) { /* Literal, push it. */ |
1830 | 383M | INCR(p_name_lit); |
1831 | 383M | if (iosp >= ostop) |
1832 | 383M | return_with_stackoverflow_iref(); |
1833 | 383M | ++iosp; |
1834 | 383M | ref_assign_inline(iosp, pvalue); |
1835 | 383M | next_short(); |
1836 | 383M | } |
1837 | 7.36G | if (r_is_proc(pvalue)) { /* This is an executable procedure, */ |
1838 | | /* execute it. */ |
1839 | 406M | INCR(p_name_proc); |
1840 | 406M | store_state_short(iesp); |
1841 | 406M | goto pr; |
1842 | 406M | } |
1843 | | /* Not a literal or procedure, reinterpret it. */ |
1844 | 6.95G | store_state_short(iesp); |
1845 | 6.95G | icount = 0; |
1846 | 6.95G | SET_IREF(pvalue); |
1847 | 6.95G | goto top; |
1848 | 7.36G | } |
1849 | | /* default can't happen here */ |
1850 | 61.1G | } |
1851 | 61.1G | } |
1852 | 106G | } |
1853 | | /* Literal type, just push it. */ |
1854 | 20.2G | if (iosp >= ostop) |
1855 | 20.2G | return_with_stackoverflow_iref(); |
1856 | 20.2G | ++iosp; |
1857 | 20.2G | ref_assign_inline(iosp, IREF); |
1858 | 20.3G | bot:next(); |
1859 | 11.6G | out: /* At most 1 more token in the current procedure. */ |
1860 | | /* (We already decremented icount.) */ |
1861 | 11.6G | if (!icount) { |
1862 | | /* Pop the execution stack for tail recursion. */ |
1863 | 5.84G | iesp--; |
1864 | 5.84G | iref_packed = IREF_NEXT(iref_packed); |
1865 | 5.84G | goto top; |
1866 | 5.84G | } |
1867 | 24.0G | up:if (--(*ticks_left) < 0) |
1868 | 432k | goto slice; |
1869 | | /* See if there is anything left on the execution stack. */ |
1870 | 24.0G | if (!r_is_proc(iesp)) { |
1871 | 8.95G | SET_IREF(iesp--); |
1872 | 8.95G | icount = 0; |
1873 | 8.95G | goto top; |
1874 | 8.95G | } |
1875 | 15.0G | SET_IREF(iesp->value.refs); /* next element of array */ |
1876 | 15.0G | icount = r_size(iesp) - 1; |
1877 | 15.0G | if (icount <= 0) { /* <= 1 more elements */ |
1878 | 2.41G | iesp--; /* pop, or tail recursion */ |
1879 | 2.41G | if (icount < 0) |
1880 | 46.2M | goto up; |
1881 | 2.41G | } |
1882 | 15.0G | goto top; |
1883 | 15.0G | sched: /* We've just called a scheduling procedure. */ |
1884 | | /* The interpreter state is in memory; iref is not current. */ |
1885 | 850k | if (code < 0) { |
1886 | 0 | set_error(code); |
1887 | | /* |
1888 | | * We need a real object to return as the error object. |
1889 | | * (It only has to last long enough to store in |
1890 | | * *perror_object.) |
1891 | | */ |
1892 | 0 | make_null_proc(&ierror.full); |
1893 | 0 | SET_IREF(ierror.obj = &ierror.full); |
1894 | 0 | goto error_exit; |
1895 | 0 | } |
1896 | | /* Reload state information from memory. */ |
1897 | 850k | iosp = osp; |
1898 | 850k | iesp = esp; |
1899 | 850k | goto up; |
1900 | | #if 0 /****** ****** ***** */ |
1901 | | sst: /* Time-slice, but push the current object first. */ |
1902 | | store_state(iesp); |
1903 | | if (iesp >= estop) |
1904 | | return_with_error_iref(gs_error_execstackoverflow); |
1905 | | iesp++; |
1906 | | ref_assign_inline(iesp, iref); |
1907 | | #endif /****** ****** ***** */ |
1908 | 850k | slice: /* It's time to time-slice or garbage collect. */ |
1909 | | /* iref is not live, so we don't need to do a store_state. */ |
1910 | 850k | osp = iosp; |
1911 | 850k | esp = iesp; |
1912 | | /* If *ticks_left <= -100, we need to GC now. */ |
1913 | 850k | if ((*ticks_left) <= -100) { /* We need to garbage collect now. */ |
1914 | 14.8k | *pi_ctx_p = i_ctx_p; |
1915 | 14.8k | code = interp_reclaim(pi_ctx_p, -1); |
1916 | 14.8k | i_ctx_p = *pi_ctx_p; |
1917 | 14.8k | } else |
1918 | 836k | code = 0; |
1919 | 850k | *ticks_left = i_ctx_p->time_slice_ticks; |
1920 | 850k | set_code_on_interrupt(imemory, &code); |
1921 | 850k | goto sched; |
1922 | | |
1923 | | /* Error exits. */ |
1924 | | |
1925 | 112M | rweci: |
1926 | 112M | ierror.code = code; |
1927 | 117M | rwei: |
1928 | 117M | ierror.obj = IREF; |
1929 | 118M | rwe: |
1930 | 118M | if (!r_is_packed(iref_packed)) |
1931 | 10.3M | store_state(iesp); |
1932 | 107M | else { |
1933 | | /* |
1934 | | * We need a real object to return as the error object. |
1935 | | * (It only has to last long enough to store in *perror_object.) |
1936 | | */ |
1937 | 107M | packed_get(imemory, (const ref_packed *)ierror.obj, &ierror.full); |
1938 | 107M | store_state_short(iesp); |
1939 | 107M | if (IREF == ierror.obj) |
1940 | 107M | SET_IREF(&ierror.full); |
1941 | 107M | ierror.obj = &ierror.full; |
1942 | 107M | } |
1943 | 118M | error_exit: |
1944 | 118M | if (GS_ERROR_IS_INTERRUPT(ierror.code)) { /* We must push the current object being interpreted */ |
1945 | | /* back on the e-stack so it will be re-executed. */ |
1946 | | /* Currently, this is always an executable operator, */ |
1947 | | /* but it might be something else someday if we check */ |
1948 | | /* for interrupts in the interpreter loop itself. */ |
1949 | 0 | if (iesp >= estop) |
1950 | 0 | ierror.code = gs_error_execstackoverflow; |
1951 | 0 | else { |
1952 | 0 | iesp++; |
1953 | 0 | ref_assign_inline(iesp, IREF); |
1954 | 0 | } |
1955 | 0 | } |
1956 | 118M | esp = iesp; |
1957 | 118M | osp = iosp; |
1958 | 118M | ref_assign_inline(perror_object, ierror.obj); |
1959 | | #ifdef DEBUG |
1960 | | if (ierror.code == gs_error_InterpreterExit) { |
1961 | | /* Do not call gs_log_error to reduce the noise. */ |
1962 | | return gs_error_InterpreterExit; |
1963 | | } |
1964 | | #endif |
1965 | 118M | return gs_log_error(ierror.code, __FILE__, ierror.line); |
1966 | 118M | } |
1967 | | |
1968 | | /* Pop the bookkeeping information for a normal exit from a t_oparray. */ |
static int
oparray_pop(i_ctx_t *i_ctx_p)
{
    /* Discard the four bookkeeping entries that were pushed when the
       oparray procedure was entered (presumably the opindex and the
       saved stack-depth values that oparray_cleanup reads as ep[1],
       ep[3] and ep[4] -- confirm against the interpreter's oppr: code,
       which lies outside this view). */
    esp -= 4;
    return o_pop_estack;
}
1975 | | |
1976 | | /* Restore the stack pointers after an error inside a t_oparray procedure. */ |
1977 | | /* This procedure is called only from pop_estack. */ |
1978 | | static int |
1979 | | oparray_cleanup(i_ctx_t *i_ctx_p) |
1980 | 104M | { /* esp points just below the cleanup procedure. */ |
1981 | 104M | es_ptr ep = esp; |
1982 | 104M | uint ocount_old = (uint) ep[3].value.intval; |
1983 | 104M | uint dcount_old = (uint) ep[4].value.intval; |
1984 | 104M | uint ocount = ref_stack_count(&o_stack); |
1985 | 104M | uint dcount = ref_stack_count(&d_stack); |
1986 | | |
1987 | 104M | if (ocount > ocount_old) |
1988 | 507k | ref_stack_pop(&o_stack, ocount - ocount_old); |
1989 | 104M | if (dcount > dcount_old) { |
1990 | 476k | ref_stack_pop(&d_stack, dcount - dcount_old); |
1991 | 476k | dict_set_top(); |
1992 | 476k | } |
1993 | 104M | return 0; |
1994 | 104M | } |
1995 | | |
1996 | | /* Don't restore the stack pointers. */ |
static int
oparray_no_cleanup(i_ctx_t *i_ctx_p)
{
    /* Deliberate no-op: installed in place of oparray_cleanup by
       zsetstackprotect(false) so the o/d stacks are NOT restored
       when an error unwinds past this oparray. */
    return 0;
}
2002 | | |
2003 | | /* Find the innermost oparray. */ |
2004 | | static ref * |
2005 | | oparray_find(i_ctx_t *i_ctx_p) |
2006 | 649k | { |
2007 | 649k | long i; |
2008 | 649k | ref *ep; |
2009 | | |
2010 | 8.77M | for (i = 0; (ep = ref_stack_index(&e_stack, i)) != 0; ++i) { |
2011 | 8.77M | if (r_is_estack_mark(ep) && |
2012 | 8.77M | (ep->value.opproc == oparray_cleanup || |
2013 | 1.29M | ep->value.opproc == oparray_no_cleanup) |
2014 | 8.77M | ) |
2015 | 649k | return ep; |
2016 | 8.77M | } |
2017 | 0 | return 0; |
2018 | 649k | } |
2019 | | |
2020 | | /* <errorobj> <obj> .errorexec ... */ |
2021 | | /* Execute an object, substituting errorobj for the 'command' if an error */ |
2022 | | /* occurs during the execution. Cf .execfile (in zfile.c). */ |
2023 | | static int |
2024 | | zerrorexec(i_ctx_t *i_ctx_p) |
2025 | 245M | { |
2026 | 245M | os_ptr op = osp; |
2027 | 245M | int code; |
2028 | | |
2029 | 245M | check_op(2); |
2030 | 245M | check_estack(4); /* mark/cleanup, errobj, pop, obj */ |
2031 | 245M | push_mark_estack(es_other, errorexec_cleanup); |
2032 | 245M | *++esp = op[-1]; |
2033 | 245M | push_op_estack(errorexec_pop); |
2034 | 245M | code = zexec(i_ctx_p); |
2035 | 245M | if (code >= 0) |
2036 | 245M | pop(1); |
2037 | 0 | else |
2038 | 0 | esp -= 3; /* undo our additions to estack */ |
2039 | 245M | return code; |
2040 | 245M | } |
2041 | | |
2042 | | /* - .finderrorobject <errorobj> true */ |
2043 | | /* - .finderrorobject false */ |
2044 | | /* If we are within an .errorexec or oparray, return the error object */ |
2045 | | /* and true, otherwise return false. */ |
2046 | | static int |
2047 | | zfinderrorobject(i_ctx_t *i_ctx_p) |
2048 | 474k | { |
2049 | 474k | os_ptr op = osp; |
2050 | 474k | ref errobj; |
2051 | | |
2052 | 474k | if (errorexec_find(i_ctx_p, &errobj)) { |
2053 | 474k | push(2); |
2054 | 474k | op[-1] = errobj; |
2055 | 474k | make_true(op); |
2056 | 474k | } else { |
2057 | 0 | push(1); |
2058 | 0 | make_false(op); |
2059 | 0 | } |
2060 | 474k | return 0; |
2061 | 474k | } |
2062 | | |
2063 | | /* |
2064 | | * Find the innermost .errorexec or oparray. If there is an oparray, or a |
2065 | | * .errorexec with errobj != null, store it in *perror_object and return 1, |
2066 | | * otherwise return 0; |
2067 | | */ |
int
errorexec_find(i_ctx_t *i_ctx_p, ref *perror_object)
{
    long i;
    const ref *ep;

    /* Walk the e-stack from the top down until we hit the mark pushed
       by the innermost oparray invocation or .errorexec. */
    for (i = 0; (ep = ref_stack_index(&e_stack, i)) != 0; ++i) {
        if (r_is_estack_mark(ep)) {
            if (ep->value.opproc == oparray_cleanup) {
                /* See oppr: above. */
                /* ep[1] holds the operator index saved on entry. */
                uint opindex = (uint)ep[1].value.intval;
                if (opindex == 0) /* internal operator, ignore */
                    continue;
                /* Synthesize an operator ref naming the oparray. */
                op_index_ref(imemory, opindex, perror_object);
                return 1;
            }
            if (ep->value.opproc == oparray_no_cleanup)
                return 0;       /* protection disabled */
            if (ep->value.opproc == errorexec_cleanup) {
                /* A null errobj means .errorexec was invoked with no
                   substitute error object: report "not found". */
                if (r_has_type(ep + 1, t_null))
                    return 0;
                *perror_object = ep[1]; /* see .errorexec above */
                return 1;
            }
        }
    }
    return 0;
}
2096 | | |
2097 | | /* Pop the bookkeeping information on a normal exit from .errorexec. */ |
static int
errorexec_pop(i_ctx_t *i_ctx_p)
{
    /* Drop the two entries zerrorexec pushed below this continuation:
       the es_other mark and the saved errobj. */
    esp -= 2;
    return o_pop_estack;
}
2104 | | |
2105 | | /* Clean up when unwinding the stack on an error. (No action needed.) */ |
static int
errorexec_cleanup(i_ctx_t *i_ctx_p)
{
    /* Nothing to restore; the mark exists only so errorexec_find can
       locate the saved error object during unwinding. */
    return 0;
}
2111 | | |
2112 | | /* <bool> .setstackprotect - */ |
2113 | | /* Set whether to protect the stack for the innermost oparray. */ |
2114 | | static int |
2115 | | zsetstackprotect(i_ctx_t *i_ctx_p) |
2116 | 649k | { |
2117 | 649k | os_ptr op = osp; |
2118 | 649k | ref *ep = oparray_find(i_ctx_p); |
2119 | | |
2120 | 649k | check_op(1); |
2121 | 649k | check_type(*op, t_boolean); |
2122 | 649k | if (ep == 0) |
2123 | 0 | return_error(gs_error_rangecheck); |
2124 | 649k | ep->value.opproc = |
2125 | 649k | (op->value.boolval ? oparray_cleanup : oparray_no_cleanup); |
2126 | 649k | pop(1); |
2127 | 649k | return 0; |
2128 | 649k | } |
2129 | | |
2130 | | /* - .currentstackprotect <bool> */ |
2131 | | /* Return the stack protection status. */ |
2132 | | static int |
2133 | | zcurrentstackprotect(i_ctx_t *i_ctx_p) |
2134 | 0 | { |
2135 | 0 | os_ptr op = osp; |
2136 | 0 | ref *ep = oparray_find(i_ctx_p); |
2137 | |
|
2138 | 0 | if (ep == 0) |
2139 | 0 | return_error(gs_error_rangecheck); |
2140 | 0 | push(1); |
2141 | 0 | make_bool(op, ep->value.opproc == oparray_cleanup); |
2142 | 0 | return 0; |
2143 | 0 | } |
2144 | | |
2145 | | static int |
2146 | | zactonuel(i_ctx_t *i_ctx_p) |
2147 | 162k | { |
2148 | 162k | os_ptr op = osp; |
2149 | | |
2150 | 162k | push(1); |
2151 | 162k | make_bool(op, !!gs_lib_ctx_get_act_on_uel((gs_memory_t *)(i_ctx_p->memory.current))); |
2152 | 162k | return 0; |
2153 | 162k | } |