/src/ghostpdl/psi/interp.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2021 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 1305 Grant Avenue - Suite 200, Novato, |
13 | | CA 94945, U.S.A., +1(415)492-9861, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Ghostscript language interpreter */ |
18 | | #include "memory_.h" |
19 | | #include "string_.h" |
20 | | #include "ghost.h" |
21 | | #include "gsstruct.h" /* for iastruct.h */ |
22 | | #include "gserrors.h" /* for gpcheck.h */ |
23 | | #include "stream.h" |
24 | | #include "ierrors.h" |
25 | | #include "estack.h" |
26 | | #include "ialloc.h" |
27 | | #include "iastruct.h" |
28 | | #include "icontext.h" |
29 | | #include "icremap.h" |
30 | | #include "idebug.h" |
31 | | #include "igstate.h" /* for handling gs_error_Remap_Color */ |
32 | | #include "inamedef.h" |
33 | | #include "iname.h" /* for the_name_table */ |
34 | | #include "interp.h" |
35 | | #include "ipacked.h" |
36 | | #include "ostack.h" /* must precede iscan.h */ |
37 | | #include "strimpl.h" /* for sfilter.h */ |
38 | | #include "sfilter.h" /* for iscan.h */ |
39 | | #include "iscan.h" |
40 | | #include "iddict.h" |
41 | | #include "isave.h" |
42 | | #include "istack.h" |
43 | | #include "itoken.h" |
44 | | #include "iutil.h" /* for array_get */ |
45 | | #include "ivmspace.h" |
46 | | #include "iinit.h" |
47 | | #include "dstack.h" |
48 | | #include "files.h" /* for file_check_read */ |
49 | | #include "oper.h" |
50 | | #include "store.h" |
51 | | #include "gpcheck.h" |
52 | | |
53 | | /* |
54 | | * We may or may not optimize the handling of the special fast operators |
55 | | * in packed arrays. If we do this, they run much faster when packed, but |
56 | | * slightly slower when not packed. |
57 | | */ |
58 | | #define PACKED_SPECIAL_OPS 1 |
59 | | |
60 | | /* |
61 | | * Pseudo-operators (procedures of type t_oparray) record |
62 | | * the operand and dictionary stack pointers, and restore them if an error |
63 | | * occurs during the execution of the procedure and if the procedure hasn't |
64 | | * (net) decreased the depth of the stack. While this obviously doesn't |
65 | | * do all the work of restoring the state if a pseudo-operator gets an |
66 | | * error, it's a big help. The only downside is that pseudo-operators run |
67 | | * a little slower. |
68 | | */ |
69 | | |
70 | | /* GC descriptors for stacks */ |
71 | | extern_st(st_ref_stack); |
72 | | public_st_dict_stack(); |
73 | | public_st_exec_stack(); |
74 | | public_st_op_stack(); |
75 | | |
76 | | /* |
77 | | * Apply an operator. When debugging, we route all operator calls |
78 | | * through a procedure. |
79 | | */ |
80 | | #if defined(DEBUG_TRACE_PS_OPERATORS) || defined(DEBUG) |
81 | | #define call_operator(proc, p) (*call_operator_fn)(proc, p) |
82 | | static int |
83 | | do_call_operator(op_proc_t op_proc, i_ctx_t *i_ctx_p) |
84 | | { |
85 | | int code; |
86 | | code = op_proc(i_ctx_p); |
87 | | if (gs_debug_c(gs_debug_flag_validate_clumps)) |
88 | | ivalidate_clean_spaces(i_ctx_p); |
89 | | return code; /* A good place for a conditional breakpoint. */ |
90 | | } |
91 | | static int |
92 | | do_call_operator_verbose(op_proc_t op_proc, i_ctx_t *i_ctx_p) |
93 | | { |
94 | | int code; |
95 | | |
96 | | #ifndef SHOW_STACK_DEPTHS |
97 | | if_debug1m('!', imemory, "[!]operator %s\n", op_get_name_string(op_proc)); |
98 | | #else |
99 | | if_debug3m('!', imemory, "[!][es=%d os=%d]operator %s\n", |
100 | | esp-i_ctx_p->exec_stack.stack.bot, |
101 | | osp-i_ctx_p->op_stack.stack.bot, |
102 | | op_get_name_string(op_proc)); |
103 | | #endif |
104 | | code = do_call_operator(op_proc, i_ctx_p); |
105 | | #if defined(SHOW_STACK_DEPTHS) |
106 | | if_debug2m('!', imemory, "[!][es=%d os=%d]\n", |
107 | | esp-i_ctx_p->exec_stack.stack.bot, |
108 | | osp-i_ctx_p->op_stack.stack.bot); |
109 | | #endif |
110 | | if (gs_debug_c(gs_debug_flag_validate_clumps)) |
111 | | ivalidate_clean_spaces(i_ctx_p); |
112 | | return code; /* A good place for a conditional breakpoint. */ |
113 | | } |
114 | | #else |
115 | 136M | # define call_operator(proc, p) ((*(proc))(p)) |
116 | | #endif |
117 | | |
118 | | /* Define debugging statistics (not threadsafe as uses globals) */ |
119 | | /* #define COLLECT_STATS_IDSTACK */ |
120 | | |
121 | | #ifdef COLLECT_STATS_INTERP |
122 | | struct stats_interp_s { |
123 | | long top; |
124 | | long lit, lit_array, exec_array, exec_operator, exec_name; |
125 | | long x_add, x_def, x_dup, x_exch, x_if, x_ifelse, |
126 | | x_index, x_pop, x_roll, x_sub; |
127 | | long find_name, name_lit, name_proc, name_oparray, name_operator; |
128 | | long p_full, p_exec_operator, p_exec_oparray, p_exec_non_x_operator, |
129 | | p_integer, p_lit_name, p_exec_name; |
130 | | long p_find_name, p_name_lit, p_name_proc; |
131 | | } stats_interp; |
132 | | # define INCR(v) (++(stats_interp.v)) |
133 | | #else |
134 | 1.19G | # define INCR(v) DO_NOTHING |
135 | | #endif |
136 | | |
137 | | /* Forward references */ |
138 | | static int estack_underflow(i_ctx_t *); |
139 | | static int interp(i_ctx_t **, const ref *, ref *); |
140 | | static int interp_exit(i_ctx_t *); |
141 | | static int zforceinterp_exit(i_ctx_t *i_ctx_p); |
142 | | static void set_gc_signal(i_ctx_t *, int); |
143 | | static int copy_stack(i_ctx_t *, const ref_stack_t *, int skip, ref *); |
144 | | static int oparray_pop(i_ctx_t *); |
145 | | static int oparray_cleanup(i_ctx_t *); |
146 | | static int zerrorexec(i_ctx_t *); |
147 | | static int zfinderrorobject(i_ctx_t *); |
148 | | static int errorexec_pop(i_ctx_t *); |
149 | | static int errorexec_cleanup(i_ctx_t *); |
150 | | static int zsetstackprotect(i_ctx_t *); |
151 | | static int zcurrentstackprotect(i_ctx_t *); |
152 | | static int zactonuel(i_ctx_t *); |
153 | | |
154 | | /* Stack sizes */ |
155 | | |
156 | | /* The maximum stack sizes may all be set in the makefile. */ |
157 | | |
158 | | /* |
159 | | * Define the initial maximum size of the operand stack (MaxOpStack |
160 | | * user parameter). |
161 | | */ |
162 | | #ifndef MAX_OSTACK |
163 | 683 | # define MAX_OSTACK 800 |
164 | | #endif |
165 | | /* |
166 | | * The minimum block size for extending the operand stack is the larger of: |
167 | | * - the maximum number of parameters to an operator |
168 | | * (currently setcolorscreen, with 12 parameters); |
169 | | * - the maximum number of values pushed by an operator |
170 | | * (currently setcolortransfer, which calls zcolor_remap_one 4 times |
171 | | * and therefore pushes 16 values). |
172 | | */ |
173 | | #define MIN_BLOCK_OSTACK 16 |
174 | | const int gs_interp_max_op_num_args = MIN_BLOCK_OSTACK; /* for iinit.c */ |
175 | | |
176 | | /* |
177 | | * Define the initial maximum size of the execution stack (MaxExecStack |
178 | | * user parameter). |
179 | | */ |
180 | | #ifndef MAX_ESTACK |
181 | 683 | # define MAX_ESTACK 5000 |
182 | | #endif |
183 | | /* |
184 | | * The minimum block size for extending the execution stack is the largest |
185 | | * size of a contiguous block surrounding an e-stack mark. (At least, |
186 | | * that's what the minimum value would be if we supported multi-block |
187 | | * estacks, which we currently don't.) Currently, the largest such block is |
188 | | * the one created for text processing, which is 8 (snumpush) slots. |
189 | | */ |
190 | 0 | #define MIN_BLOCK_ESTACK 8 |
191 | | /* |
192 | | * If we get an e-stack overflow, we need to cut it back far enough to |
193 | | * have some headroom for executing the error procedure. |
194 | | */ |
195 | 0 | #define ES_HEADROOM 20 |
196 | | |
197 | | /* |
198 | | * Define the initial maximum size of the dictionary stack (MaxDictStack |
199 | | * user parameter). Again, this is also currently the block size for |
200 | | * extending the d-stack. |
201 | | */ |
202 | | #ifndef MAX_DSTACK |
203 | 683 | # define MAX_DSTACK 20 |
204 | | #endif |
205 | | /* |
206 | | * The minimum block size for extending the dictionary stack is the number |
207 | | * of permanent entries on the dictionary stack, currently 3. |
208 | | */ |
209 | | #define MIN_BLOCK_DSTACK 3 |
210 | | |
211 | | /* See estack.h for a description of the execution stack. */ |
212 | | |
213 | | /* The logic for managing icount and iref below assumes that */ |
214 | | /* there are no control operators which pop and then push */ |
215 | | /* information on the execution stack. */ |
216 | | |
217 | | /* Stacks */ |
218 | | extern_st(st_ref_stack); |
219 | 2.04k | #define OS_GUARD_UNDER 10 |
220 | 2.04k | #define OS_GUARD_OVER 10 |
221 | | #define OS_REFS_SIZE(body_size)\ |
222 | 1.36k | (stack_block_refs + OS_GUARD_UNDER + (body_size) + OS_GUARD_OVER) |
223 | | |
224 | 2.04k | #define ES_GUARD_UNDER 1 |
225 | 2.04k | #define ES_GUARD_OVER 10 |
226 | | #define ES_REFS_SIZE(body_size)\ |
227 | 1.36k | (stack_block_refs + ES_GUARD_UNDER + (body_size) + ES_GUARD_OVER) |
228 | | |
229 | | #define DS_REFS_SIZE(body_size)\ |
230 | 683 | (stack_block_refs + (body_size)) |
231 | | |
232 | | /* Extended types. The interpreter may replace the type of operators */ |
233 | | /* in procedures with these, to speed up the interpretation loop. */ |
234 | | /****** NOTE: If you add or change entries in this list, */ |
235 | | /****** you must change the three dispatches in the interpreter loop. */ |
236 | | /* The operator procedures are declared in opextern.h. */ |
237 | 90.5M | #define tx_op t_next_index |
238 | | typedef enum { |
239 | | tx_op_add = tx_op, |
240 | | tx_op_def, |
241 | | tx_op_dup, |
242 | | tx_op_exch, |
243 | | tx_op_if, |
244 | | tx_op_ifelse, |
245 | | tx_op_index, |
246 | | tx_op_pop, |
247 | | tx_op_roll, |
248 | | tx_op_sub, |
249 | | tx_next_op |
250 | | } special_op_types; |
251 | | |
252 | 485k | #define num_special_ops ((int)tx_next_op - tx_op) |
253 | | const int gs_interp_num_special_ops = num_special_ops; /* for iinit.c */ |
254 | | const int tx_next_index = tx_next_op; |
255 | | |
256 | | /* |
257 | | * NOTE: if the size of either table below ever exceeds 15 real entries, it |
258 | | * will have to be split. |
259 | | */ |
260 | | /* Define the extended-type operators per the list above. */ |
261 | | const op_def interp1_op_defs[] = { |
262 | | /* |
263 | | * The very first entry, which corresponds to operator index 0, |
264 | | * must not contain an actual operator. |
265 | | */ |
266 | | op_def_begin_dict("systemdict"), |
267 | | {"2add", zadd}, |
268 | | {"2def", zdef}, |
269 | | {"1dup", zdup}, |
270 | | {"2exch", zexch}, |
271 | | {"2if", zif}, |
272 | | {"3ifelse", zifelse}, |
273 | | {"1index", zindex}, |
274 | | {"1pop", zpop}, |
275 | | {"2roll", zroll}, |
276 | | {"2sub", zsub}, |
277 | | op_def_end(0) |
278 | | }; |
279 | | /* Define the internal interpreter operators. */ |
280 | | const op_def interp2_op_defs[] = { |
281 | | {"0.currentstackprotect", zcurrentstackprotect}, |
282 | | {"1.setstackprotect", zsetstackprotect}, |
283 | | {"2.errorexec", zerrorexec}, |
284 | | {"0.finderrorobject", zfinderrorobject}, |
285 | | {"0%interp_exit", interp_exit}, |
286 | | {"0.forceinterp_exit", zforceinterp_exit}, |
287 | | {"0%oparray_pop", oparray_pop}, |
288 | | {"0%errorexec_pop", errorexec_pop}, |
289 | | {"0.actonuel", zactonuel}, |
290 | | op_def_end(0) |
291 | | }; |
292 | | |
293 | | #define make_null_proc(pref)\ |
294 | 0 | make_empty_const_array(pref, a_executable + a_readonly) |
295 | | |
296 | | /* Initialize the interpreter. */ |
297 | | int |
298 | | gs_interp_init(i_ctx_t **pi_ctx_p, const ref *psystem_dict, |
299 | | gs_dual_memory_t *dmem) |
300 | 683 | { |
301 | | /* Create and initialize a context state. */ |
302 | 683 | gs_context_state_t *pcst = 0; |
303 | 683 | int code = context_state_alloc(&pcst, psystem_dict, dmem); |
304 | 683 | if (code >= 0) { |
305 | 683 | code = context_state_load(pcst); |
306 | 683 | if (code < 0) { |
307 | 0 | context_state_free(pcst); |
308 | 0 | pcst = NULL; |
309 | 0 | } |
310 | 683 | } |
311 | | |
312 | 683 | if (code < 0) |
313 | 0 | lprintf1("Fatal error %d in gs_interp_init!\n", code); |
314 | 683 | *pi_ctx_p = pcst; |
315 | | |
316 | 683 | return code; |
317 | 683 | } |
318 | | /* |
319 | | * Create initial stacks for the interpreter. |
320 | | * We export this for creating new contexts. |
321 | | */ |
322 | | int |
323 | | gs_interp_alloc_stacks(gs_ref_memory_t *mem, gs_context_state_t * pcst) |
324 | 683 | { |
325 | 683 | int code; |
326 | 683 | gs_ref_memory_t *smem = |
327 | 683 | (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)mem); |
328 | 683 | ref stk; |
329 | | |
330 | 1.36k | #define REFS_SIZE_OSTACK OS_REFS_SIZE(MAX_OSTACK) |
331 | 1.36k | #define REFS_SIZE_ESTACK ES_REFS_SIZE(MAX_ESTACK) |
332 | 683 | #define REFS_SIZE_DSTACK DS_REFS_SIZE(MAX_DSTACK) |
333 | 683 | code = gs_alloc_ref_array(smem, &stk, 0, |
334 | 683 | REFS_SIZE_OSTACK + REFS_SIZE_ESTACK + |
335 | 683 | REFS_SIZE_DSTACK, "gs_interp_alloc_stacks"); |
336 | 683 | if (code < 0) |
337 | 0 | return code; |
338 | | |
339 | 683 | { |
340 | 683 | ref_stack_t *pos = &pcst->op_stack.stack; |
341 | | |
342 | 683 | r_set_size(&stk, REFS_SIZE_OSTACK); |
343 | 683 | code = ref_stack_init(pos, &stk, OS_GUARD_UNDER, OS_GUARD_OVER, NULL, |
344 | 683 | smem, NULL); |
345 | 683 | if (code < 0) |
346 | 0 | return code; |
347 | 683 | ref_stack_set_error_codes(pos, gs_error_stackunderflow, gs_error_stackoverflow); |
348 | 683 | ref_stack_set_max_count(pos, MAX_OSTACK); |
349 | 683 | stk.value.refs += REFS_SIZE_OSTACK; |
350 | 683 | } |
351 | | |
352 | 0 | { |
353 | 683 | ref_stack_t *pes = &pcst->exec_stack.stack; |
354 | 683 | ref euop; |
355 | | |
356 | 683 | r_set_size(&stk, REFS_SIZE_ESTACK); |
357 | 683 | make_oper(&euop, 0, estack_underflow); |
358 | 683 | code = ref_stack_init(pes, &stk, ES_GUARD_UNDER, ES_GUARD_OVER, &euop, |
359 | 683 | smem, NULL); |
360 | 683 | if (code < 0) |
361 | 0 | return code; |
362 | 683 | ref_stack_set_error_codes(pes, gs_error_ExecStackUnderflow, |
363 | 683 | gs_error_execstackoverflow); |
364 | | /**************** E-STACK EXPANSION IS NYI. ****************/ |
365 | 683 | ref_stack_allow_expansion(pes, false); |
366 | 683 | ref_stack_set_max_count(pes, MAX_ESTACK); |
367 | 683 | stk.value.refs += REFS_SIZE_ESTACK; |
368 | 683 | } |
369 | | |
370 | 0 | { |
371 | 683 | ref_stack_t *pds = &pcst->dict_stack.stack; |
372 | | |
373 | 683 | r_set_size(&stk, REFS_SIZE_DSTACK); |
374 | 683 | code = ref_stack_init(pds, &stk, 0, 0, NULL, smem, NULL); |
375 | 683 | if (code < 0) |
376 | 0 | return code; |
377 | 683 | ref_stack_set_error_codes(pds, gs_error_dictstackunderflow, |
378 | 683 | gs_error_dictstackoverflow); |
379 | 683 | ref_stack_set_max_count(pds, MAX_DSTACK); |
380 | 683 | } |
381 | | |
382 | 0 | #undef REFS_SIZE_OSTACK |
383 | 0 | #undef REFS_SIZE_ESTACK |
384 | 0 | #undef REFS_SIZE_DSTACK |
385 | 0 | return 0; |
386 | 683 | } |
387 | | /* |
388 | | * Free the stacks when destroying a context. This is the inverse of |
389 | | * create_stacks. |
390 | | */ |
391 | | void |
392 | | gs_interp_free_stacks(gs_ref_memory_t * smem, gs_context_state_t * pcst) |
393 | 0 | { |
394 | | /* Free the stacks in inverse order of allocation. */ |
395 | 0 | ref_stack_release(&pcst->dict_stack.stack); |
396 | 0 | ref_stack_release(&pcst->exec_stack.stack); |
397 | 0 | ref_stack_release(&pcst->op_stack.stack); |
398 | 0 | } |
399 | | void |
400 | | gs_interp_reset(i_ctx_t *i_ctx_p) |
401 | 683 | { /* Reset the stacks. */ |
402 | 683 | ref_stack_clear(&o_stack); |
403 | 683 | ref_stack_clear(&e_stack); |
404 | 683 | esp++; |
405 | 683 | make_oper(esp, 0, interp_exit); |
406 | 683 | ref_stack_pop_to(&d_stack, min_dstack_size); |
407 | 683 | dict_set_top(); |
408 | 683 | } |
409 | | /* Report an e-stack block underflow. The bottom guard slots of */ |
410 | | /* e-stack blocks contain a pointer to this procedure. */ |
411 | | static int |
412 | | estack_underflow(i_ctx_t *i_ctx_p) |
413 | 0 | { |
414 | 0 | return gs_error_ExecStackUnderflow; |
415 | 0 | } |
416 | | |
417 | | /* |
418 | | * Create an operator during initialization. |
419 | | * If operator is hard-coded into the interpreter, |
420 | | * assign it a special type and index. |
421 | | */ |
422 | | void |
423 | | gs_interp_make_oper(ref * opref, op_proc_t proc, int idx) |
424 | 485k | { |
425 | 485k | int i; |
426 | | |
427 | 5.26M | for (i = num_special_ops; i > 0 && proc != interp1_op_defs[i].proc; --i) |
428 | 4.78M | DO_NOTHING; |
429 | 485k | if (i > 0) |
430 | 13.6k | make_tasv(opref, tx_op + (i - 1), a_executable, i, opproc, proc); |
431 | 471k | else |
432 | 471k | make_tasv(opref, t_operator, a_executable, idx, opproc, proc); |
433 | 485k | } |
434 | | |
435 | | /* |
436 | | * Call the garbage collector, updating the context pointer properly. |
437 | | */ |
438 | | int |
439 | | interp_reclaim(i_ctx_t **pi_ctx_p, int space) |
440 | 1.40k | { |
441 | 1.40k | i_ctx_t *i_ctx_p = *pi_ctx_p; |
442 | 1.40k | gs_gc_root_t ctx_root, *r = &ctx_root; |
443 | 1.40k | int code; |
444 | | |
445 | | #ifdef DEBUG |
446 | | if (gs_debug_c(gs_debug_flag_gc_disable)) |
447 | | return 0; |
448 | | #endif |
449 | | |
450 | 1.40k | gs_register_struct_root(imemory_system, &r, |
451 | 1.40k | (void **)pi_ctx_p, "interp_reclaim(pi_ctx_p)"); |
452 | 1.40k | code = (*idmemory->reclaim)(idmemory, space); |
453 | 1.40k | i_ctx_p = *pi_ctx_p; /* may have moved */ |
454 | 1.40k | gs_unregister_root(imemory_system, r, "interp_reclaim(pi_ctx_p)"); |
455 | 1.40k | return code; |
456 | 1.40k | } |
457 | | |
458 | | /* |
459 | | * Invoke the interpreter. If execution completes normally, return 0. |
460 | | * If an error occurs, the action depends on user_errors as follows: |
461 | | * user_errors < 0: always return an error code. |
462 | | * user_errors >= 0: let the PostScript machinery handle all errors. |
463 | | * (This will eventually result in a fatal error if no 'stopped' |
464 | | * is active.) |
465 | | * In case of a quit or a fatal error, also store the exit code. |
466 | | * Set *perror_object to null or the error object. |
467 | | */ |
468 | | static int gs_call_interp(i_ctx_t **, ref *, int, int *, ref *); |
469 | | int |
470 | | gs_interpret(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, int *pexit_code, |
471 | | ref * perror_object) |
472 | 10.2k | { |
473 | 10.2k | i_ctx_t *i_ctx_p = *pi_ctx_p; |
474 | 10.2k | gs_gc_root_t error_root, *r = &error_root; |
475 | 10.2k | int code; |
476 | | |
477 | 10.2k | gs_register_ref_root(imemory_system, &r, |
478 | 10.2k | (void **)&perror_object, "gs_interpret"); |
479 | 10.2k | code = gs_call_interp(pi_ctx_p, pref, user_errors, pexit_code, |
480 | 10.2k | perror_object); |
481 | 10.2k | i_ctx_p = *pi_ctx_p; |
482 | 10.2k | gs_unregister_root(imemory_system, &error_root, "gs_interpret"); |
483 | | /* Avoid a dangling reference to the lib context GC signal. */ |
484 | 10.2k | set_gc_signal(i_ctx_p, 0); |
485 | 10.2k | return code; |
486 | 10.2k | } |
487 | | static int |
488 | | gs_call_interp(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, |
489 | | int *pexit_code, ref * perror_object) |
490 | 10.2k | { |
491 | 10.2k | ref *epref = pref; |
492 | 10.2k | ref doref; |
493 | 10.2k | ref *perrordict; |
494 | 10.2k | ref error_name; |
495 | 10.2k | int code, ccode; |
496 | 10.2k | ref saref; |
497 | 10.2k | i_ctx_t *i_ctx_p = *pi_ctx_p; |
498 | 10.2k | int *gc_signal = &imemory_system->gs_lib_ctx->gcsignal; |
499 | | |
500 | 10.2k | *pexit_code = 0; |
501 | 10.2k | *gc_signal = 0; |
502 | 10.2k | ialloc_reset_requested(idmemory); |
503 | 398k | again: |
504 | | /* Avoid a dangling error object that might get traced by a future GC. */ |
505 | 398k | make_null(perror_object); |
506 | 398k | o_stack.requested = e_stack.requested = d_stack.requested = 0; |
507 | 398k | while (*gc_signal) { /* Some routine below triggered a GC. */ |
508 | 0 | gs_gc_root_t epref_root, *r = &epref_root; |
509 | |
|
510 | 0 | *gc_signal = 0; |
511 | | /* Make sure that doref will get relocated properly if */ |
512 | | /* a garbage collection happens with epref == &doref. */ |
513 | 0 | gs_register_ref_root(imemory_system, &r, |
514 | 0 | (void **)&epref, "gs_call_interp(epref)"); |
515 | 0 | code = interp_reclaim(pi_ctx_p, -1); |
516 | 0 | i_ctx_p = *pi_ctx_p; |
517 | 0 | gs_unregister_root(imemory_system, &epref_root, |
518 | 0 | "gs_call_interp(epref)"); |
519 | 0 | if (code < 0) |
520 | 0 | return code; |
521 | 0 | } |
522 | 398k | code = interp(pi_ctx_p, epref, perror_object); |
523 | 398k | i_ctx_p = *pi_ctx_p; |
524 | 398k | if (!r_has_type(&i_ctx_p->error_object, t__invalid)) { |
525 | 0 | *perror_object = i_ctx_p->error_object; |
526 | 0 | make_t(&i_ctx_p->error_object, t__invalid); |
527 | 0 | } |
528 | | /* Prevent a dangling reference to the GC signal in ticks_left */ |
529 | | /* in the frame of interp, but be prepared to do a GC if */ |
530 | | /* an allocation in this routine asks for it. */ |
531 | 398k | *gc_signal = 0; |
532 | 398k | set_gc_signal(i_ctx_p, 1); |
533 | 398k | if (esp < esbot) /* popped guard entry */ |
534 | 2.04k | esp = esbot; |
535 | 398k | switch (code) { |
536 | 0 | case gs_error_Fatal: |
537 | 0 | *pexit_code = 255; |
538 | 0 | return code; |
539 | 2.05k | case gs_error_Quit: |
540 | 2.05k | *perror_object = osp[-1]; |
541 | 2.05k | *pexit_code = code = osp->value.intval; |
542 | 2.05k | osp -= 2; |
543 | 2.05k | return |
544 | 2.05k | (code == 0 ? gs_error_Quit : |
545 | 2.05k | code < 0 && code > -100 ? code : gs_error_Fatal); |
546 | 2.04k | case gs_error_InterpreterExit: |
547 | 2.04k | return 0; |
548 | 0 | case gs_error_ExecStackUnderflow: |
549 | | /****** WRONG -- must keep mark blocks intact ******/ |
550 | 0 | ref_stack_pop_block(&e_stack); |
551 | 0 | doref = *perror_object; |
552 | 0 | epref = &doref; |
553 | 0 | goto again; |
554 | 683 | case gs_error_VMreclaim: |
555 | | /* Do the GC and continue. */ |
556 | | /* We ignore the return value here, if it fails here |
557 | | * we'll call it again having jumped to the "again" label. |
558 | | * Where, assuming it fails again, we'll handle the error. |
559 | | */ |
560 | 683 | (void)interp_reclaim(pi_ctx_p, |
561 | 683 | (osp->value.intval == 2 ? |
562 | 683 | avm_global : avm_local)); |
563 | 683 | i_ctx_p = *pi_ctx_p; |
564 | 683 | make_oper(&doref, 0, zpop); |
565 | 683 | epref = &doref; |
566 | 683 | goto again; |
567 | 6.14k | case gs_error_NeedInput: |
568 | 6.14k | case gs_error_interrupt: |
569 | 6.14k | return code; |
570 | 398k | } |
571 | | /* Adjust osp in case of operand stack underflow */ |
572 | 387k | if (osp < osbot - 1) |
573 | 0 | osp = osbot - 1; |
574 | | /* We have to handle stack over/underflow specially, because */ |
575 | | /* we might be able to recover by adding or removing a block. */ |
576 | 387k | switch (code) { |
577 | 0 | case gs_error_dictstackoverflow: |
578 | | /* We don't have to handle this specially: */ |
579 | | /* The only places that could generate it */ |
580 | | /* use check_dstack, which does a ref_stack_extend, */ |
 581 | 0 | /* so if we get this error, it's a real one. */ |
582 | 0 | if (osp >= ostop) { |
583 | 0 | if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) |
584 | 0 | return ccode; |
585 | 0 | } |
586 | | /* Skip system dictionaries for CET 20-02-02 */ |
587 | 0 | ccode = copy_stack(i_ctx_p, &d_stack, min_dstack_size, &saref); |
588 | 0 | if (ccode < 0) |
589 | 0 | return ccode; |
590 | 0 | ref_stack_pop_to(&d_stack, min_dstack_size); |
591 | 0 | dict_set_top(); |
592 | 0 | *++osp = saref; |
593 | 0 | break; |
594 | 0 | case gs_error_dictstackunderflow: |
595 | 0 | if (ref_stack_pop_block(&d_stack) >= 0) { |
596 | 0 | dict_set_top(); |
597 | 0 | doref = *perror_object; |
598 | 0 | epref = &doref; |
599 | 0 | goto again; |
600 | 0 | } |
601 | 0 | break; |
602 | 0 | case gs_error_execstackoverflow: |
603 | | /* We don't have to handle this specially: */ |
604 | | /* The only places that could generate it */ |
605 | | /* use check_estack, which does a ref_stack_extend, */ |
606 | | /* so if we get this error, it's a real one. */ |
607 | 0 | if (osp >= ostop) { |
608 | 0 | if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) |
609 | 0 | return ccode; |
610 | 0 | } |
611 | 0 | ccode = copy_stack(i_ctx_p, &e_stack, 0, &saref); |
612 | 0 | if (ccode < 0) |
613 | 0 | return ccode; |
614 | 0 | { |
615 | 0 | uint count = ref_stack_count(&e_stack); |
616 | 0 | uint limit = ref_stack_max_count(&e_stack) - ES_HEADROOM; |
617 | |
|
618 | 0 | if (count > limit) { |
619 | | /* |
620 | | * If there is an e-stack mark within MIN_BLOCK_ESTACK of |
621 | | * the new top, cut the stack back to remove the mark. |
622 | | */ |
623 | 0 | int skip = count - limit; |
624 | 0 | int i; |
625 | |
|
626 | 0 | for (i = skip; i < skip + MIN_BLOCK_ESTACK; ++i) { |
627 | 0 | const ref *ep = ref_stack_index(&e_stack, i); |
628 | |
|
629 | 0 | if (r_has_type_attrs(ep, t_null, a_executable)) { |
630 | 0 | skip = i + 1; |
631 | 0 | break; |
632 | 0 | } |
633 | 0 | } |
634 | 0 | pop_estack(i_ctx_p, skip); |
635 | 0 | } |
636 | 0 | } |
637 | 0 | *++osp = saref; |
638 | 0 | break; |
639 | 20.9k | case gs_error_stackoverflow: |
640 | 20.9k | if (ref_stack_extend(&o_stack, o_stack.requested) >= 0) { /* We can't just re-execute the object, because */ |
641 | | /* it might be a procedure being pushed as a */ |
642 | | /* literal. We check for this case specially. */ |
643 | 20.9k | doref = *perror_object; |
644 | 20.9k | if (r_is_proc(&doref)) { |
645 | 0 | *++osp = doref; |
646 | 0 | make_null_proc(&doref); |
647 | 0 | } |
648 | 20.9k | epref = &doref; |
649 | 20.9k | goto again; |
650 | 20.9k | } |
651 | 0 | ccode = copy_stack(i_ctx_p, &o_stack, 0, &saref); |
652 | 0 | if (ccode < 0) |
653 | 0 | return ccode; |
654 | 0 | ref_stack_clear(&o_stack); |
655 | 0 | *++osp = saref; |
656 | 0 | break; |
657 | 0 | case gs_error_stackunderflow: |
658 | 0 | if (ref_stack_pop_block(&o_stack) >= 0) { |
659 | 0 | doref = *perror_object; |
660 | 0 | epref = &doref; |
661 | 0 | goto again; |
662 | 0 | } |
663 | 0 | break; |
664 | 387k | } |
665 | 366k | if (user_errors < 0) |
666 | 0 | return code; |
667 | 366k | if (gs_errorname(i_ctx_p, code, &error_name) < 0) |
668 | 0 | return code; /* out-of-range error code! */ |
669 | | |
 670 | | /* We refer to gserrordict first, which is not accessible to PostScript jobs |
671 | | * If we're running with SAFERERRORS all the handlers are copied to gserrordict |
672 | | * so we'll always find the default one. If not SAFERERRORS, only gs specific |
673 | | * errors are in gserrordict. |
674 | | */ |
675 | 366k | if ((dict_find_string(systemdict, "gserrordict", &perrordict) <= 0 || |
676 | 366k | !r_has_type(perrordict, t_dictionary) || |
677 | 366k | dict_find(perrordict, &error_name, &epref) <= 0) && |
678 | 366k | (dict_find_string(systemdict, "errordict", &perrordict) <= 0 || |
679 | 366k | !r_has_type(perrordict, t_dictionary) || |
680 | 366k | dict_find(perrordict, &error_name, &epref) <= 0)) |
681 | 0 | return code; /* error name not in errordict??? */ |
682 | | |
683 | 366k | doref = *epref; |
684 | 366k | epref = &doref; |
685 | | /* Push the error object on the operand stack if appropriate. */ |
686 | 366k | if (!GS_ERROR_IS_INTERRUPT(code)) { |
687 | 366k | byte buf[260], *bufptr; |
688 | 366k | uint rlen; |
689 | | /* Replace the error object if within an oparray or .errorexec. */ |
690 | 366k | osp++; |
691 | 366k | if (osp >= ostop) { |
692 | 0 | *pexit_code = gs_error_Fatal; |
693 | 0 | return_error(gs_error_Fatal); |
694 | 0 | } |
695 | 366k | *osp = *perror_object; |
696 | 366k | errorexec_find(i_ctx_p, osp); |
697 | | |
698 | 366k | if (!r_has_type(osp, t_string) && !r_has_type(osp, t_name)) { |
699 | 364k | code = obj_cvs(imemory, osp, buf + 2, 256, &rlen, (const byte **)&bufptr); |
700 | 364k | if (code < 0) { |
701 | 0 | const char *unknownstr = "--unknown--"; |
702 | 0 | rlen = strlen(unknownstr); |
703 | 0 | memcpy(buf, unknownstr, rlen); |
704 | 0 | bufptr = buf; |
705 | 0 | } |
706 | 364k | else { |
707 | 364k | ref *tobj; |
708 | 364k | bufptr[rlen] = '\0'; |
709 | | /* Only pass a name object if the operator doesn't exist in systemdict |
710 | | * i.e. it's an internal operator we have hidden |
711 | | */ |
712 | 364k | code = dict_find_string(systemdict, (const char *)bufptr, &tobj); |
713 | 364k | if (code <= 0) { |
714 | 6.83k | buf[0] = buf[1] = buf[rlen + 2] = buf[rlen + 3] = '-'; |
715 | 6.83k | rlen += 4; |
716 | 6.83k | bufptr = buf; |
717 | 6.83k | } |
718 | 357k | else { |
719 | 357k | bufptr = NULL; |
720 | 357k | } |
721 | 364k | } |
722 | 364k | if (bufptr) { |
723 | 6.83k | code = name_ref(imemory, buf, rlen, osp, 1); |
724 | 6.83k | if (code < 0) |
725 | 6.83k | make_null(osp); |
726 | 6.83k | } |
727 | 364k | } |
728 | 366k | } |
729 | 366k | goto again; |
730 | 366k | } |
731 | | static int |
732 | | interp_exit(i_ctx_t *i_ctx_p) |
733 | 2.04k | { |
734 | 2.04k | return gs_error_InterpreterExit; |
735 | 2.04k | } |
736 | | |
737 | | /* Only used (currently) with language switching: |
738 | | * allows the PS interpreter to co-exist with the |
739 | | * PJL interpreter. |
740 | | */ |
741 | | static int |
742 | | zforceinterp_exit(i_ctx_t *i_ctx_p) |
743 | 0 | { |
744 | 0 | os_ptr op = osp; |
745 | 0 | stream *s; |
746 | |
|
747 | 0 | check_file(s, op); |
748 | 0 | i_ctx_p->uel_position = stell(s)-1; |
749 | | /* resetfile */ |
750 | 0 | if (file_is_valid(s, op)) |
751 | 0 | sreset(s); |
752 | |
|
753 | 0 | if (!gs_lib_ctx_get_act_on_uel((gs_memory_t *)(i_ctx_p->memory.current))) |
754 | 0 | return 0; |
755 | | |
756 | 0 | gs_interp_reset(i_ctx_p); |
757 | | /* gs_interp_reset() actually leaves the op stack one entry below |
758 | | * the bottom of the stack, and that can cause problems depending |
759 | | * on the interpreter state at the end of the job. |
760 | | * So push a null object, and the return code before continuing. |
761 | | */ |
762 | 0 | push(2); |
763 | 0 | op = osp; |
764 | 0 | make_null(op - 1); |
765 | 0 | make_int(op, gs_error_InterpreterExit); |
766 | 0 | return_error(gs_error_Quit); |
767 | 0 | } |
768 | | |
769 | | /* Set the GC signal for all VMs. */ |
770 | | static void |
771 | | set_gc_signal(i_ctx_t *i_ctx_p, int value) |
772 | 807k | { |
773 | 807k | gs_memory_gc_status_t stat; |
774 | 807k | int i; |
775 | | |
776 | 4.03M | for (i = 0; i < countof(idmemory->spaces_indexed); i++) { |
777 | 3.22M | gs_ref_memory_t *mem = idmemory->spaces_indexed[i]; |
778 | 3.22M | gs_ref_memory_t *mem_stable; |
779 | | |
780 | 3.22M | if (mem == 0) |
781 | 807k | continue; |
782 | 4.03M | for (;; mem = mem_stable) { |
783 | 4.03M | mem_stable = (gs_ref_memory_t *) |
784 | 4.03M | gs_memory_stable((gs_memory_t *)mem); |
785 | 4.03M | gs_memory_gc_status(mem, &stat); |
786 | 4.03M | stat.signal_value = value; |
787 | 4.03M | gs_memory_set_gc_status(mem, &stat); |
788 | 4.03M | if (mem_stable == mem) |
789 | 2.42M | break; |
790 | 4.03M | } |
791 | 2.42M | } |
792 | 807k | } |
793 | | |
794 | | /* Create a printable string ref (or null) from an arbitrary ref. |
795 | | * For the purpose this is used here, it cannot fail, any |
796 | | * error in the process results in a null object, instead |
797 | | * of the string. |
798 | | */ |
799 | | static void obj_cvs_ref(i_ctx_t *i_ctx_p, const ref *in, ref *out) |
800 | 0 | { |
801 | 0 | uint rlen; |
802 | 0 | int code; |
803 | 0 | byte sbuf[65], *buf = sbuf; |
804 | 0 | uint len = sizeof(sbuf) - 1; |
805 | |
|
806 | 0 | code = obj_cvs(imemory, in, buf, len, &rlen, NULL); |
807 | 0 | if (code == gs_error_rangecheck) { |
808 | 0 | len = rlen; |
809 | 0 | buf = gs_alloc_bytes(imemory, len + 1, "obj_cvs_ref"); |
810 | 0 | if (!buf) |
811 | 0 | code = -1; |
812 | 0 | else |
813 | 0 | code = obj_cvs(imemory, in, buf, len, &rlen, NULL); |
814 | 0 | } |
815 | 0 | if (code < 0) { |
816 | 0 | make_null(out); |
817 | 0 | } |
818 | 0 | else { |
819 | 0 | buf[rlen] = '\0'; |
820 | 0 | code = string_to_ref((const char *)buf, out, iimemory, "obj_cvs_ref"); |
821 | 0 | if (code < 0) |
822 | 0 | make_null(out); |
823 | 0 | } |
824 | 0 | if (buf != sbuf) |
825 | 0 | gs_free_object(imemory, buf, "obj_cvs_ref"); |
826 | 0 | return; |
827 | 0 | } |
828 | | |
829 | | /* Copy top elements of an overflowed stack into a (local) array. */ |
830 | | /* Adobe copies only 500 top elements, we copy up to 65535 top elements */ |
831 | | /* for better debugging, PLRM compliance, and backward compatibility. */ |
832 | | static int |
833 | | copy_stack(i_ctx_t *i_ctx_p, const ref_stack_t * pstack, int skip, ref * arr) |
834 | 0 | { |
835 | 0 | uint size = ref_stack_count(pstack) - skip; |
836 | 0 | uint save_space = ialloc_space(idmemory); |
837 | 0 | int code, i; |
838 | 0 | ref *safety, *safe; |
839 | |
|
840 | 0 | if (size > 65535) |
841 | 0 | size = 65535; |
842 | 0 | ialloc_set_space(idmemory, avm_local); |
843 | 0 | code = ialloc_ref_array(arr, a_all, size, "copy_stack"); |
844 | 0 | if (code >= 0) |
845 | 0 | code = ref_stack_store(pstack, arr, size, 0, 1, true, idmemory, |
846 | 0 | "copy_stack"); |
847 | | /* If we are copying the exec stack, try to replace any oparrays with |
848 | | * the operator that references them |
849 | | * We also replace any internal objects (t_struct and t_astruct) with |
850 | | * string representations, since these can contain references to objects |
851 | | * with uncertain lifespans, it is safer not to risk them persisting. |
852 | | * Since we basically did this later on for the error handler, it isn't |
853 | | * a significant speed hit. |
854 | | */ |
855 | 0 | if (pstack == &e_stack) { |
856 | 0 | for (i = 0; i < size; i++) { |
857 | 0 | if (errorexec_find(i_ctx_p, &arr->value.refs[i]) < 0) |
858 | 0 | make_null(&arr->value.refs[i]); |
859 | 0 | else if (r_has_type(&arr->value.refs[i], t_struct) |
860 | 0 | || r_has_type(&arr->value.refs[i], t_astruct)) { |
861 | 0 | ref r; |
862 | 0 | obj_cvs_ref(i_ctx_p, (const ref *)&arr->value.refs[i], &r); |
863 | 0 | ref_assign(&arr->value.refs[i], &r); |
864 | 0 | } |
865 | 0 | } |
866 | 0 | } |
867 | 0 | if (pstack == &o_stack && dict_find_string(systemdict, "SAFETY", &safety) > 0 && |
868 | 0 | dict_find_string(safety, "safe", &safe) > 0 && r_has_type(safe, t_boolean) && |
869 | 0 | safe->value.boolval == true) { |
870 | 0 | code = ref_stack_array_sanitize(i_ctx_p, arr, arr); |
871 | 0 | if (code < 0) |
872 | 0 | return code; |
873 | 0 | } |
874 | 0 | ialloc_set_space(idmemory, save_space); |
875 | 0 | return code; |
876 | 0 | } |
877 | | |
878 | | /* Get the name corresponding to an error number. */ |
879 | | int |
880 | | gs_errorname(i_ctx_t *i_ctx_p, int code, ref * perror_name) |
881 | 367k | { |
882 | 367k | ref *perrordict, *pErrorNames; |
883 | | |
884 | 367k | if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 || |
885 | 367k | dict_find_string(systemdict, "ErrorNames", &pErrorNames) <= 0 |
886 | 367k | ) |
887 | 0 | return_error(gs_error_undefined); /* errordict or ErrorNames not found?! */ |
888 | 367k | return array_get(imemory, pErrorNames, (long)(-code - 1), perror_name); |
889 | 367k | } |
890 | | |
891 | | /* Store an error string in $error.errorinfo. */ |
892 | | /* This routine is here because of the proximity to the error handler. */ |
893 | | int |
894 | | gs_errorinfo_put_string(i_ctx_t *i_ctx_p, const char *str) |
895 | 0 | { |
896 | 0 | ref rstr; |
897 | 0 | ref *pderror; |
898 | 0 | int code = string_to_ref(str, &rstr, iimemory, "gs_errorinfo_put_string"); |
899 | |
|
900 | 0 | if (code < 0) |
901 | 0 | return code; |
902 | 0 | if (dict_find_string(systemdict, "$error", &pderror) <= 0 || |
903 | 0 | !r_has_type(pderror, t_dictionary) || |
904 | 0 | idict_put_string(pderror, "errorinfo", &rstr) < 0 |
905 | 0 | ) |
906 | 0 | return_error(gs_error_Fatal); |
907 | 0 | return 0; |
908 | 0 | } |
909 | | |
910 | | /* Main interpreter. */ |
911 | | /* If execution terminates normally, return gs_error_InterpreterExit. */ |
912 | | /* If an error occurs, leave the current object in *perror_object */ |
913 | | /* and return a (negative) error code. */ |
914 | | static int |
915 | | interp(/* lgtm [cpp/use-of-goto] */ |
916 | | i_ctx_t **pi_ctx_p /* context for execution, updated if resched */, |
917 | | const ref * pref /* object to interpret */, |
918 | | ref * perror_object) |
919 | 398k | { |
920 | 398k | i_ctx_t *i_ctx_p = *pi_ctx_p; |
921 | | /* |
922 | | * Note that iref may actually be either a ref * or a ref_packed *. |
923 | | * Certain DEC compilers assume that a ref * is ref-aligned even if it |
924 | | * is cast to a short *, and generate code on this assumption, leading |
925 | | * to "unaligned access" errors. For this reason, we declare |
926 | | * iref_packed, and use a macro to cast it to the more aligned type |
927 | | * where necessary (which is almost everywhere it is used). This may |
928 | | * lead to compiler warnings about "cast increases alignment |
929 | | * requirements", but this is less harmful than expensive traps at run |
930 | | * time. |
931 | | */ |
932 | 398k | register const ref_packed *iref_packed = (const ref_packed *)pref; |
933 | | /* |
934 | | * To make matters worse, some versions of gcc/egcs have a bug that |
935 | | * leads them to assume that if iref_packed is EVER cast to a ref *, |
936 | | * it is ALWAYS ref-aligned. We detect this in stdpre.h and provide |
937 | | * the following workaround: |
938 | | */ |
939 | | #ifdef ALIGNMENT_ALIASING_BUG |
940 | | const ref *iref_temp; |
941 | | # define IREF (iref_temp = (const ref *)iref_packed, iref_temp) |
942 | | #else |
943 | 50.9M | # define IREF ((const ref *)iref_packed) |
944 | 398k | #endif |
945 | 178M | #define SET_IREF(rp) (iref_packed = (const ref_packed *)(rp)) |
946 | 398k | register int icount = 0; /* # of consecutive tokens at iref */ |
947 | 398k | register os_ptr iosp = osp; /* private copy of osp */ |
948 | 398k | register es_ptr iesp = esp; /* private copy of esp */ |
949 | 398k | int code; |
950 | 398k | ref token; /* token read from file or string, */ |
951 | | /* must be declared in this scope */ |
952 | 398k | ref *pvalue; |
953 | 398k | ref refnull; |
954 | 398k | uint opindex; /* needed for oparrays */ |
955 | 398k | os_ptr whichp; |
956 | | |
957 | | /* |
958 | | * We have to make the error information into a struct; |
959 | | * otherwise, the Watcom compiler will assign it to registers |
960 | | * strictly on the basis of textual frequency. |
961 | | * We also have to use ref_assign_inline everywhere, and |
962 | | * avoid direct assignments of refs, so that esi and edi |
963 | | * will remain available on Intel processors. |
964 | | */ |
965 | 398k | struct interp_error_s { |
966 | 398k | int code; |
967 | 398k | int line; |
968 | 398k | const ref *obj; |
969 | 398k | ref full; |
970 | 398k | } ierror; |
971 | | |
972 | | /* |
973 | | * Get a pointer to the name table so that we can use the |
974 | | * inline version of name_index_ref. |
975 | | */ |
976 | 398k | const name_table *const int_nt = imemory->gs_lib_ctx->gs_name_table; |
977 | | |
978 | 398k | #define set_error(ecode)\ |
979 | 398k | { ierror.code = ecode; ierror.line = __LINE__; } |
980 | 398k | #define return_with_error(ecode, objp)\ |
981 | 398k | { set_error(ecode); ierror.obj = objp; goto rwe; } |
982 | 398k | #define return_with_error_iref(ecode)\ |
983 | 398k | { set_error(ecode); goto rwei; } |
984 | 398k | #define return_with_code_iref()\ |
985 | 398k | { ierror.line = __LINE__; goto rweci; } |
986 | 398k | #define return_with_stackoverflow(objp)\ |
987 | 398k | { o_stack.requested = 1; return_with_error(gs_error_stackoverflow, objp); } |
988 | 398k | #define return_with_stackoverflow_iref()\ |
989 | 398k | { o_stack.requested = 1; return_with_error_iref(gs_error_stackoverflow); } |
990 | | /* |
991 | | * If control reaches the special operators (x_add, etc.) as a result of |
992 | | * interpreting an executable name, iref points to the name, not the |
993 | | * operator, so the name rather than the operator becomes the error object, |
994 | | * which is wrong. We detect and handle this case explicitly when an error |
995 | | * occurs, so as not to slow down the non-error case. |
996 | | */ |
997 | 398k | #define return_with_error_tx_op(err_code)\ |
998 | 398k | { if (r_has_type(IREF, t_name)) {\ |
999 | 0 | return_with_error(err_code, pvalue);\ |
1000 | 0 | } else {\ |
1001 | 0 | return_with_error_iref(err_code);\ |
1002 | 0 | }\ |
1003 | 0 | } |
1004 | | |
1005 | 398k | int *ticks_left = &imemory_system->gs_lib_ctx->gcsignal; |
1006 | | |
1007 | | #if defined(DEBUG_TRACE_PS_OPERATORS) || defined(DEBUG) |
1008 | | int (*call_operator_fn)(op_proc_t, i_ctx_t *) = do_call_operator; |
1009 | | |
1010 | | if (gs_debug_c('!')) |
1011 | | call_operator_fn = do_call_operator_verbose; |
1012 | | #endif |
1013 | | |
1014 | 398k | *ticks_left = i_ctx_p->time_slice_ticks; |
1015 | | |
1016 | 398k | make_null(&ierror.full); |
1017 | 398k | ierror.obj = &ierror.full; |
1018 | 398k | make_null(&refnull); |
1019 | 398k | pvalue = &refnull; |
1020 | | |
1021 | | /* |
1022 | | * If we exceed the VMThreshold, set *ticks_left to -100 |
1023 | | * to alert the interpreter that we need to garbage collect. |
1024 | | */ |
1025 | 398k | set_gc_signal(i_ctx_p, -100); |
1026 | | |
1027 | 398k | esfile_clear_cache(); |
1028 | | /* |
1029 | | * From here on, if icount > 0, iref and icount correspond |
1030 | | * to the top entry on the execution stack: icount is the count |
1031 | | * of sequential entries remaining AFTER the current one. |
1032 | | */ |
1033 | 398k | #define IREF_NEXT(ip)\ |
1034 | 102M | ((const ref_packed *)((const ref *)(ip) + 1)) |
1035 | 398k | #define IREF_NEXT_EITHER(ip)\ |
1036 | 77.7M | ( r_is_packed(ip) ? (ip) + 1 : IREF_NEXT(ip) ) |
1037 | 398k | #define store_state(ep)\ |
1038 | 36.7M | ( icount > 0 ? (ep->value.const_refs = IREF + 1, r_set_size(ep, icount)) : 0 ) |
1039 | 398k | #define store_state_short(ep)\ |
1040 | 44.6M | ( icount > 0 ? (ep->value.packed = iref_packed + 1, r_set_size(ep, icount)) : 0 ) |
1041 | 398k | #define store_state_either(ep)\ |
1042 | 16.6M | ( icount > 0 ? (ep->value.packed = IREF_NEXT_EITHER(iref_packed), r_set_size(ep, icount)) : 0 ) |
1043 | 398k | #define next()\ |
1044 | 116M | if ( --icount > 0 ) { iref_packed = IREF_NEXT(iref_packed); goto top; } else goto out |
1045 | 398k | #define next_short()\ |
1046 | 115M | if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\ |
1047 | 115M | ++iref_packed; goto top |
1048 | 398k | #define next_either()\ |
1049 | 96.6M | if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\ |
1050 | 96.6M | iref_packed = IREF_NEXT_EITHER(iref_packed); goto top |
1051 | | |
1052 | | #if !PACKED_SPECIAL_OPS |
1053 | | # undef next_either |
1054 | | # define next_either() next() |
1055 | | # undef store_state_either |
1056 | | # define store_state_either(ep) store_state(ep) |
1057 | | #endif |
1058 | | |
1059 | | /* We want to recognize executable arrays here, */ |
1060 | | /* so we push the argument on the estack and enter */ |
1061 | | /* the loop at the bottom. */ |
1062 | 398k | if (iesp >= estop) |
1063 | 398k | return_with_error(gs_error_execstackoverflow, pref); |
1064 | 398k | ++iesp; |
1065 | 398k | ref_assign_inline(iesp, pref); |
1066 | 398k | goto bot; |
1067 | 441M | top: |
1068 | | /* |
1069 | | * This is the top of the interpreter loop. |
1070 | | * iref points to the ref being interpreted. |
1071 | | * Note that this might be an element of a packed array, |
1072 | | * not a real ref: we carefully arranged the first 16 bits of |
1073 | | * a ref and of a packed array element so they could be distinguished |
1074 | | * from each other. (See ghost.h and packed.h for more detail.) |
1075 | | */ |
1076 | 441M | INCR(top); |
1077 | | #ifdef DEBUG |
1078 | | /* Do a little validation on the top o-stack entry. */ |
1079 | | if (iosp >= osbot && |
1080 | | (r_type(iosp) == t__invalid || r_type(iosp) >= tx_next_op) |
1081 | | ) { |
1082 | | mlprintf(imemory, "Invalid value on o-stack!\n"); |
1083 | | return_with_error_iref(gs_error_Fatal); |
1084 | | } |
1085 | | if (gs_debug['I'] || |
1086 | | (gs_debug['i'] && |
1087 | | (r_is_packed(iref_packed) ? |
1088 | | r_packed_is_name(iref_packed) : |
1089 | | r_has_type(IREF, t_name))) |
1090 | | ) { |
1091 | | os_ptr save_osp = osp; /* avoid side-effects */ |
1092 | | es_ptr save_esp = esp; |
1093 | | |
1094 | | osp = iosp; |
1095 | | esp = iesp; |
1096 | | dmlprintf5(imemory, "d%u,e%u<%u>"PRI_INTPTR"(%d): ", |
1097 | | ref_stack_count(&d_stack), ref_stack_count(&e_stack), |
1098 | | ref_stack_count(&o_stack), (intptr_t)IREF, icount); |
1099 | | debug_print_ref(imemory, IREF); |
1100 | | if (iosp >= osbot) { |
1101 | | dmputs(imemory, " // "); |
1102 | | debug_print_ref(imemory, iosp); |
1103 | | } |
1104 | | dmputc(imemory, '\n'); |
1105 | | osp = save_osp; |
1106 | | esp = save_esp; |
1107 | | dmflush(imemory); |
1108 | | } |
1109 | | #endif |
1110 | | /* Objects that have attributes (arrays, dictionaries, files, and strings) */ |
1111 | | /* use lit and exec; other objects use plain and plain_exec. */ |
1112 | 441M | #define lit(t) type_xe_value(t, a_execute) |
1113 | 441M | #define exec(t) type_xe_value(t, a_execute + a_executable) |
1114 | 441M | #define nox(t) type_xe_value(t, 0) |
1115 | 441M | #define nox_exec(t) type_xe_value(t, a_executable) |
1116 | 441M | #define plain(t) type_xe_value(t, 0) |
1117 | 441M | #define plain_exec(t) type_xe_value(t, a_executable) |
1118 | | /* |
1119 | | * We have to populate enough cases of the switch statement to force |
1120 | | * some compilers to use a dispatch rather than a testing loop. |
1121 | | * What a nuisance! |
1122 | | */ |
1123 | 441M | switch (r_type_xe(iref_packed)) { |
1124 | | /* Access errors. */ |
1125 | 0 | #define cases_invalid()\ |
1126 | 0 | case plain(t__invalid): case plain_exec(t__invalid) |
1127 | 0 | cases_invalid(): |
1128 | 0 | return_with_error_iref(gs_error_Fatal); |
1129 | 0 | #define cases_nox()\ |
1130 | 0 | case nox_exec(t_array): case nox_exec(t_dictionary):\ |
1131 | 0 | case nox_exec(t_file): case nox_exec(t_string):\ |
1132 | 0 | case nox_exec(t_mixedarray): case nox_exec(t_shortarray) |
1133 | 0 | cases_nox(): |
1134 | 0 | return_with_error_iref(gs_error_invalidaccess); |
1135 | | /* |
1136 | | * Literal objects. We have to enumerate all the types. |
1137 | | * In fact, we have to include some extra plain_exec entries |
1138 | | * just to populate the switch. We break them up into groups |
1139 | | * to avoid overflowing some preprocessors. |
1140 | | */ |
1141 | 0 | #define cases_lit_1()\ |
1142 | 1.10M | case lit(t_array): case nox(t_array):\ |
1143 | 5.27M | case plain(t_boolean): case plain_exec(t_boolean):\ |
1144 | 8.14M | case lit(t_dictionary): case nox(t_dictionary) |
1145 | 0 | #define cases_lit_2()\ |
1146 | 8.15M | case lit(t_file): case nox(t_file):\ |
1147 | 8.15M | case plain(t_fontID): case plain_exec(t_fontID):\ |
1148 | 13.5M | case plain(t_integer): case plain_exec(t_integer):\ |
1149 | 13.5M | case plain(t_mark): case plain_exec(t_mark) |
1150 | 0 | #define cases_lit_3()\ |
1151 | 20.7M | case plain(t_name):\ |
1152 | 23.3M | case plain(t_null):\ |
1153 | 23.3M | case plain(t_oparray):\ |
1154 | 23.3M | case plain(t_operator) |
1155 | 0 | #define cases_lit_4()\ |
1156 | 24.2M | case plain(t_real): case plain_exec(t_real):\ |
1157 | 24.2M | case plain(t_save): case plain_exec(t_save):\ |
1158 | 25.2M | case lit(t_string): case nox(t_string) |
1159 | 0 | #define cases_lit_5()\ |
1160 | 32.5M | case lit(t_mixedarray): case nox(t_mixedarray):\ |
1161 | 32.5M | case lit(t_shortarray): case nox(t_shortarray):\ |
1162 | 32.5M | case plain(t_device): case plain_exec(t_device):\ |
1163 | 32.5M | case plain(t_struct): case plain_exec(t_struct):\ |
1164 | 32.5M | case plain(t_astruct): case plain_exec(t_astruct):\ |
1165 | 32.5M | case plain(t_pdfctx): case plain_exec(t_pdfctx) |
1166 | | /* Executable arrays are treated as literals in direct execution. */ |
1167 | 0 | #define cases_lit_array()\ |
1168 | 48.0M | case exec(t_array): case exec(t_mixedarray): case exec(t_shortarray) |
1169 | 25.6M | cases_lit_1(): |
1170 | 74.0M | cases_lit_2(): |
1171 | 83.9M | cases_lit_3(): |
1172 | 137M | cases_lit_4(): |
1173 | 137M | cases_lit_5(): |
1174 | 30.8M | INCR(lit); |
1175 | 30.8M | break; |
1176 | 48.0M | cases_lit_array(): |
1177 | 48.0M | INCR(lit_array); |
1178 | 48.0M | break; |
1179 | | /* Special operators. */ |
1180 | 889k | case plain_exec(tx_op_add): |
1181 | 2.77M | x_add: INCR(x_add); |
1182 | 2.77M | osp = iosp; /* sync o_stack */ |
1183 | 2.77M | if ((code = zop_add(i_ctx_p)) < 0) |
1184 | 2.77M | return_with_error_tx_op(code); |
1185 | 2.77M | iosp--; |
1186 | 2.77M | next_either(); |
1187 | 135k | case plain_exec(tx_op_def): |
1188 | 1.47M | x_def: INCR(x_def); |
1189 | 1.47M | osp = iosp; /* sync o_stack */ |
1190 | 1.47M | if ((code = zop_def(i_ctx_p)) < 0) |
1191 | 1.47M | return_with_error_tx_op(code); |
1192 | 1.47M | iosp -= 2; |
1193 | 1.47M | next_either(); |
1194 | 2.17M | case plain_exec(tx_op_dup): |
1195 | 27.1M | x_dup: INCR(x_dup); |
1196 | 27.1M | if (iosp < osbot) |
1197 | 27.1M | return_with_error_tx_op(gs_error_stackunderflow); |
1198 | 27.1M | if (iosp >= ostop) { |
1199 | 0 | o_stack.requested = 1; |
1200 | 0 | return_with_error_tx_op(gs_error_stackoverflow); |
1201 | 0 | } |
1202 | 27.1M | iosp++; |
1203 | 27.1M | ref_assign_inline(iosp, iosp - 1); |
1204 | 27.1M | next_either(); |
1205 | 3.73M | case plain_exec(tx_op_exch): |
1206 | 15.6M | x_exch: INCR(x_exch); |
1207 | 15.6M | if (iosp <= osbot) |
1208 | 15.6M | return_with_error_tx_op(gs_error_stackunderflow); |
1209 | 15.6M | ref_assign_inline(&token, iosp); |
1210 | 15.6M | ref_assign_inline(iosp, iosp - 1); |
1211 | 15.6M | ref_assign_inline(iosp - 1, &token); |
1212 | 15.6M | next_either(); |
1213 | 3.31M | case plain_exec(tx_op_if): |
1214 | 12.0M | x_if: INCR(x_if); |
1215 | 12.0M | if (!r_is_proc(iosp)) |
1216 | 12.0M | return_with_error_tx_op(check_proc_failed(iosp)); |
1217 | 12.0M | if (!r_has_type(iosp - 1, t_boolean)) |
1218 | 0 | return_with_error_tx_op((iosp <= osbot ? |
1219 | 12.0M | gs_error_stackunderflow : gs_error_typecheck)); |
1220 | 12.0M | if (!iosp[-1].value.boolval) { |
1221 | 9.50M | iosp -= 2; |
1222 | 9.50M | next_either(); |
1223 | 9.50M | } |
1224 | 2.49M | if (iesp >= estop) |
1225 | 2.49M | return_with_error_tx_op(gs_error_execstackoverflow); |
1226 | 2.49M | store_state_either(iesp); |
1227 | 2.49M | whichp = iosp; |
1228 | 2.49M | iosp -= 2; |
1229 | 2.49M | goto ifup; |
1230 | 3.18M | case plain_exec(tx_op_ifelse): |
1231 | 14.1M | x_ifelse: INCR(x_ifelse); |
1232 | 14.1M | if (!r_is_proc(iosp)) |
1233 | 14.1M | return_with_error_tx_op(check_proc_failed(iosp)); |
1234 | 14.1M | if (!r_is_proc(iosp - 1)) |
1235 | 14.1M | return_with_error_tx_op(check_proc_failed(iosp - 1)); |
1236 | 14.1M | if (!r_has_type(iosp - 2, t_boolean)) |
1237 | 0 | return_with_error_tx_op((iosp < osbot + 2 ? |
1238 | 14.1M | gs_error_stackunderflow : gs_error_typecheck)); |
1239 | 14.1M | if (iesp >= estop) |
1240 | 14.1M | return_with_error_tx_op(gs_error_execstackoverflow); |
1241 | 14.1M | store_state_either(iesp); |
1242 | 14.1M | whichp = (iosp[-2].value.boolval ? iosp - 1 : iosp); |
1243 | 14.1M | iosp -= 3; |
1244 | | /* Open code "up" for the array case(s) */ |
1245 | 16.6M | ifup:if ((icount = r_size(whichp) - 1) <= 0) { |
1246 | 4.62M | if (icount < 0) |
1247 | 0 | goto up; /* 0-element proc */ |
1248 | 4.62M | SET_IREF(whichp->value.refs); /* 1-element proc */ |
1249 | 4.62M | if (--(*ticks_left) > 0) |
1250 | 4.62M | goto top; |
1251 | 4.62M | } |
1252 | 12.0M | ++iesp; |
1253 | | /* Do a ref_assign, but also set iref. */ |
1254 | 12.0M | iesp->tas = whichp->tas; |
1255 | 12.0M | SET_IREF(iesp->value.refs = whichp->value.refs); |
1256 | 12.0M | if (--(*ticks_left) > 0) |
1257 | 12.0M | goto top; |
1258 | 1.42k | goto slice; |
1259 | 485k | case plain_exec(tx_op_index): |
1260 | 10.8M | x_index: INCR(x_index); |
1261 | 10.8M | osp = iosp; /* zindex references o_stack */ |
1262 | 10.8M | if ((code = zindex(i_ctx_p)) < 0) |
1263 | 10.8M | return_with_error_tx_op(code); |
1264 | 10.8M | next_either(); |
1265 | 5.55M | case plain_exec(tx_op_pop): |
1266 | 20.2M | x_pop: INCR(x_pop); |
1267 | 20.2M | if (iosp < osbot) |
1268 | 20.2M | return_with_error_tx_op(gs_error_stackunderflow); |
1269 | 20.2M | iosp--; |
1270 | 20.2M | next_either(); |
1271 | 1.81M | case plain_exec(tx_op_roll): |
1272 | 8.52M | x_roll: INCR(x_roll); |
1273 | 8.52M | osp = iosp; /* zroll references o_stack */ |
1274 | 8.52M | if ((code = zroll(i_ctx_p)) < 0) |
1275 | 8.52M | return_with_error_tx_op(code); |
1276 | 8.52M | iosp -= 2; |
1277 | 8.52M | next_either(); |
1278 | 262k | case plain_exec(tx_op_sub): |
1279 | 462k | x_sub: INCR(x_sub); |
1280 | 462k | osp = iosp; /* sync o_stack */ |
1281 | 462k | if ((code = zop_sub(i_ctx_p)) < 0) |
1282 | 462k | return_with_error_tx_op(code); |
1283 | 462k | iosp--; |
1284 | 462k | next_either(); |
1285 | | /* Executable types. */ |
1286 | 13.9k | case plain_exec(t_null): |
1287 | 13.9k | goto bot; |
1288 | 2.71M | case plain_exec(t_oparray): |
1289 | | /* Replace with the definition and go again. */ |
1290 | 2.71M | INCR(exec_array); |
1291 | 2.71M | opindex = op_index(IREF); |
1292 | 2.71M | pvalue = (ref *)IREF->value.const_refs; |
1293 | 5.56M | opst: /* Prepare to call a t_oparray procedure in *pvalue. */ |
1294 | 5.56M | store_state(iesp); |
1295 | 12.9M | oppr: /* Record the stack depths in case of failure. */ |
1296 | 12.9M | if (iesp >= estop - 4) |
1297 | 12.9M | return_with_error_iref(gs_error_execstackoverflow); |
1298 | 12.9M | iesp += 5; |
1299 | 12.9M | osp = iosp; /* ref_stack_count_inline needs this */ |
1300 | 12.9M | make_mark_estack(iesp - 4, es_other, oparray_cleanup); |
1301 | 12.9M | make_int(iesp - 3, opindex); /* for .errorexec effect */ |
1302 | 12.9M | make_int(iesp - 2, ref_stack_count_inline(&o_stack)); |
1303 | 12.9M | make_int(iesp - 1, ref_stack_count_inline(&d_stack)); |
1304 | 12.9M | make_op_estack(iesp, oparray_pop); |
1305 | 12.9M | goto pr; |
1306 | 1.83M | prst: /* Prepare to call the procedure (array) in *pvalue. */ |
1307 | 1.83M | store_state(iesp); |
1308 | 16.4M | pr: /* Call the array in *pvalue. State has been stored. */ |
1309 | | /* We want to do this check before assigning icount so icount is correct |
1310 | | * in the event of a gs_error_execstackoverflow |
1311 | | */ |
1312 | 16.4M | if (iesp >= estop) { |
1313 | 0 | return_with_error_iref(gs_error_execstackoverflow); |
1314 | 0 | } |
1315 | 16.4M | if ((icount = r_size(pvalue) - 1) <= 0) { |
1316 | 173k | if (icount < 0) |
1317 | 669 | goto up; /* 0-element proc */ |
1318 | 173k | SET_IREF(pvalue->value.refs); /* 1-element proc */ |
1319 | 173k | if (--(*ticks_left) > 0) |
1320 | 173k | goto top; |
1321 | 173k | } |
1322 | 16.2M | ++iesp; |
1323 | | /* Do a ref_assign, but also set iref. */ |
1324 | 16.2M | iesp->tas = pvalue->tas; |
1325 | 16.2M | SET_IREF(iesp->value.refs = pvalue->value.refs); |
1326 | 16.2M | if (--(*ticks_left) > 0) |
1327 | 16.2M | goto top; |
1328 | 31 | goto slice; |
1329 | 61.2M | case plain_exec(t_operator): |
1330 | 61.2M | INCR(exec_operator); |
1331 | 61.2M | if (--(*ticks_left) <= 0) { /* The following doesn't work, */ |
1332 | | /* and I can't figure out why. */ |
1333 | | /****** goto sst; ******/ |
1334 | 764 | } |
1335 | 61.2M | esp = iesp; /* save for operator */ |
1336 | 61.2M | osp = iosp; /* ditto */ |
1337 | | /* Operator routines take osp as an argument. */ |
1338 | | /* This is just a convenience, since they adjust */ |
1339 | | /* osp themselves to reflect the results. */ |
1340 | | /* Operators that (net) push information on the */ |
1341 | | /* operand stack must check for overflow: */ |
1342 | | /* this normally happens automatically through */ |
1343 | | /* the push macro (in oper.h). */ |
1344 | | /* Operators that do not typecheck their operands, */ |
1345 | | /* or take a variable number of arguments, */ |
1346 | | /* must check explicitly for stack underflow. */ |
1347 | | /* (See oper.h for more detail.) */ |
1348 | | /* Note that each case must set iosp = osp: */ |
1349 | | /* this is so we can switch on code without having to */ |
1350 | | /* store it and reload it (for dumb compilers). */ |
1351 | 61.2M | switch (code = call_operator(real_opproc(IREF), i_ctx_p)) { |
1352 | 28.6M | case 0: /* normal case */ |
1353 | 28.7M | case 1: /* alternative success case */ |
1354 | 28.7M | iosp = osp; |
1355 | 28.7M | next(); |
1356 | 18.0M | case o_push_estack: /* store the state and go to up */ |
1357 | 18.0M | store_state(iesp); |
1358 | 24.7M | opush:iosp = osp; |
1359 | 24.7M | iesp = esp; |
1360 | 24.7M | if (--(*ticks_left) > 0) |
1361 | 24.7M | goto up; |
1362 | 44 | goto slice; |
1363 | 14.4M | case o_pop_estack: /* just go to up */ |
1364 | 14.4M | opop:iosp = osp; |
1365 | 14.4M | if (esp == iesp) |
1366 | 12.8k | goto bot; |
1367 | 14.4M | iesp = esp; |
1368 | 14.4M | goto up; |
1369 | 0 | case gs_error_Remap_Color: |
1370 | 0 | oe_remap: store_state(iesp); |
1371 | 0 | remap: if (iesp + 2 >= estop) { |
1372 | 0 | esp = iesp; |
1373 | 0 | code = ref_stack_extend(&e_stack, 2); |
1374 | 0 | if (code < 0) |
1375 | 0 | return_with_error_iref(code); |
1376 | 0 | iesp = esp; |
1377 | 0 | } |
1378 | 0 | packed_get(imemory, iref_packed, iesp + 1); |
1379 | 0 | make_oper(iesp + 2, 0, |
1380 | 0 | r_ptr(&istate->remap_color_info, |
1381 | 0 | int_remap_color_info_t)->proc); |
1382 | 0 | iesp += 2; |
1383 | 0 | goto up; |
1384 | 61.2M | } |
1385 | 20.4k | iosp = osp; |
1386 | 20.4k | iesp = esp; |
1387 | 20.4k | return_with_code_iref(); |
1388 | 14.8M | case plain_exec(t_name): |
1389 | 14.8M | INCR(exec_name); |
1390 | 14.8M | pvalue = IREF->value.pname->pvalue; |
1391 | 14.8M | if (!pv_valid(pvalue)) { |
1392 | 13.9M | uint nidx = names_index(int_nt, IREF); |
1393 | 13.9M | uint htemp = 0; |
1394 | | |
1395 | 13.9M | INCR(find_name); |
1396 | 13.9M | if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) |
1397 | 13.9M | return_with_error_iref(gs_error_undefined); |
1398 | 13.9M | } |
1399 | | /* Dispatch on the type of the value. */ |
1400 | | /* Again, we have to over-populate the switch. */ |
1401 | 14.8M | switch (r_type_xe(pvalue)) { |
1402 | 0 | cases_invalid(): |
1403 | 0 | return_with_error_iref(gs_error_Fatal); |
1404 | 0 | cases_nox(): /* access errors */ |
1405 | 0 | return_with_error_iref(gs_error_invalidaccess); |
1406 | 3.35M | cases_lit_1(): |
1407 | 12.5M | cases_lit_2(): |
1408 | 12.5M | cases_lit_3(): |
1409 | 10.1M | cases_lit_4(): |
1410 | 20.3M | cases_lit_5(): |
1411 | 20.3M | INCR(name_lit); |
1412 | | /* Just push the value */ |
1413 | 20.3M | if (iosp >= ostop) |
1414 | 1.69M | return_with_stackoverflow(pvalue); |
1415 | 1.69M | ++iosp; |
1416 | 1.69M | ref_assign_inline(iosp, pvalue); |
1417 | 1.69M | next(); |
1418 | 5.48k | case exec(t_array): |
1419 | 1.59M | case exec(t_mixedarray): |
1420 | 1.83M | case exec(t_shortarray): |
1421 | 1.83M | INCR(name_proc); |
1422 | | /* This is an executable procedure, execute it. */ |
1423 | 1.83M | goto prst; |
1424 | 8.19k | case plain_exec(tx_op_add): |
1425 | 8.19k | goto x_add; |
1426 | 1.15M | case plain_exec(tx_op_def): |
1427 | 1.15M | goto x_def; |
1428 | 112k | case plain_exec(tx_op_dup): |
1429 | 112k | goto x_dup; |
1430 | 101k | case plain_exec(tx_op_exch): |
1431 | 101k | goto x_exch; |
1432 | 148k | case plain_exec(tx_op_if): |
1433 | 148k | goto x_if; |
1434 | 39.6k | case plain_exec(tx_op_ifelse): |
1435 | 39.6k | goto x_ifelse; |
1436 | 53.3k | case plain_exec(tx_op_index): |
1437 | 53.3k | goto x_index; |
1438 | 97.1k | case plain_exec(tx_op_pop): |
1439 | 97.1k | goto x_pop; |
1440 | 23.2k | case plain_exec(tx_op_roll): |
1441 | 23.2k | goto x_roll; |
1442 | 2.05k | case plain_exec(tx_op_sub): |
1443 | 2.05k | goto x_sub; |
1444 | 0 | case plain_exec(t_null): |
1445 | 0 | goto bot; |
1446 | 2.84M | case plain_exec(t_oparray): |
1447 | 2.84M | INCR(name_oparray); |
1448 | 2.84M | opindex = op_index(pvalue); |
1449 | 2.84M | pvalue = (ref *)pvalue->value.const_refs; |
1450 | 2.84M | goto opst; |
1451 | 6.73M | case plain_exec(t_operator): |
1452 | 6.73M | INCR(name_operator); |
1453 | 6.73M | { /* Shortcut for operators. */ |
1454 | | /* See above for the logic. */ |
1455 | 6.73M | if (--(*ticks_left) <= 0) { /* The following doesn't work, */ |
1456 | | /* and I can't figure out why. */ |
1457 | | /****** goto sst; ******/ |
1458 | 0 | } |
1459 | 6.73M | esp = iesp; |
1460 | 6.73M | osp = iosp; |
1461 | 6.73M | switch (code = call_operator(real_opproc(pvalue), |
1462 | 6.73M | i_ctx_p) |
1463 | 6.73M | ) { |
1464 | 6.56M | case 0: /* normal case */ |
1465 | 6.64M | case 1: /* alternative success case */ |
1466 | 6.64M | iosp = osp; |
1467 | 6.64M | next(); |
1468 | 75.5k | case o_push_estack: |
1469 | 75.5k | store_state(iesp); |
1470 | 75.5k | goto opush; |
1471 | 6.14k | case o_pop_estack: |
1472 | 6.14k | goto opop; |
1473 | 0 | case gs_error_Remap_Color: |
1474 | 0 | goto oe_remap; |
1475 | 6.73M | } |
1476 | 683 | iosp = osp; |
1477 | 683 | iesp = esp; |
1478 | 683 | return_with_error(code, pvalue); |
1479 | 0 | } |
1480 | 0 | case plain_exec(t_name): |
1481 | 0 | case exec(t_file): |
1482 | 0 | case exec(t_string): |
1483 | 7.70k | default: |
1484 | | /* Not a procedure, reinterpret it. */ |
1485 | 7.70k | store_state(iesp); |
1486 | 7.70k | icount = 0; |
1487 | 7.70k | SET_IREF(pvalue); |
1488 | 7.70k | goto top; |
1489 | 14.8M | } |
1490 | 11.1M | case exec(t_file): |
1491 | 11.1M | { /* Executable file. Read the next token and interpret it. */ |
1492 | 11.1M | stream *s; |
1493 | 11.1M | scanner_state sstate; |
1494 | | |
1495 | 11.1M | check_read_known_file(i_ctx_p, s, IREF, return_with_error_iref); |
1496 | 28.2M | rt: |
1497 | 28.2M | if (iosp >= ostop) /* check early */ |
1498 | 28.2M | return_with_stackoverflow_iref(); |
1499 | 28.2M | osp = iosp; /* gs_scan_token uses ostack */ |
1500 | 28.2M | gs_scanner_init_options(&sstate, IREF, i_ctx_p->scanner_options); |
1501 | 28.2M | again: |
1502 | 28.2M | code = gs_scan_token(i_ctx_p, &token, &sstate); |
1503 | 28.2M | iosp = osp; /* ditto */ |
1504 | 28.2M | switch (code) { |
1505 | 28.2M | case 0: /* read a token */ |
1506 | | /* It's worth checking for literals, which make up */ |
1507 | | /* the majority of input tokens, before storing the */ |
1508 | | /* state on the e-stack. Note that because of //, */ |
1509 | | /* the token may have *any* type and attributes. */ |
1510 | | /* Note also that executable arrays aren't executed */ |
1511 | | /* at the top level -- they're treated as literals. */ |
1512 | 28.2M | if (!r_has_attr(&token, a_executable) || |
1513 | 28.2M | r_is_array(&token) |
1514 | 28.2M | ) { /* If gs_scan_token used the o-stack, */ |
1515 | | /* we know we can do a push now; if not, */ |
1516 | | /* the pre-check is still valid. */ |
1517 | 17.0M | iosp++; |
1518 | 17.0M | ref_assign_inline(iosp, &token); |
1519 | 17.0M | goto rt; |
1520 | 17.0M | } |
1521 | 11.1M | store_state(iesp); |
1522 | | /* Push the file on the e-stack */ |
1523 | 11.1M | if (iesp >= estop) |
1524 | 11.1M | return_with_error_iref(gs_error_execstackoverflow); |
1525 | 11.1M | esfile_set_cache(++iesp); |
1526 | 11.1M | ref_assign_inline(iesp, IREF); |
1527 | 11.1M | SET_IREF(&token); |
1528 | 11.1M | icount = 0; |
1529 | 11.1M | goto top; |
1530 | 0 | case gs_error_undefined: /* //name undefined */ |
1531 | 0 | gs_scanner_error_object(i_ctx_p, &sstate, &token); |
1532 | 0 | return_with_error(code, &token); |
1533 | 5.46k | case scan_EOF: /* end of file */ |
1534 | 5.46k | esfile_clear_cache(); |
1535 | 5.46k | goto bot; |
1536 | 0 | case scan_BOS: |
1537 | | /* Binary object sequences */ |
1538 | | /* ARE executed at the top level. */ |
1539 | 0 | store_state(iesp); |
1540 | | /* Push the file on the e-stack */ |
1541 | 0 | if (iesp >= estop) |
1542 | 0 | return_with_error_iref(gs_error_execstackoverflow); |
1543 | 0 | esfile_set_cache(++iesp); |
1544 | 0 | ref_assign_inline(iesp, IREF); |
1545 | 0 | pvalue = &token; |
1546 | 0 | goto pr; |
1547 | 5.49k | case scan_Refill: |
1548 | 5.49k | store_state(iesp); |
1549 | | /* iref may point into the exec stack; */ |
1550 | | /* save its referent now. */ |
1551 | 5.49k | ref_assign_inline(&token, IREF); |
1552 | | /* Push the file on the e-stack */ |
1553 | 5.49k | if (iesp >= estop) |
1554 | 5.49k | return_with_error_iref(gs_error_execstackoverflow); |
1555 | 5.49k | ++iesp; |
1556 | 5.49k | ref_assign_inline(iesp, &token); |
1557 | 5.49k | esp = iesp; |
1558 | 5.49k | osp = iosp; |
1559 | 5.49k | code = gs_scan_handle_refill(i_ctx_p, &sstate, true, |
1560 | 5.49k | ztokenexec_continue); |
1561 | 5.50k | scan_cont: |
1562 | 5.50k | iosp = osp; |
1563 | 5.50k | iesp = esp; |
1564 | 5.50k | switch (code) { |
1565 | 27 | case 0: |
1566 | 27 | iesp--; /* don't push the file */ |
1567 | 27 | goto again; /* stacks are unchanged */ |
1568 | 5.47k | case o_push_estack: |
1569 | 5.47k | esfile_clear_cache(); |
1570 | 5.47k | if (--(*ticks_left) > 0) |
1571 | 5.47k | goto up; |
1572 | 0 | goto slice; |
1573 | 5.50k | } |
1574 | | /* must be an error */ |
1575 | 0 | iesp--; /* don't push the file */ |
1576 | 0 | return_with_code_iref(); |
1577 | 0 | case scan_Comment: |
1578 | 10 | case scan_DSC_Comment: { |
1579 | | /* See scan_Refill above for comments. */ |
1580 | 10 | ref file_token; |
1581 | | |
1582 | 10 | store_state(iesp); |
1583 | 10 | ref_assign_inline(&file_token, IREF); |
1584 | 10 | if (iesp >= estop) |
1585 | 10 | return_with_error_iref(gs_error_execstackoverflow); |
1586 | 10 | ++iesp; |
1587 | 10 | ref_assign_inline(iesp, &file_token); |
1588 | 10 | esp = iesp; |
1589 | 10 | osp = iosp; |
1590 | 10 | code = ztoken_handle_comment(i_ctx_p, |
1591 | 10 | &sstate, &token, |
1592 | 10 | code, true, true, |
1593 | 10 | ztokenexec_continue); |
1594 | 10 | } |
1595 | 0 | goto scan_cont; |
1596 | 7 | default: /* error */ |
1597 | 7 | ref_assign_inline(&token, IREF); |
1598 | 7 | gs_scanner_error_object(i_ctx_p, &sstate, &token); |
1599 | 7 | return_with_error(code, &token); |
1600 | 28.2M | } |
1601 | 28.2M | } |
1602 | 3.41k | case exec(t_string): |
1603 | 3.41k | { /* Executable string. Read a token and interpret it. */ |
1604 | 3.41k | stream ss; |
1605 | 3.41k | scanner_state sstate; |
1606 | | |
1607 | 3.41k | s_init(&ss, NULL); |
1608 | 3.41k | sread_string(&ss, IREF->value.bytes, r_size(IREF)); |
1609 | 3.41k | gs_scanner_init_stream_options(&sstate, &ss, SCAN_FROM_STRING); |
1610 | 3.41k | osp = iosp; /* gs_scan_token uses ostack */ |
1611 | 3.41k | code = gs_scan_token(i_ctx_p, &token, &sstate); |
1612 | 3.41k | iosp = osp; /* ditto */ |
1613 | 3.41k | switch (code) { |
1614 | 3.41k | case 0: /* read a token */ |
1615 | 3.41k | case scan_BOS: /* binary object sequence */ |
1616 | 3.41k | store_state(iesp); |
1617 | | /* If the updated string isn't empty, push it back */ |
1618 | | /* on the e-stack. */ |
1619 | 3.41k | { |
1620 | | /* This is just the available buffer size, so |
1621 | | a signed int is plenty big |
1622 | | */ |
1623 | 3.41k | int size = sbufavailable(&ss); |
1624 | | |
1625 | 3.41k | if (size > 0) { |
1626 | 0 | if (iesp >= estop) |
1627 | 0 | return_with_error_iref(gs_error_execstackoverflow); |
1628 | 0 | ++iesp; |
1629 | 0 | iesp->tas.type_attrs = IREF->tas.type_attrs; |
1630 | 0 | iesp->value.const_bytes = sbufptr(&ss); |
1631 | 0 | r_set_size(iesp, size); |
1632 | 0 | } |
1633 | 3.41k | } |
1634 | 3.41k | if (code == 0) { |
1635 | 3.41k | SET_IREF(&token); |
1636 | 3.41k | icount = 0; |
1637 | 3.41k | goto top; |
1638 | 3.41k | } |
1639 | | /* Handle BOS specially */ |
1640 | 0 | pvalue = &token; |
1641 | 0 | goto pr; |
1642 | 0 | case scan_EOF: /* end of string */ |
1643 | 0 | goto bot; |
1644 | 0 | case scan_Refill: /* error */ |
1645 | 0 | code = gs_note_error(gs_error_syntaxerror); |
1646 | | /* fall through */ |
1647 | 0 | default: /* error */ |
1648 | 0 | ref_assign_inline(&token, IREF); |
1649 | 0 | gs_scanner_error_object(i_ctx_p, &sstate, &token); |
1650 | 0 | return_with_error(code, &token); |
1651 | 3.41k | } |
1652 | 3.41k | } |
1653 | | /* Handle packed arrays here by re-dispatching. */ |
1654 | | /* This also picks up some anomalous cases of non-packed arrays. */ |
1655 | 250M | default: |
1656 | 250M | { |
1657 | 250M | uint index; |
1658 | | |
1659 | 250M | switch (*iref_packed >> r_packed_type_shift) { |
1660 | 7.70k | case pt_full_ref: |
1661 | 7.70k | case pt_full_ref + 1: |
1662 | 7.70k | INCR(p_full); |
1663 | 7.70k | if (iosp >= ostop) |
1664 | 7.70k | return_with_stackoverflow_iref(); |
1665 | | /* We know this can't be an executable object */ |
1666 | | /* requiring special handling, so we just push it. */ |
1667 | 7.70k | ++iosp; |
1668 | | /* We know that refs are properly aligned: */ |
1669 | | /* see packed.h for details. */ |
1670 | 7.70k | ref_assign_inline(iosp, IREF); |
1671 | 7.70k | next(); |
1672 | 166M | case pt_executable_operator: |
1673 | 166M | index = *iref_packed & packed_value_mask; |
1674 | 166M | if (--(*ticks_left) <= 0) { /* The following doesn't work, */ |
1675 | | /* and I can't figure out why. */ |
1676 | | /****** goto sst_short; ******/ |
1677 | 1.52k | } |
1678 | 166M | if (!op_index_is_operator(index)) { |
1679 | 7.38M | INCR(p_exec_oparray); |
1680 | 7.38M | store_state_short(iesp); |
1681 | 7.38M | opindex = index; |
1682 | | /* Call the operator procedure. */ |
1683 | 7.38M | index -= op_def_count; |
1684 | 7.38M | pvalue = (ref *) |
1685 | 7.38M | (index < r_size(&i_ctx_p->op_array_table_global.table) ? |
1686 | 7.38M | i_ctx_p->op_array_table_global.table.value.const_refs + |
1687 | 7.38M | index : |
1688 | 7.38M | i_ctx_p->op_array_table_local.table.value.const_refs + |
1689 | 0 | (index - r_size(&i_ctx_p->op_array_table_global.table))); |
1690 | 7.38M | goto oppr; |
1691 | 7.38M | } |
1692 | 166M | INCR(p_exec_operator); |
1693 | | /* See the main plain_exec(t_operator) case */ |
1694 | | /* for details of what happens here. */ |
1695 | 158M | #if PACKED_SPECIAL_OPS |
1696 | | /* |
1697 | | * We arranged in iinit.c that the special ops |
1698 | | * have operator indices starting at 1. |
1699 | | * |
1700 | | * The (int) cast in the next line is required |
1701 | | * because some compilers don't allow arithmetic |
1702 | | * involving two different enumerated types. |
1703 | | */ |
1704 | 158M | # define case_xop(xop) case xop - (int)tx_op + 1 |
1705 | 158M | switch (index) { |
1706 | 1.88M | case_xop(tx_op_add):goto x_add; |
1707 | 183k | case_xop(tx_op_def):goto x_def; |
1708 | 24.8M | case_xop(tx_op_dup):goto x_dup; |
1709 | 11.8M | case_xop(tx_op_exch):goto x_exch; |
1710 | 8.54M | case_xop(tx_op_if):goto x_if; |
1711 | 10.9M | case_xop(tx_op_ifelse):goto x_ifelse; |
1712 | 10.3M | case_xop(tx_op_index):goto x_index; |
1713 | 14.5M | case_xop(tx_op_pop):goto x_pop; |
1714 | 6.68M | case_xop(tx_op_roll):goto x_roll; |
1715 | 197k | case_xop(tx_op_sub):goto x_sub; |
1716 | 0 | case 0: /* for dumb compilers */ |
1717 | 68.8M | default: |
1718 | 68.8M | ; |
1719 | 158M | } |
1720 | 68.8M | # undef case_xop |
1721 | 68.8M | #endif |
1722 | 158M | INCR(p_exec_non_x_operator); |
1723 | 68.8M | esp = iesp; |
1724 | 68.8M | osp = iosp; |
1725 | 68.8M | switch (code = call_operator(op_index_proc(index), i_ctx_p)) { |
1726 | 61.3M | case 0: |
1727 | 61.3M | case 1: |
1728 | 61.3M | iosp = osp; |
1729 | 61.3M | next_short(); |
1730 | 6.61M | case o_push_estack: |
1731 | 6.61M | store_state_short(iesp); |
1732 | 6.61M | goto opush; |
1733 | 476k | case o_pop_estack: |
1734 | 476k | iosp = osp; |
1735 | 476k | if (esp == iesp) { |
1736 | 15.3k | next_short(); |
1737 | 15.3k | } |
1738 | 460k | iesp = esp; |
1739 | 460k | goto up; |
1740 | 0 | case gs_error_Remap_Color: |
1741 | 0 | store_state_short(iesp); |
1742 | 0 | goto remap; |
1743 | 68.8M | } |
1744 | 356k | iosp = osp; |
1745 | 356k | iesp = esp; |
1746 | 356k | return_with_code_iref(); |
1747 | 43.5M | case pt_integer: |
1748 | 43.5M | INCR(p_integer); |
1749 | 43.5M | if (iosp >= ostop) |
1750 | 43.5M | return_with_stackoverflow_iref(); |
1751 | 43.5M | ++iosp; |
1752 | 43.5M | make_int(iosp, |
1753 | 43.5M | ((int)*iref_packed & packed_int_mask) + |
1754 | 43.5M | packed_min_intval); |
1755 | 43.5M | next_short(); |
1756 | 9.44M | case pt_literal_name: |
1757 | 9.44M | INCR(p_lit_name); |
1758 | 9.44M | { |
1759 | 9.44M | uint nidx = *iref_packed & packed_value_mask; |
1760 | | |
1761 | 9.44M | if (iosp >= ostop) |
1762 | 9.44M | return_with_stackoverflow_iref(); |
1763 | 9.44M | ++iosp; |
1764 | 9.44M | name_index_ref_inline(int_nt, nidx, iosp); |
1765 | 9.44M | next_short(); |
1766 | 9.44M | } |
1767 | 31.7M | case pt_executable_name: |
1768 | 31.7M | INCR(p_exec_name); |
1769 | 31.7M | { |
1770 | 31.7M | uint nidx = *iref_packed & packed_value_mask; |
1771 | | |
1772 | 31.7M | pvalue = name_index_ptr_inline(int_nt, nidx)->pvalue; |
1773 | 31.7M | if (!pv_valid(pvalue)) { |
1774 | 30.3M | uint htemp = 0; |
1775 | | |
1776 | 30.3M | INCR(p_find_name); |
1777 | 30.3M | if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) { |
1778 | 0 | names_index_ref(int_nt, nidx, &token); |
1779 | 0 | return_with_error(gs_error_undefined, &token); |
1780 | 0 | } |
1781 | 30.3M | } |
1782 | 31.7M | if (r_has_masked_attrs(pvalue, a_execute, a_execute + a_executable)) { /* Literal, push it. */ |
1783 | 1.45M | INCR(p_name_lit); |
1784 | 1.45M | if (iosp >= ostop) |
1785 | 1.45M | return_with_stackoverflow_iref(); |
1786 | 1.45M | ++iosp; |
1787 | 1.45M | ref_assign_inline(iosp, pvalue); |
1788 | 1.45M | next_short(); |
1789 | 1.45M | } |
1790 | 30.2M | if (r_is_proc(pvalue)) { /* This is an executable procedure, */ |
1791 | | /* execute it. */ |
1792 | 1.66M | INCR(p_name_proc); |
1793 | 1.66M | store_state_short(iesp); |
1794 | 1.66M | goto pr; |
1795 | 1.66M | } |
1796 | | /* Not a literal or procedure, reinterpret it. */ |
1797 | 28.6M | store_state_short(iesp); |
1798 | 28.6M | icount = 0; |
1799 | 28.6M | SET_IREF(pvalue); |
1800 | 28.6M | goto top; |
1801 | 30.2M | } |
1802 | | /* default can't happen here */ |
1803 | 250M | } |
1804 | 250M | } |
1805 | 441M | } |
1806 | | /* Literal type, just push it. */ |
1807 | 78.9M | if (iosp >= ostop) |
1808 | 78.9M | return_with_stackoverflow_iref(); |
1809 | 78.9M | ++iosp; |
1810 | 78.9M | ref_assign_inline(iosp, IREF); |
1811 | 79.3M | bot:next(); |
1812 | 47.8M | out: /* At most 1 more token in the current procedure. */ |
1813 | | /* (We already decremented icount.) */ |
1814 | 47.8M | if (!icount) { |
1815 | | /* Pop the execution stack for tail recursion. */ |
1816 | 22.4M | iesp--; |
1817 | 22.4M | iref_packed = IREF_NEXT(iref_packed); |
1818 | 22.4M | goto top; |
1819 | 22.4M | } |
1820 | 105M | up:if (--(*ticks_left) < 0) |
1821 | 742 | goto slice; |
1822 | | /* See if there is anything left on the execution stack. */ |
1823 | 105M | if (!r_is_proc(iesp)) { |
1824 | 41.4M | SET_IREF(iesp--); |
1825 | 41.4M | icount = 0; |
1826 | 41.4M | goto top; |
1827 | 41.4M | } |
1828 | 63.8M | SET_IREF(iesp->value.refs); /* next element of array */ |
1829 | 63.8M | icount = r_size(iesp) - 1; |
1830 | 63.8M | if (icount <= 0) { /* <= 1 more elements */ |
1831 | 10.2M | iesp--; /* pop, or tail recursion */ |
1832 | 10.2M | if (icount < 0) |
1833 | 173k | goto up; |
1834 | 10.2M | } |
1835 | 63.6M | goto top; |
1836 | 63.6M | sched: /* We've just called a scheduling procedure. */ |
1837 | | /* The interpreter state is in memory; iref is not current. */ |
1838 | 2.24k | if (code < 0) { |
1839 | 0 | set_error(code); |
1840 | | /* |
1841 | | * We need a real object to return as the error object. |
1842 | | * (It only has to last long enough to store in |
1843 | | * *perror_object.) |
1844 | | */ |
1845 | 0 | make_null_proc(&ierror.full); |
1846 | 0 | SET_IREF(ierror.obj = &ierror.full); |
1847 | 0 | goto error_exit; |
1848 | 0 | } |
1849 | | /* Reload state information from memory. */ |
1850 | 2.24k | iosp = osp; |
1851 | 2.24k | iesp = esp; |
1852 | 2.24k | goto up; |
1853 | | #if 0 /****** ****** ***** */ |
1854 | | sst: /* Time-slice, but push the current object first. */ |
1855 | | store_state(iesp); |
1856 | | if (iesp >= estop) |
1857 | | return_with_error_iref(gs_error_execstackoverflow); |
1858 | | iesp++; |
1859 | | ref_assign_inline(iesp, iref); |
1860 | | #endif /****** ****** ***** */ |
1861 | 2.24k | slice: /* It's time to time-slice or garbage collect. */ |
1862 | | /* iref is not live, so we don't need to do a store_state. */ |
1863 | 2.24k | osp = iosp; |
1864 | 2.24k | esp = iesp; |
1865 | | /* If *ticks_left <= -100, we need to GC now. */ |
1866 | 2.24k | if ((*ticks_left) <= -100) { /* We need to garbage collect now. */ |
1867 | 35 | *pi_ctx_p = i_ctx_p; |
1868 | 35 | code = interp_reclaim(pi_ctx_p, -1); |
1869 | 35 | i_ctx_p = *pi_ctx_p; |
1870 | 35 | } else |
1871 | 2.20k | code = 0; |
1872 | 2.24k | *ticks_left = i_ctx_p->time_slice_ticks; |
1873 | 2.24k | set_code_on_interrupt(imemory, &code); |
1874 | 2.24k | goto sched; |
1875 | | |
1876 | | /* Error exits. */ |
1877 | | |
1878 | 376k | rweci: |
1879 | 376k | ierror.code = code; |
1880 | 397k | rwei: |
1881 | 397k | ierror.obj = IREF; |
1882 | 398k | rwe: |
1883 | 398k | if (!r_is_packed(iref_packed)) |
1884 | 41.4k | store_state(iesp); |
1885 | 356k | else { |
1886 | | /* |
1887 | | * We need a real object to return as the error object. |
1888 | | * (It only has to last long enough to store in *perror_object.) |
1889 | | */ |
1890 | 356k | packed_get(imemory, (const ref_packed *)ierror.obj, &ierror.full); |
1891 | 356k | store_state_short(iesp); |
1892 | 356k | if (IREF == ierror.obj) |
1893 | 356k | SET_IREF(&ierror.full); |
1894 | 356k | ierror.obj = &ierror.full; |
1895 | 356k | } |
1896 | 398k | error_exit: |
1897 | 398k | if (GS_ERROR_IS_INTERRUPT(ierror.code)) { /* We must push the current object being interpreted */ |
1898 | | /* back on the e-stack so it will be re-executed. */ |
1899 | | /* Currently, this is always an executable operator, */ |
1900 | | /* but it might be something else someday if we check */ |
1901 | | /* for interrupts in the interpreter loop itself. */ |
1902 | 0 | if (iesp >= estop) |
1903 | 0 | ierror.code = gs_error_execstackoverflow; |
1904 | 0 | else { |
1905 | 0 | iesp++; |
1906 | 0 | ref_assign_inline(iesp, IREF); |
1907 | 0 | } |
1908 | 0 | } |
1909 | 398k | esp = iesp; |
1910 | 398k | osp = iosp; |
1911 | 398k | ref_assign_inline(perror_object, ierror.obj); |
1912 | | #ifdef DEBUG |
1913 | | if (ierror.code == gs_error_InterpreterExit) { |
1914 | | /* Do not call gs_log_error to reduce the noise. */ |
1915 | | return gs_error_InterpreterExit; |
1916 | | } |
1917 | | #endif |
1918 | 398k | return gs_log_error(ierror.code, __FILE__, ierror.line); |
1919 | 398k | } |
1920 | | |
1921 | | /* Pop the bookkeeping information for a normal exit from a t_oparray. */ |
1922 | | static int |
1923 | | oparray_pop(i_ctx_t *i_ctx_p) |
1924 | 12.5M | { |
1925 | 12.5M | esp -= 4; |
1926 | 12.5M | return o_pop_estack; |
1927 | 12.5M | } |
1928 | | |
1929 | | /* Restore the stack pointers after an error inside a t_oparray procedure. */ |
1930 | | /* This procedure is called only from pop_estack. */ |
1931 | | static int |
1932 | | oparray_cleanup(i_ctx_t *i_ctx_p) |
1933 | 347k | { /* esp points just below the cleanup procedure. */ |
1934 | 347k | es_ptr ep = esp; |
1935 | 347k | uint ocount_old = (uint) ep[3].value.intval; |
1936 | 347k | uint dcount_old = (uint) ep[4].value.intval; |
1937 | 347k | uint ocount = ref_stack_count(&o_stack); |
1938 | 347k | uint dcount = ref_stack_count(&d_stack); |
1939 | | |
1940 | 347k | if (ocount > ocount_old) |
1941 | 2.47k | ref_stack_pop(&o_stack, ocount - ocount_old); |
1942 | 347k | if (dcount > dcount_old) { |
1943 | 2.46k | ref_stack_pop(&d_stack, dcount - dcount_old); |
1944 | 2.46k | dict_set_top(); |
1945 | 2.46k | } |
1946 | 347k | return 0; |
1947 | 347k | } |
1948 | | |
/* Don't restore the stack pointers. */
/* Installed in place of oparray_cleanup by '.setstackprotect false' */
/* (see zsetstackprotect below): on error, the o- and d-stacks are */
/* left exactly as the failing procedure left them. */
static int
oparray_no_cleanup(i_ctx_t *i_ctx_p)
{
    return 0;
}
1955 | | |
1956 | | /* Find the innermost oparray. */ |
1957 | | static ref * |
1958 | | oparray_find(i_ctx_t *i_ctx_p) |
1959 | 2.73k | { |
1960 | 2.73k | long i; |
1961 | 2.73k | ref *ep; |
1962 | | |
1963 | 36.8k | for (i = 0; (ep = ref_stack_index(&e_stack, i)) != 0; ++i) { |
1964 | 36.8k | if (r_is_estack_mark(ep) && |
1965 | 36.8k | (ep->value.opproc == oparray_cleanup || |
1966 | 5.46k | ep->value.opproc == oparray_no_cleanup) |
1967 | 36.8k | ) |
1968 | 2.73k | return ep; |
1969 | 36.8k | } |
1970 | 0 | return 0; |
1971 | 2.73k | } |
1972 | | |
1973 | | /* <errorobj> <obj> .errorexec ... */ |
1974 | | /* Execute an object, substituting errorobj for the 'command' if an error */ |
1975 | | /* occurs during the execution. Cf .execfile (in zfile.c). */ |
1976 | | static int |
1977 | | zerrorexec(i_ctx_t *i_ctx_p) |
1978 | 1.03M | { |
1979 | 1.03M | os_ptr op = osp; |
1980 | 1.03M | int code; |
1981 | | |
1982 | 1.03M | check_op(2); |
1983 | 1.03M | check_estack(4); /* mark/cleanup, errobj, pop, obj */ |
1984 | 1.03M | push_mark_estack(es_other, errorexec_cleanup); |
1985 | 1.03M | *++esp = op[-1]; |
1986 | 1.03M | push_op_estack(errorexec_pop); |
1987 | 1.03M | code = zexec(i_ctx_p); |
1988 | 1.03M | if (code >= 0) |
1989 | 1.03M | pop(1); |
1990 | 0 | else |
1991 | 0 | esp -= 3; /* undo our additions to estack */ |
1992 | 1.03M | return code; |
1993 | 1.03M | } |
1994 | | |
1995 | | /* - .finderrorobject <errorobj> true */ |
1996 | | /* - .finderrorobject false */ |
1997 | | /* If we are within an .errorexec or oparray, return the error object */ |
1998 | | /* and true, otherwise return false. */ |
1999 | | static int |
2000 | | zfinderrorobject(i_ctx_t *i_ctx_p) |
2001 | 2.45k | { |
2002 | 2.45k | os_ptr op = osp; |
2003 | 2.45k | ref errobj; |
2004 | | |
2005 | 2.45k | if (errorexec_find(i_ctx_p, &errobj)) { |
2006 | 2.45k | push(2); |
2007 | 2.45k | op[-1] = errobj; |
2008 | 2.45k | make_true(op); |
2009 | 2.45k | } else { |
2010 | 0 | push(1); |
2011 | 0 | make_false(op); |
2012 | 0 | } |
2013 | 2.45k | return 0; |
2014 | 2.45k | } |
2015 | | |
2016 | | /* |
2017 | | * Find the innermost .errorexec or oparray. If there is an oparray, or a |
2018 | | * .errorexec with errobj != null, store it in *perror_object and return 1, |
2019 | | * otherwise return 0; |
2020 | | */ |
2021 | | int |
2022 | | errorexec_find(i_ctx_t *i_ctx_p, ref *perror_object) |
2023 | 376k | { |
2024 | 376k | long i; |
2025 | 376k | const ref *ep; |
2026 | | |
2027 | 2.51M | for (i = 0; (ep = ref_stack_index(&e_stack, i)) != 0; ++i) { |
2028 | 2.50M | if (r_is_estack_mark(ep)) { |
2029 | 413k | if (ep->value.opproc == oparray_cleanup) { |
2030 | | /* See oppr: above. */ |
2031 | 360k | uint opindex = (uint)ep[1].value.intval; |
2032 | 360k | if (opindex == 0) /* internal operator, ignore */ |
2033 | 0 | continue; |
2034 | 360k | op_index_ref(imemory, opindex, perror_object); |
2035 | 360k | return 1; |
2036 | 360k | } |
2037 | 52.4k | if (ep->value.opproc == oparray_no_cleanup) |
2038 | 0 | return 0; /* protection disabled */ |
2039 | 52.4k | if (ep->value.opproc == errorexec_cleanup) { |
2040 | 4.97k | if (r_has_type(ep + 1, t_null)) |
2041 | 57 | return 0; |
2042 | 4.91k | *perror_object = ep[1]; /* see .errorexec above */ |
2043 | 4.91k | return 1; |
2044 | 4.97k | } |
2045 | 52.4k | } |
2046 | 2.50M | } |
2047 | 10.9k | return 0; |
2048 | 376k | } |
2049 | | |
2050 | | /* Pop the bookkeeping information on a normal exit from .errorexec. */ |
2051 | | static int |
2052 | | errorexec_pop(i_ctx_t *i_ctx_p) |
2053 | 1.03M | { |
2054 | 1.03M | esp -= 2; |
2055 | 1.03M | return o_pop_estack; |
2056 | 1.03M | } |
2057 | | |
/* Clean up when unwinding the stack on an error.  (No action needed: */
/* the mark and its stored error object are simply discarded by the */
/* normal unwind machinery.) */
static int
errorexec_cleanup(i_ctx_t *i_ctx_p)
{
    return 0;
}
2064 | | |
2065 | | /* <bool> .setstackprotect - */ |
2066 | | /* Set whether to protect the stack for the innermost oparray. */ |
2067 | | static int |
2068 | | zsetstackprotect(i_ctx_t *i_ctx_p) |
2069 | 2.73k | { |
2070 | 2.73k | os_ptr op = osp; |
2071 | 2.73k | ref *ep = oparray_find(i_ctx_p); |
2072 | | |
2073 | 2.73k | check_type(*op, t_boolean); |
2074 | 2.73k | if (ep == 0) |
2075 | 0 | return_error(gs_error_rangecheck); |
2076 | 2.73k | ep->value.opproc = |
2077 | 2.73k | (op->value.boolval ? oparray_cleanup : oparray_no_cleanup); |
2078 | 2.73k | pop(1); |
2079 | 2.73k | return 0; |
2080 | 2.73k | } |
2081 | | |
2082 | | /* - .currentstackprotect <bool> */ |
2083 | | /* Return the stack protection status. */ |
2084 | | static int |
2085 | | zcurrentstackprotect(i_ctx_t *i_ctx_p) |
2086 | 0 | { |
2087 | 0 | os_ptr op = osp; |
2088 | 0 | ref *ep = oparray_find(i_ctx_p); |
2089 | |
|
2090 | 0 | if (ep == 0) |
2091 | 0 | return_error(gs_error_rangecheck); |
2092 | 0 | push(1); |
2093 | 0 | make_bool(op, ep->value.opproc == oparray_cleanup); |
2094 | 0 | return 0; |
2095 | 0 | } |
2096 | | |
2097 | | static int |
2098 | | zactonuel(i_ctx_t *i_ctx_p) |
2099 | 683 | { |
2100 | 683 | os_ptr op = osp; |
2101 | | |
2102 | 683 | push(1); |
2103 | 683 | make_bool(op, !!gs_lib_ctx_get_act_on_uel((gs_memory_t *)(i_ctx_p->memory.current))); |
2104 | 683 | return 0; |
2105 | 683 | } |