/* /src/ghostpdl/psi/ireclaim.c */
/* Copyright (C) 2001-2023 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   Refer to licensing information at http://www.artifex.com or contact
   Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco,
   CA 94129, USA, for further information.
*/
15 | | |
16 | | |
17 | | /* Interpreter's interface to garbage collector */ |
18 | | #include "ghost.h" |
19 | | #include "ierrors.h" |
20 | | #include "gsstruct.h" |
21 | | #include "iastate.h" |
22 | | #include "icontext.h" |
23 | | #include "interp.h" |
24 | | #include "isave.h" /* for isstate.h */ |
25 | | #include "isstate.h" /* for mem->saved->state */ |
26 | | #include "dstack.h" /* for dsbot, dsp, dict_set_top */ |
27 | | #include "estack.h" /* for esbot, esp */ |
28 | | #include "ostack.h" /* for osbot, osp */ |
29 | | #include "opdef.h" /* for defining init procedure */ |
30 | | #include "store.h" /* for make_array */ |
31 | | |
32 | | /* Import preparation and cleanup routines. */ |
33 | | extern void ialloc_gc_prepare(gs_ref_memory_t *); |
34 | | |
35 | | /* Forward references */ |
36 | | static int gs_vmreclaim(gs_dual_memory_t *, bool); |
37 | | |
38 | | /* Initialize the GC hook in the allocator. */ |
39 | | static int ireclaim(gs_dual_memory_t *, int); |
static int
ireclaim_init(i_ctx_t *i_ctx_p)
{
    /* Install the interpreter's GC entry point in the allocator so the
       allocator can call back into ireclaim() when a collection is
       needed (see gs_imemory.reclaim use by the allocator). */
    gs_imemory.reclaim = ireclaim;
    return 0;
}
46 | | |
/* GC hook called when the allocator signals a GC is needed (space = -1), */
/* or for vmreclaim (space = the space to collect). */
static int
ireclaim(gs_dual_memory_t * dmem, int space)
{
    bool global;
    gs_ref_memory_t *mem = NULL;
    int code;

    if (space < 0) {
        /* Determine which allocator exceeded the limit. */
        int i;

        for (i = 0; i < countof(dmem->spaces_indexed); i++) {
            mem = dmem->spaces_indexed[i];
            if (mem == 0)
                continue;
            /* A collection may have been requested on either the GC'ed
               allocator or its stable counterpart; check both. */
            if (mem->gc_status.requested > 0 ||
                ((gs_ref_memory_t *)mem->stable_memory)->gc_status.requested > 0
                )
                break;
        }
        if (!mem) {
            mem = dmem->space_global; /* just in case */
        }
    } else {
        /* Map the VM space value to its allocator index. */
        mem = dmem->spaces_indexed[space >> r_space_shift];
    }
    if_debug3m('0', (gs_memory_t *)mem, "[0]GC called, space=%d, requestor=%d, requested=%ld\n",
               space, mem->space, (long)mem->gc_status.requested);
    /* Collect global VM too unless the triggering space is local. */
    global = mem->space != avm_local;
    /* Since dmem may move, reset the request now. */
    ialloc_reset_requested(dmem);
    code = gs_vmreclaim(dmem, global);
    if (code < 0)
        return code;
    ialloc_set_limit(mem);
    if (space < 0) {
        gs_memory_status_t stats;
        size_t allocated;

        /* If the amount still allocated after the GC is complete */
        /* exceeds the max_vm setting, then return a VMerror */
        gs_memory_status((gs_memory_t *) mem, &stats);
        allocated = stats.allocated;
        if (mem->stable_memory != (gs_memory_t *)mem) {
            /* Include the stable allocator's usage in the total. */
            gs_memory_status(mem->stable_memory, &stats);
            allocated += stats.allocated;
        }
        if (allocated >= mem->gc_status.max_vm) {
            /* We can't satisfy this request within max_vm. */
            return_error(gs_error_VMerror);
        }
    }
    return 0;
}
103 | | |
/* Interpreter entry to garbage collector. */
static int
gs_vmreclaim(gs_dual_memory_t *dmem, bool global)
{
    /* HACK: we know the gs_dual_memory_t is embedded in a context state. */
    i_ctx_t *i_ctx_p =
        (i_ctx_t *)((char *)dmem - offset_of(i_ctx_t, memory));
    gs_ref_memory_t *lmem = dmem->space_local;
    int code = context_state_store(i_ctx_p);
    /* Holds system, global, local (if distinct) plus their distinct
       stable counterparts; sized on the assumption that at most 5 of
       these are distinct allocators — TODO confirm against iastate.h. */
    gs_ref_memory_t *memories[5];
    gs_ref_memory_t *mem;
    int nmem, i;

    if (code < 0)
        return code;

    memories[0] = dmem->space_system;
    memories[1] = mem = dmem->space_global;
    nmem = 2;
    if (lmem != dmem->space_global)
        memories[nmem++] = lmem;
    /* Append the distinct stable allocators.  i starts at the pre-append
       value of nmem and only counts down, so entries appended here are
       not themselves rescanned. */
    for (i = nmem; --i >= 0;) {
        mem = memories[i];
        if (mem->stable_memory != (gs_memory_t *)mem)
            memories[nmem++] = (gs_ref_memory_t *)mem->stable_memory;
    }

    /****** ABORT IF code < 0 ******/
    /* Close the active clumps of every allocator before collecting. */
    for (i = nmem; --i >= 0; )
        alloc_close_clump(memories[i]);

    /* Prune the file list so it won't retain potentially collectible */
    /* files. */

    for (i = (global ? i_vm_system : i_vm_local);
         i < countof(dmem->spaces_indexed);
         ++i
        ) {
        gs_ref_memory_t *mem = dmem->spaces_indexed[i];

        /* Always safe to subtract 1 from i here, as i is always at
         * least i_vm_system (1) or i_vm_local (2). */
        if (mem == 0 || (mem == dmem->spaces_indexed[i - 1]))
            continue;
        if (mem->stable_memory != (gs_memory_t *)mem)
            ialloc_gc_prepare((gs_ref_memory_t *)mem->stable_memory);
        /* Prepare every level of the save chain, not just the current
           allocator state. */
        for (;; mem = &mem->saved->state) {
            ialloc_gc_prepare(mem);
            if (mem->saved == 0)
                break;
        }
    }

    /* Do the actual collection. */

    {
        void *ctxp = i_ctx_p;
        gs_gc_root_t context_root, *r = &context_root;

        /* Register the context pointer as a root so the collector traces
           (and may relocate) the context state itself. */
        gs_register_struct_root((gs_memory_t *)lmem, &r,
                                &ctxp, "i_ctx_p root");
        GS_RECLAIM(&dmem->spaces, global);
        gs_unregister_root((gs_memory_t *)lmem, r, "i_ctx_p root");
        /* The context may have moved during collection: reload both the
           context pointer and the embedded dual-memory pointer from it. */
        i_ctx_p = ctxp;
        dmem = &i_ctx_p->memory;
    }

    /* Update caches not handled by context_state_load. */

    /* systemdict is the bottommost entry of the dictionary stack. */
    *systemdict = *ref_stack_index(&d_stack, ref_stack_count(&d_stack) - 1);

    /* Update the cached value pointers in names. */

    dicts_gc_cleanup();

    /* Reopen the active clumps. */

    for (i = 0; i < nmem; ++i)
        alloc_open_clump(memories[i]);

    /* Reload the context state. Note this should be done
       AFTER the clumps are reopened, since the context state
       load could do allocations that must remain.
       If it were done while the clumps were still closed,
       we would lose those allocations when the clumps were opened */

    code = context_state_load(i_ctx_p);
    return code;
}
193 | | |
/* ------ Initialization procedure ------ */

/* This table defines no operators; it only registers the init procedure
   that installs the GC hook in the allocator at startup. */
const op_def ireclaim_l2_op_defs[] =
{
    op_def_end(ireclaim_init)
};