/src/ghostpdl/psi/ialloc.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2021 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 1305 Grant Avenue - Suite 200, Novato, |
13 | | CA 94945, U.S.A., +1(415)492-9861, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Memory allocator for Ghostscript interpreter */ |
18 | | #include "gx.h" |
19 | | #include "memory_.h" |
20 | | #include "gsexit.h" |
21 | | #include "ierrors.h" |
22 | | #include "gsstruct.h" |
23 | | #include "iref.h" /* must precede iastate.h */ |
24 | | #include "iastate.h" |
25 | | #include "igc.h" /* for gs_gc_reclaim */ |
26 | | #include "ipacked.h" |
27 | | #include "iutil.h" |
28 | | #include "ivmspace.h" |
29 | | #include "store.h" |
30 | | |
31 | | /* |
32 | | * Define global and local instances. |
33 | | */ |
34 | | public_st_gs_dual_memory(); |
35 | | |
36 | | /* Initialize the allocator */ |
37 | | int |
38 | | ialloc_init(gs_dual_memory_t *dmem, gs_memory_t * rmem, uint clump_size, |
39 | | bool level2) |
40 | 683 | { |
41 | 683 | gs_ref_memory_t *ilmem = ialloc_alloc_state(rmem, clump_size); |
42 | 683 | gs_ref_memory_t *ilmem_stable = ialloc_alloc_state(rmem, clump_size); |
43 | 683 | gs_ref_memory_t *igmem = 0; |
44 | 683 | gs_ref_memory_t *igmem_stable = 0; |
45 | 683 | gs_ref_memory_t *ismem = ialloc_alloc_state(rmem, clump_size); |
46 | 683 | int i; |
47 | | |
48 | 683 | if (ilmem == 0 || ilmem_stable == 0 || ismem == 0) |
49 | 0 | goto fail; |
50 | 683 | ilmem->stable_memory = (gs_memory_t *)ilmem_stable; |
51 | 683 | if (level2) { |
52 | 683 | igmem = ialloc_alloc_state(rmem, clump_size); |
53 | 683 | igmem_stable = ialloc_alloc_state(rmem, clump_size); |
54 | 683 | if (igmem == 0 || igmem_stable == 0) |
55 | 0 | goto fail; |
56 | 683 | igmem->stable_memory = (gs_memory_t *)igmem_stable; |
57 | 683 | } else |
58 | 0 | igmem = ilmem, igmem_stable = ilmem_stable; |
59 | 3.41k | for (i = 0; i < countof(dmem->spaces_indexed); i++) |
60 | 2.73k | dmem->spaces_indexed[i] = 0; |
61 | 683 | dmem->space_local = ilmem; |
62 | 683 | dmem->space_global = igmem; |
63 | 683 | dmem->space_system = ismem; |
64 | 683 | dmem->spaces.vm_reclaim = gs_gc_reclaim; /* real GC */ |
65 | 683 | dmem->reclaim = 0; /* no interpreter GC yet */ |
66 | | /* Level 1 systems have only local VM. */ |
67 | 683 | igmem->space = avm_global; |
68 | 683 | igmem_stable->space = avm_global; |
69 | 683 | ilmem->space = avm_local; /* overrides if ilmem == igmem */ |
70 | 683 | ilmem_stable->space = avm_local; /* ditto */ |
71 | 683 | ismem->space = avm_system; |
72 | | # if IGC_PTR_STABILITY_CHECK |
73 | | igmem->space_id = (i_vm_global << 1) + 1; |
74 | | igmem_stable->space_id = i_vm_global << 1; |
75 | | ilmem->space_id = (i_vm_local << 1) + 1; /* overrides if ilmem == igmem */ |
76 | | ilmem_stable->space_id = i_vm_local << 1; /* ditto */ |
77 | | ismem->space_id = (i_vm_system << 1); |
78 | | # endif |
79 | 683 | ialloc_set_space(dmem, avm_global); |
80 | 683 | return 0; |
81 | 0 | fail: |
82 | 0 | ialloc_free_state(igmem_stable); |
83 | 0 | ialloc_free_state(igmem); |
84 | 0 | ialloc_free_state(ismem); |
85 | 0 | ialloc_free_state(ilmem_stable); |
86 | 0 | ialloc_free_state(ilmem); |
87 | 0 | return_error(gs_error_VMerror); |
88 | 683 | } |
89 | | |
90 | | /* Free the allocator */ |
91 | | void |
92 | | ialloc_finit(gs_dual_memory_t *mem) |
93 | 683 | { |
94 | 683 | if (mem != NULL) { |
95 | 683 | gs_ref_memory_t *ilmem = mem->space_local; |
96 | 683 | gs_ref_memory_t *igmem = mem->space_global; |
97 | 683 | gs_ref_memory_t *ismem = mem->space_system; |
98 | | |
99 | 683 | if (ilmem != NULL) { |
100 | 683 | gs_ref_memory_t *ilmem_stable = (gs_ref_memory_t *)(ilmem->stable_memory); |
101 | 683 | gs_memory_free_all((gs_memory_t *)ilmem_stable, FREE_ALL_EVERYTHING, "ialloc_finit"); |
102 | 683 | gs_memory_free_all((gs_memory_t *)ilmem, FREE_ALL_EVERYTHING, "ialloc_finit"); |
103 | 683 | } |
104 | | |
105 | 683 | if (igmem != NULL) { |
106 | 683 | gs_ref_memory_t *igmem_stable = (gs_ref_memory_t *)(igmem->stable_memory); |
107 | 683 | gs_memory_free_all((gs_memory_t *)igmem_stable, FREE_ALL_EVERYTHING, "ialloc_finit"); |
108 | 683 | gs_memory_free_all((gs_memory_t *)igmem, FREE_ALL_EVERYTHING, "ialloc_finit"); |
109 | 683 | } |
110 | | |
111 | 683 | if (ismem != NULL) |
112 | 683 | gs_memory_free_all((gs_memory_t *)ismem, FREE_ALL_EVERYTHING, "ialloc_finit"); |
113 | 683 | } |
114 | 683 | } |
115 | | |
116 | | /* ================ Local/global VM ================ */ |
117 | | |
118 | | /* Get the space attribute of an allocator */ |
119 | | uint |
120 | | imemory_space(const gs_ref_memory_t * iimem) |
121 | 7.76M | { |
122 | 7.76M | return iimem->space; |
123 | 7.76M | } |
124 | | |
125 | | /* Select the allocation space. */ |
126 | | void |
127 | | ialloc_set_space(gs_dual_memory_t * dmem, uint space) |
128 | 2.14M | { |
129 | 2.14M | gs_ref_memory_t *mem = dmem->spaces_indexed[space >> r_space_shift]; |
130 | | |
131 | 2.14M | dmem->current = mem; |
132 | 2.14M | dmem->current_space = mem->space; |
133 | 2.14M | } |
134 | | |
135 | | /* Get the l_new attribute of a current allocator. */ |
136 | | /* (A copy of the new_mask in the gs_dual_memory_t.) */ |
137 | | uint |
138 | | imemory_new_mask(const gs_ref_memory_t *imem) |
139 | 5.07M | { |
140 | 5.07M | return imem->new_mask; |
141 | 5.07M | } |
142 | | |
143 | | /* Get the save level of an allocator. */ |
144 | | int |
145 | | imemory_save_level(const gs_ref_memory_t *imem) |
146 | 59.9k | { |
147 | 59.9k | return imem->save_level; |
148 | 59.9k | } |
149 | | |
150 | | /* Reset the requests. */ |
151 | | void |
152 | | ialloc_reset_requested(gs_dual_memory_t * dmem) |
153 | 11.6k | { |
154 | 11.6k | dmem->space_system->gc_status.requested = 0; |
155 | 11.6k | dmem->space_global->gc_status.requested = 0; |
156 | 11.6k | dmem->space_local->gc_status.requested = 0; |
157 | 11.6k | } |
158 | | |
159 | | /* ================ Refs ================ */ |
160 | | |
161 | | #ifdef DEBUG |
162 | | static int |
163 | | ialloc_trace_space(const gs_ref_memory_t *imem) |
164 | | { |
165 | | return imem->space + (imem->stable_memory == (const gs_memory_t *)imem); |
166 | | } |
167 | | #endif |
168 | | |
169 | | /* Register a ref root. */ |
170 | | int |
171 | | gs_register_ref_root(gs_memory_t *mem, gs_gc_root_t **root, |
172 | | void **pp, client_name_t cname) |
173 | 10.2k | { |
174 | 10.2k | return gs_register_root(mem, root, ptr_ref_type, pp, cname); |
175 | 10.2k | } |
176 | | |
177 | | /* |
178 | | * As noted in iastate.h, every run of refs has an extra ref at the end |
179 | | * to hold relocation information for the garbage collector; |
180 | | * since sizeof(ref) % obj_align_mod == 0, we never need to |
181 | | * allocate any additional padding space at the end of the block. |
182 | | */ |
183 | | |
184 | | /* Allocate an array of refs. */ |
185 | | int |
186 | | gs_alloc_ref_array(gs_ref_memory_t * mem, ref * parr, uint attrs, |
187 | | uint num_refs, client_name_t cname) |
188 | 11.7M | { |
189 | 11.7M | ref *obj; |
190 | 11.7M | int i; |
191 | | |
192 | | /* If we're allocating a run of refs already, */ |
193 | | /* and we aren't about to overflow the maximum run length, use it. */ |
194 | 11.7M | if (mem->cc && mem->cc->has_refs == true && mem->cc->rtop == mem->cc->cbot && |
195 | 11.7M | num_refs < (mem->cc->ctop - mem->cc->cbot) / sizeof(ref) && |
196 | 11.7M | mem->cc->rtop - (byte *) mem->cc->rcur + num_refs * sizeof(ref) < |
197 | 11.5M | max_size_st_refs |
198 | 11.7M | ) { |
199 | 10.0M | ref *end; |
200 | | |
201 | 10.0M | obj = (ref *) mem->cc->rtop - 1; /* back up over last ref */ |
202 | 10.0M | if_debug4m('A', (const gs_memory_t *)mem, "[a%d:+$ ]%s(%u) = "PRI_INTPTR"\n", |
203 | 10.0M | ialloc_trace_space(mem), client_name_string(cname), |
204 | 10.0M | num_refs, (intptr_t)obj); |
205 | 10.0M | mem->cc->rcur[-1].o_size += num_refs * sizeof(ref); |
206 | 10.0M | end = (ref *) (mem->cc->rtop = mem->cc->cbot += |
207 | 10.0M | num_refs * sizeof(ref)); |
208 | 10.0M | make_mark(end - 1); |
209 | 10.0M | } else { |
210 | | /* |
211 | | * Allocate a new run. We have to distinguish 3 cases: |
212 | | * - Same clump: cc unchanged, end == cc->cbot. |
213 | | * - Large clump: cc unchanged, end != cc->cbot. |
214 | | * - New clump: cc changed. |
215 | | */ |
216 | 1.72M | clump_t *cc = mem->cc; |
217 | 1.72M | ref *end; |
218 | 1.72M | alloc_change_t *cp = 0; |
219 | 1.72M | int code = 0; |
220 | | |
221 | 1.72M | if ((gs_memory_t *)mem != mem->stable_memory) { |
222 | 1.69M | code = alloc_save_change_alloc(mem, "gs_alloc_ref_array", &cp); |
223 | 1.69M | if (code < 0) |
224 | 0 | return code; |
225 | 1.69M | } |
226 | 1.72M | obj = gs_alloc_struct_array((gs_memory_t *) mem, num_refs + 1, |
227 | 1.72M | ref, &st_refs, cname); |
228 | 1.72M | if (obj == 0) |
229 | 0 | return_error(gs_error_VMerror); |
230 | | /* Set the terminating ref now. */ |
231 | 1.72M | end = (ref *) obj + num_refs; |
232 | 1.72M | make_mark(end); |
233 | | /* Set has_refs in the clump. */ |
234 | 1.72M | if (mem->cc && (mem->cc != cc || mem->cc->cbot == (byte *) (end + 1))) { |
235 | | /* Ordinary clump. */ |
236 | 1.68M | mem->cc->rcur = (obj_header_t *) obj; |
237 | 1.68M | mem->cc->rtop = (byte *) (end + 1); |
238 | 1.68M | mem->cc->has_refs = true; |
239 | 1.68M | } else { |
240 | | /* Large clump. */ |
241 | | /* This happens only for very large arrays, */ |
242 | | /* so it doesn't need to be cheap. */ |
243 | 37.9k | clump_locator_t cl; |
244 | | |
245 | 37.9k | cl.memory = mem; |
246 | 37.9k | cl.cp = mem->root; |
247 | | /* clump_locate_ptr() should *never* fail here */ |
248 | 37.9k | if (clump_locate_ptr(obj, &cl)) { |
249 | 37.9k | cl.cp->has_refs = true; |
250 | 37.9k | } |
251 | 0 | else { |
252 | 0 | gs_abort((gs_memory_t *) mem); |
253 | 0 | } |
254 | 37.9k | } |
255 | 1.72M | if (cp) { |
256 | 104k | mem->changes = cp; |
257 | 104k | cp->where = (ref_packed *)obj; |
258 | 104k | } |
259 | 1.72M | } |
260 | 133M | for (i = 0; i < num_refs; i++) { |
261 | 122M | make_null(&(obj[i])); |
262 | 122M | } |
263 | 11.7M | make_array(parr, attrs | mem->space, num_refs, obj); |
264 | 11.7M | return 0; |
265 | 11.7M | } |
266 | | |
267 | | /* Resize an array of refs. Currently this is only implemented */ |
268 | | /* for shrinking, not for growing. */ |
269 | | int |
270 | | gs_resize_ref_array(gs_ref_memory_t * mem, ref * parr, |
271 | | uint new_num_refs, client_name_t cname) |
272 | 0 | { |
273 | 0 | uint old_num_refs = r_size(parr); |
274 | 0 | uint diff; |
275 | 0 | ref *obj = parr->value.refs; |
276 | |
|
277 | 0 | if (new_num_refs > old_num_refs || !r_has_type(parr, t_array)) |
278 | 0 | return_error(gs_error_Fatal); |
279 | 0 | diff = old_num_refs - new_num_refs; |
280 | | /* Check for LIFO. See gs_free_ref_array for more details. */ |
281 | 0 | if (mem->cc && mem->cc->rtop == mem->cc->cbot && |
282 | 0 | (byte *) (obj + (old_num_refs + 1)) == mem->cc->rtop |
283 | 0 | ) { |
284 | | /* Shorten the refs object. */ |
285 | 0 | ref *end = (ref *) (mem->cc->cbot = mem->cc->rtop -= |
286 | 0 | diff * sizeof(ref)); |
287 | |
|
288 | 0 | if_debug4m('A', (const gs_memory_t *)mem, "[a%d:<$ ]%s(%u) "PRI_INTPTR"\n", |
289 | 0 | ialloc_trace_space(mem), client_name_string(cname), diff, |
290 | 0 | (intptr_t)obj); |
291 | 0 | mem->cc->rcur[-1].o_size -= diff * sizeof(ref); |
292 | 0 | make_mark(end - 1); |
293 | 0 | } else { |
294 | | /* Punt. */ |
295 | 0 | if_debug4m('A', (const gs_memory_t *)mem, "[a%d:<$#]%s(%u) "PRI_INTPTR"\n", |
296 | 0 | ialloc_trace_space(mem), client_name_string(cname), diff, |
297 | 0 | (intptr_t)obj); |
298 | 0 | mem->lost.refs += diff * sizeof(ref); |
299 | 0 | } |
300 | 0 | r_set_size(parr, new_num_refs); |
301 | 0 | return 0; |
302 | 0 | } |
303 | | |
304 | | /* Deallocate an array of refs. Only do this if LIFO, or if */ |
305 | | /* the array occupies an entire clump by itself. */ |
306 | | void |
307 | | gs_free_ref_array(gs_ref_memory_t * mem, ref * parr, client_name_t cname) |
308 | 484k | { |
309 | 484k | uint num_refs = r_size(parr); |
310 | 484k | ref *obj = parr->value.refs; |
311 | | |
312 | | /* |
313 | | * Compute the storage size of the array, and check for LIFO |
314 | | * freeing or a separate clump. Note that the array might be packed; |
315 | | * for the moment, if it's anything but a t_array, punt. |
316 | | * The +1s are for the extra ref for the GC. |
317 | | */ |
318 | 484k | if (!r_has_type(parr, t_array)) |
319 | 484k | DO_NOTHING; /* don't look for special cases */ |
320 | 142k | else if (mem->cc && mem->cc->rtop == mem->cc->cbot && |
321 | 142k | (byte *) (obj + (num_refs + 1)) == mem->cc->rtop |
322 | 142k | ) { |
323 | 197 | if ((obj_header_t *) obj == mem->cc->rcur) { |
324 | | /* Deallocate the entire refs object. */ |
325 | 197 | if ((gs_memory_t *)mem != mem->stable_memory) |
326 | 197 | alloc_save_remove(mem, (ref_packed *)obj, "gs_free_ref_array"); |
327 | 197 | gs_free_object((gs_memory_t *) mem, obj, cname); |
328 | 197 | mem->cc->rcur = 0; |
329 | 197 | mem->cc->rtop = 0; |
330 | 197 | } else { |
331 | | /* Deallocate it at the end of the refs object. */ |
332 | 0 | if_debug4m('A', (const gs_memory_t *)mem, "[a%d:-$ ]%s(%u) "PRI_INTPTR"\n", |
333 | 0 | ialloc_trace_space(mem), client_name_string(cname), |
334 | 0 | num_refs, (intptr_t)obj); |
335 | 0 | mem->cc->rcur[-1].o_size -= num_refs * sizeof(ref); |
336 | 0 | mem->cc->rtop = mem->cc->cbot = (byte *) (obj + 1); |
337 | 0 | make_mark(obj); |
338 | 0 | } |
339 | 197 | return; |
340 | 141k | } else if (num_refs >= (mem->large_size / ARCH_SIZEOF_REF - 1)) { |
341 | | /* See if this array has a clump all to itself. */ |
342 | | /* We only make this check when freeing very large objects, */ |
343 | | /* so it doesn't need to be cheap. */ |
344 | 20.0k | clump_locator_t cl; |
345 | | |
346 | 20.0k | cl.memory = mem; |
347 | 20.0k | cl.cp = mem->root; |
348 | 20.0k | if (clump_locate_ptr(obj, &cl) && |
349 | 20.0k | obj == (ref *) ((obj_header_t *) (cl.cp->cbase) + 1) && |
350 | 20.0k | (byte *) (obj + (num_refs + 1)) == cl.cp->cend |
351 | 20.0k | ) { |
352 | | /* Free the clump. */ |
353 | 20.0k | if_debug4m('a', (const gs_memory_t *)mem, "[a%d:-$L]%s(%u) "PRI_INTPTR"\n", |
354 | 20.0k | ialloc_trace_space(mem), client_name_string(cname), |
355 | 20.0k | num_refs, (intptr_t)obj); |
356 | 20.0k | if ((gs_memory_t *)mem != mem->stable_memory) { |
357 | 0 | alloc_save_remove(mem, (ref_packed *)obj, "gs_free_ref_array"); |
358 | 0 | } |
359 | 20.0k | alloc_free_clump(cl.cp, mem); |
360 | 20.0k | return; |
361 | 20.0k | } |
362 | 20.0k | } |
363 | | /* Punt, but fill the array with nulls so that there won't be */ |
364 | | /* dangling references to confuse the garbage collector. */ |
365 | 484k | if_debug4m('A', (const gs_memory_t *)mem, "[a%d:-$#]%s(%u) "PRI_INTPTR"\n", |
366 | 464k | ialloc_trace_space(mem), client_name_string(cname), num_refs, |
367 | 464k | (intptr_t)obj); |
368 | 464k | { |
369 | 464k | uint size; |
370 | | |
371 | 464k | switch (r_type(parr)) { |
372 | 342k | case t_shortarray: |
373 | 342k | size = num_refs * sizeof(ref_packed); |
374 | 342k | break; |
375 | 0 | case t_mixedarray:{ |
376 | | /* We have to parse the array to compute the storage size. */ |
377 | 0 | uint i = 0; |
378 | 0 | const ref_packed *p = parr->value.packed; |
379 | |
|
380 | 0 | for (; i < num_refs; ++i) |
381 | 0 | p = packed_next(p); |
382 | 0 | size = (const byte *)p - (const byte *)parr->value.packed; |
383 | 0 | break; |
384 | 0 | } |
385 | 121k | case t_array: |
386 | 121k | size = num_refs * sizeof(ref); |
387 | 121k | break; |
388 | 0 | default: |
389 | 0 | lprintf3("Unknown type 0x%x in free_ref_array(%u,"PRI_INTPTR")!", |
390 | 0 | r_type(parr), num_refs, (intptr_t)obj); |
391 | 0 | return; |
392 | 464k | } |
393 | | /* |
394 | | * If there are any leftover packed elements, we don't |
395 | | * worry about them, since they can't be dangling references. |
396 | | */ |
397 | 464k | refset_null_new(obj, size / sizeof(ref), 0); |
398 | 464k | mem->lost.refs += size; |
399 | 464k | } |
400 | 464k | } |
401 | | |
402 | | /* Allocate a string ref. */ |
403 | | int |
404 | | gs_alloc_string_ref(gs_ref_memory_t * mem, ref * psref, |
405 | | uint attrs, uint nbytes, client_name_t cname) |
406 | 9 | { |
407 | 9 | byte *str = gs_alloc_string((gs_memory_t *) mem, nbytes, cname); |
408 | | |
409 | 9 | if (str == 0) |
410 | 0 | return_error(gs_error_VMerror); |
411 | 9 | make_string(psref, attrs | mem->space, nbytes, str); |
412 | 9 | return 0; |
413 | 9 | } |