/src/ghostpdl/psi/ialloc.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2025 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Memory allocator for Ghostscript interpreter */ |
18 | | #include "gx.h" |
19 | | #include "memory_.h" |
20 | | #include "gsexit.h" |
21 | | #include "ierrors.h" |
22 | | #include "gsstruct.h" |
23 | | #include "iref.h" /* must precede iastate.h */ |
24 | | #include "iastate.h" |
25 | | #include "igc.h" /* for gs_gc_reclaim */ |
26 | | #include "ipacked.h" |
27 | | #include "iutil.h" |
28 | | #include "ivmspace.h" |
29 | | #include "store.h" |
30 | | |
31 | | /* |
32 | | * Define global and local instances. |
33 | | */ |
34 | | public_st_gs_dual_memory(); |
35 | | |
/* Initialize the allocator */
/*
 * Create the interpreter's three (or five) ref-allocator instances inside
 * *dmem, all carved out of the underlying non-GC memory rmem:
 *   - local VM (plus its stable counterpart),
 *   - global VM (plus stable) when level2 is true; for Level 1 systems
 *     global simply aliases local,
 *   - system VM.
 * clump_size is passed through to each ialloc_alloc_state() call.
 * Returns 0 on success, or gs_error_VMerror (and frees any states already
 * allocated) if any allocation fails.
 */
int
ialloc_init(gs_dual_memory_t *dmem, gs_memory_t * rmem, uint clump_size,
            bool level2)
{
    gs_ref_memory_t *ilmem = ialloc_alloc_state(rmem, clump_size);
    gs_ref_memory_t *ilmem_stable = ialloc_alloc_state(rmem, clump_size);
    gs_ref_memory_t *igmem = 0;
    gs_ref_memory_t *igmem_stable = 0;
    gs_ref_memory_t *ismem = ialloc_alloc_state(rmem, clump_size);
    int i;

    if (ilmem == 0 || ilmem_stable == 0 || ismem == 0)
        goto fail;
    ilmem->stable_memory = (gs_memory_t *)ilmem_stable;
    if (level2) {
        igmem = ialloc_alloc_state(rmem, clump_size);
        igmem_stable = ialloc_alloc_state(rmem, clump_size);
        if (igmem == 0 || igmem_stable == 0)
            goto fail;
        igmem->stable_memory = (gs_memory_t *)igmem_stable;
    } else
        /* Level 1: global VM aliases local VM.  Note this assignment is
         * only reached when no further 'goto fail' can occur, so the fail
         * path never sees the aliased pointers (no double free). */
        igmem = ilmem, igmem_stable = ilmem_stable;
    for (i = 0; i < countof(dmem->spaces_indexed); i++)
        dmem->spaces_indexed[i] = 0;
    dmem->space_local = ilmem;
    dmem->space_global = igmem;
    dmem->space_system = ismem;
    dmem->spaces.vm_reclaim = gs_gc_reclaim; /* real GC */
    dmem->reclaim = 0;          /* no interpreter GC yet */
    /* Level 1 systems have only local VM.  Set global first so the local
     * assignments below win when igmem == ilmem. */
    igmem->space = avm_global;
    igmem_stable->space = avm_global;
    ilmem->space = avm_local;   /* overrides if ilmem == igmem */
    ilmem_stable->space = avm_local; /* ditto */
    ismem->space = avm_system;
#   if IGC_PTR_STABILITY_CHECK
    /* Debug-only space IDs: low bit distinguishes non-stable (1) from
     * stable (0) instances of the same VM space. */
    igmem->space_id = (i_vm_global << 1) + 1;
    igmem_stable->space_id = i_vm_global << 1;
    ilmem->space_id = (i_vm_local << 1) + 1;    /* overrides if ilmem == igmem */
    ilmem_stable->space_id = i_vm_local << 1;   /* ditto */
    ismem->space_id = (i_vm_system << 1);
#   endif
    ialloc_set_space(dmem, avm_global);
    return 0;
 fail:
    /* ialloc_free_state() presumably tolerates NULL — the pointers not yet
     * allocated are still 0 here.  TODO(review): confirm. */
    ialloc_free_state(igmem_stable);
    ialloc_free_state(igmem);
    ialloc_free_state(ismem);
    ialloc_free_state(ilmem_stable);
    ialloc_free_state(ilmem);
    return_error(gs_error_VMerror);
}
89 | | |
90 | | /* Free the allocator */ |
91 | | void |
92 | | ialloc_finit(gs_dual_memory_t *mem) |
93 | 9.78k | { |
94 | 9.78k | if (mem != NULL) { |
95 | 9.78k | gs_ref_memory_t *ilmem = mem->space_local; |
96 | 9.78k | gs_ref_memory_t *igmem = mem->space_global; |
97 | 9.78k | gs_ref_memory_t *ismem = mem->space_system; |
98 | | |
99 | 9.78k | if (ilmem != NULL) { |
100 | 9.78k | gs_ref_memory_t *ilmem_stable = (gs_ref_memory_t *)(ilmem->stable_memory); |
101 | 9.78k | gs_memory_free_all((gs_memory_t *)ilmem_stable, FREE_ALL_EVERYTHING, "ialloc_finit"); |
102 | 9.78k | gs_memory_free_all((gs_memory_t *)ilmem, FREE_ALL_EVERYTHING, "ialloc_finit"); |
103 | 9.78k | } |
104 | | |
105 | 9.78k | if (igmem != NULL) { |
106 | 9.78k | gs_ref_memory_t *igmem_stable = (gs_ref_memory_t *)(igmem->stable_memory); |
107 | 9.78k | gs_memory_free_all((gs_memory_t *)igmem_stable, FREE_ALL_EVERYTHING, "ialloc_finit"); |
108 | 9.78k | gs_memory_free_all((gs_memory_t *)igmem, FREE_ALL_EVERYTHING, "ialloc_finit"); |
109 | 9.78k | } |
110 | | |
111 | 9.78k | if (ismem != NULL) |
112 | 9.78k | gs_memory_free_all((gs_memory_t *)ismem, FREE_ALL_EVERYTHING, "ialloc_finit"); |
113 | 9.78k | } |
114 | 9.78k | } |
115 | | |
116 | | /* ================ Local/global VM ================ */ |
117 | | |
118 | | /* Get the space attribute of an allocator */ |
119 | | uint |
120 | | imemory_space(const gs_ref_memory_t * iimem) |
121 | 60.1M | { |
122 | 60.1M | return iimem->space; |
123 | 60.1M | } |
124 | | |
125 | | /* Select the allocation space. */ |
126 | | void |
127 | | ialloc_set_space(gs_dual_memory_t * dmem, uint space) |
128 | 30.9M | { |
129 | 30.9M | gs_ref_memory_t *mem = dmem->spaces_indexed[space >> r_space_shift]; |
130 | | |
131 | 30.9M | dmem->current = mem; |
132 | 30.9M | dmem->current_space = mem->space; |
133 | 30.9M | } |
134 | | |
135 | | /* Get the l_new attribute of a current allocator. */ |
136 | | /* (A copy of the new_mask in the gs_dual_memory_t.) */ |
137 | | uint |
138 | | imemory_new_mask(const gs_ref_memory_t *imem) |
139 | 77.9M | { |
140 | 77.9M | return imem->new_mask; |
141 | 77.9M | } |
142 | | |
143 | | /* Get the save level of an allocator. */ |
144 | | int |
145 | | imemory_save_level(const gs_ref_memory_t *imem) |
146 | 961k | { |
147 | 961k | return imem->save_level; |
148 | 961k | } |
149 | | |
150 | | /* Reset the requests. */ |
151 | | void |
152 | | ialloc_reset_requested(gs_dual_memory_t * dmem) |
153 | 195k | { |
154 | 195k | dmem->space_system->gc_status.requested = 0; |
155 | 195k | dmem->space_global->gc_status.requested = 0; |
156 | 195k | dmem->space_local->gc_status.requested = 0; |
157 | 195k | } |
158 | | |
159 | | /* ================ Refs ================ */ |
160 | | |
#ifdef DEBUG
/*
 * Debug tracing helper: encode an allocator's space plus whether it is
 * its own stable memory (i.e. is a stable allocator) in a single int.
 */
static int
ialloc_trace_space(const gs_ref_memory_t *imem)
{
    const int is_stable = (imem->stable_memory == (const gs_memory_t *)imem);

    return imem->space + is_stable;
}
#endif
168 | | |
169 | | /* Register a ref root. */ |
170 | | int |
171 | | gs_register_ref_root(gs_memory_t *mem, gs_gc_root_t **root, |
172 | | void **pp, client_name_t cname) |
173 | 176k | { |
174 | 176k | return gs_register_root(mem, root, ptr_ref_type, pp, cname); |
175 | 176k | } |
176 | | |
177 | | /* |
178 | | * As noted in iastate.h, every run of refs has an extra ref at the end |
179 | | * to hold relocation information for the garbage collector; |
180 | | * since sizeof(ref) % obj_align_mod == 0, we never need to |
181 | | * allocate any additional padding space at the end of the block. |
182 | | */ |
183 | | |
/* Allocate an array of refs. */
/*
 * Allocate num_refs refs (plus the trailing GC mark ref), null-fill them,
 * and store the resulting t_array (with 'attrs' and this allocator's space
 * bits) into *parr.  Returns 0 on success or gs_error_VMerror.
 */
int
gs_alloc_ref_array(gs_ref_memory_t * mem, ref * parr, uint attrs,
                   uint num_refs, client_name_t cname)
{
    ref *obj;
    int i;

    /* If we're allocating a run of refs already, */
    /* and we aren't about to overflow the maximum run length, use it:
     * extend the current run in place by overwriting its trailing mark. */
    if (mem->cc && mem->cc->has_refs == true && mem->cc->rtop == mem->cc->cbot &&
        num_refs < (mem->cc->ctop - mem->cc->cbot) / sizeof(ref) &&
        mem->cc->rtop - (byte *) mem->cc->rcur + num_refs * sizeof(ref) <
        max_size_st_refs
        ) {
        ref *end;

        obj = (ref *) mem->cc->rtop - 1;        /* back up over last ref */
        if_debug4m('A', (const gs_memory_t *)mem, "[a%d:+$ ]%s(%u) = "PRI_INTPTR"\n",
                   ialloc_trace_space(mem), client_name_string(cname),
                   num_refs, (intptr_t)obj);
        /* Grow the run's object header and advance the clump pointers. */
        mem->cc->rcur[-1].o_size += num_refs * sizeof(ref);
        end = (ref *) (mem->cc->rtop = mem->cc->cbot +=
                       num_refs * sizeof(ref));
        make_mark(end - 1);     /* re-plant the GC relocation mark */
    } else {
        /*
         * Allocate a new run.  We have to distinguish 3 cases:
         *      - Same clump: cc unchanged, end == cc->cbot.
         *      - Large clump: cc unchanged, end != cc->cbot.
         *      - New clump: cc changed.
         */
        clump_t *cc;
        ref *end;
        alloc_change_t *cp = 0;
        int code = 0;

        /* Non-stable allocators record the allocation in the save state
         * so it can be undone by 'restore'. */
        if ((gs_memory_t *)mem != mem->stable_memory) {
            code = alloc_save_change_alloc(mem, "gs_alloc_ref_array", &cp);
            if (code < 0)
                return code;
        }
        /* The save state allocation above may have moved mem->cc */
        cc = mem->cc;
        obj = gs_alloc_struct_array((gs_memory_t *) mem, num_refs + 1,
                                    ref, &st_refs, cname);
        if (obj == 0) {
            /* We don't have to alloc_save_remove() because the change
               object hasn't been attached to the allocator yet.
             */
            gs_free_object((gs_memory_t *) mem, cp, "gs_alloc_ref_array");
            return_error(gs_error_VMerror);
        }
        /* Set the terminating ref now. */
        end = (ref *) obj + num_refs;
        make_mark(end);
        /* Set has_refs in the clump. */
        if (mem->cc && (mem->cc != cc || mem->cc->cbot == (byte *) (end + 1))) {
            /* Ordinary clump: the array sits at the top of the current
             * clump, so it becomes the clump's active ref run. */
            mem->cc->rcur = (obj_header_t *) obj;
            mem->cc->rtop = (byte *) (end + 1);
            mem->cc->has_refs = true;
        } else {
            /* Large clump. */
            /* This happens only for very large arrays, */
            /* so it doesn't need to be cheap. */
            clump_locator_t cl;

            cl.memory = mem;
            cl.cp = mem->root;
            /* clump_locate_ptr() should *never* fail here */
            if (clump_locate_ptr(obj, &cl)) {
                cl.cp->has_refs = true;
            }
            else {
                gs_abort((gs_memory_t *) mem);
            }
        }
        /* Attach the save-state change record now that the allocation
         * has definitely succeeded. */
        if (cp) {
            mem->changes = cp;
            cp->where = (ref_packed *)obj;
        }
    }
    /* Null-fill so the GC never sees uninitialized refs. */
    for (i = 0; i < num_refs; i++) {
        make_null(&(obj[i]));
    }
    make_array(parr, attrs | mem->space, num_refs, obj);
    return 0;
}
273 | | |
/* Resize an array of refs.  Currently this is only implemented */
/* for shrinking, not for growing. */
/*
 * Shrink *parr (which must be a t_array) to new_num_refs elements.
 * If the array is the most recent allocation in the current clump (LIFO),
 * the freed tail is returned to the clump; otherwise the space is simply
 * accounted as lost (the GC will recover it later).
 * Returns 0, or gs_error_Fatal on an attempt to grow / a non-t_array.
 */
int
gs_resize_ref_array(gs_ref_memory_t * mem, ref * parr,
                    uint new_num_refs, client_name_t cname)
{
    uint old_num_refs = r_size(parr);
    uint diff;
    ref *obj = parr->value.refs;

    if (new_num_refs > old_num_refs || !r_has_type(parr, t_array))
        return_error(gs_error_Fatal);
    diff = old_num_refs - new_num_refs;
    /* Check for LIFO.  See gs_free_ref_array for more details. */
    if (mem->cc && mem->cc->rtop == mem->cc->cbot &&
        (byte *) (obj + (old_num_refs + 1)) == mem->cc->rtop
        ) {
        /* Shorten the refs object: pull cbot/rtop back over the tail. */
        ref *end = (ref *) (mem->cc->cbot = mem->cc->rtop -=
                            diff * sizeof(ref));

        if_debug4m('A', (const gs_memory_t *)mem, "[a%d:<$ ]%s(%u) "PRI_INTPTR"\n",
                   ialloc_trace_space(mem), client_name_string(cname), diff,
                   (intptr_t)obj);
        /* Shrink the run's object header and re-plant the GC mark. */
        mem->cc->rcur[-1].o_size -= diff * sizeof(ref);
        make_mark(end - 1);
    } else {
        /* Punt: not LIFO, just record the tail as lost space. */
        if_debug4m('A', (const gs_memory_t *)mem, "[a%d:<$#]%s(%u) "PRI_INTPTR"\n",
                   ialloc_trace_space(mem), client_name_string(cname), diff,
                   (intptr_t)obj);
        mem->lost.refs += diff * sizeof(ref);
    }
    r_set_size(parr, new_num_refs);
    return 0;
}
310 | | |
/* Deallocate an array of refs.  Only do this if LIFO, or if */
/* the array occupies an entire clump by itself. */
/*
 * Free the ref array in *parr.  Three outcomes:
 *   1. LIFO t_array at the top of the current clump: truly deallocated.
 *   2. Very large t_array occupying a whole clump: the clump is freed.
 *   3. Otherwise (including packed arrays): the storage is null-filled
 *      (so the GC sees no dangling refs) and accounted as lost.
 */
void
gs_free_ref_array(gs_ref_memory_t * mem, ref * parr, client_name_t cname)
{
    uint num_refs = r_size(parr);
    ref *obj = parr->value.refs;

    /*
     * Compute the storage size of the array, and check for LIFO
     * freeing or a separate clump.  Note that the array might be packed;
     * for the moment, if it's anything but a t_array, punt.
     * The +1s are for the extra ref for the GC.
     */
    if (!r_has_type(parr, t_array))
        DO_NOTHING;             /* don't look for special cases */
    else if (mem->cc && mem->cc->rtop == mem->cc->cbot &&
             (byte *) (obj + (num_refs + 1)) == mem->cc->rtop
        ) {
        if ((obj_header_t *) obj == mem->cc->rcur) {
            /* Deallocate the entire refs object. */
            if ((gs_memory_t *)mem != mem->stable_memory)
                alloc_save_remove(mem, (ref_packed *)obj, "gs_free_ref_array");
            gs_free_object((gs_memory_t *) mem, obj, cname);
            mem->cc->rcur = 0;
            mem->cc->rtop = 0;
        } else {
            /* Deallocate it at the end of the refs object:
             * shrink the run and re-plant the GC mark where it started. */
            if_debug4m('A', (const gs_memory_t *)mem, "[a%d:-$ ]%s(%u) "PRI_INTPTR"\n",
                       ialloc_trace_space(mem), client_name_string(cname),
                       num_refs, (intptr_t)obj);
            mem->cc->rcur[-1].o_size -= num_refs * sizeof(ref);
            mem->cc->rtop = mem->cc->cbot = (byte *) (obj + 1);
            make_mark(obj);
        }
        return;
    } else if (num_refs >= (mem->large_size / ARCH_SIZEOF_REF - 1)) {
        /* See if this array has a clump all to itself. */
        /* We only make this check when freeing very large objects, */
        /* so it doesn't need to be cheap. */
        clump_locator_t cl;

        cl.memory = mem;
        cl.cp = mem->root;
        if (clump_locate_ptr(obj, &cl) &&
            obj == (ref *) ((obj_header_t *) (cl.cp->cbase) + 1) &&
            (byte *) (obj + (num_refs + 1)) == cl.cp->cend
            ) {
            /* Free the clump. */
            if_debug4m('a', (const gs_memory_t *)mem, "[a%d:-$L]%s(%u) "PRI_INTPTR"\n",
                       ialloc_trace_space(mem), client_name_string(cname),
                       num_refs, (intptr_t)obj);
            if ((gs_memory_t *)mem != mem->stable_memory) {
                alloc_save_remove(mem, (ref_packed *)obj, "gs_free_ref_array");
            }
            alloc_free_clump(cl.cp, mem);
            return;
        }
    }
    /* Punt, but fill the array with nulls so that there won't be */
    /* dangling references to confuse the garbage collector. */
    if_debug4m('A', (const gs_memory_t *)mem, "[a%d:-$#]%s(%u) "PRI_INTPTR"\n",
               ialloc_trace_space(mem), client_name_string(cname), num_refs,
               (intptr_t)obj);
    {
        uint size;

        switch (r_type(parr)) {
            case t_shortarray:
                size = num_refs * sizeof(ref_packed);
                break;
            case t_mixedarray:{
                /* We have to parse the array to compute the storage size. */
                uint i = 0;
                const ref_packed *p = parr->value.packed;

                for (; i < num_refs; ++i)
                    p = packed_next(p);
                size = (const byte *)p - (const byte *)parr->value.packed;
                break;
            }
            case t_array:
                size = num_refs * sizeof(ref);
                break;
            default:
                if_debug3('A', "Unknown type 0x%x in free_ref_array(%u,"PRI_INTPTR")!",
                          r_type(parr), num_refs, (intptr_t)obj);
                return;
        }
        /*
         * If there are any leftover packed elements, we don't
         * worry about them, since they can't be dangling references.
         */
        refset_null_new(obj, size / sizeof(ref), 0);
        mem->lost.refs += size;
    }
}
408 | | |
409 | | /* Allocate a string ref. */ |
410 | | int |
411 | | gs_alloc_string_ref(gs_ref_memory_t * mem, ref * psref, |
412 | | uint attrs, uint nbytes, client_name_t cname) |
413 | 343 | { |
414 | 343 | byte *str = gs_alloc_string((gs_memory_t *) mem, nbytes, cname); |
415 | | |
416 | 343 | if (str == 0) |
417 | 0 | return_error(gs_error_VMerror); |
418 | 343 | make_string(psref, attrs | mem->space, nbytes, str); |
419 | 343 | return 0; |
420 | 343 | } |