/src/ghostpdl/base/gsalloc.c
Line | Count | Source |
1 | | /* Copyright (C) 2001-2023 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Standard memory allocator */ |
18 | | #include "gx.h" |
19 | | #include "memory_.h" |
20 | | #include "gserrors.h" |
21 | | #include "gsexit.h" |
22 | | #include "gsmdebug.h" |
23 | | #include "gsstruct.h" |
24 | | #include "gxalloc.h" |
25 | | #include "stream.h" /* for clearing stream list */ |
26 | | #include "malloc_.h" /* For MEMENTO */ |
27 | | |
28 | | /* |
29 | | * Define whether to try consolidating space before adding a new clump. |
30 | | * The default is not to do this, because it is computationally |
31 | | * expensive and doesn't seem to help much. However, this is done for |
32 | | * "controlled" spaces whether or not the #define is in effect. |
33 | | */ |
34 | | /*#define CONSOLIDATE_BEFORE_ADDING_CLUMP */ |
35 | | |
36 | | /* |
37 | | * This allocator produces tracing messages of the form |
38 | | * [aNMOTS]... |
39 | | * where |
40 | | * N is the VM space number, +1 if we are allocating from stable memory. |
41 | | * M is : for movable objects, | for immovable, |
42 | | * O is {alloc = +, free = -, grow = >, shrink = <}, |
43 | | * T is {bytes = b, object = <, ref = $, string = >}, and |
44 | | * S is {small freelist = f, large freelist = F, LIFO = space, |
45 | | * own clump = L, lost = #, lost own clump = ~, other = .}. |
46 | | */ |
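For example (hypothetical values), a trace line such as

    [a2:+<f]int_gstate gs_gstate(512) = 0x7f2b3c001080

decodes per the legend above as: VM space 2, movable (:), allocation (+), object (<), taken from a small freelist (f).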
47 | | #ifdef DEBUG |
48 | | static int |
49 | | alloc_trace_space(const gs_ref_memory_t *imem) |
50 | | { |
51 | | return (int)(imem->space + (imem->stable_memory == (const gs_memory_t *)imem)); |
52 | | } |
53 | | static void |
54 | | alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname, |
55 | | gs_memory_type_ptr_t stype, uint size, const void *ptr) |
56 | | { |
57 | | if_debug7m('A', (const gs_memory_t *)imem, "[a%d%s]%s %s(%u) %s"PRI_INTPTR"\n", |
58 | | alloc_trace_space(imem), chars, client_name_string(cname), |
59 | | (ptr == 0 || stype == 0 ? "" : |
60 | | struct_type_name_string(stype)), |
61 | | size, (chars[1] == '+' ? "= " : ""), (intptr_t)ptr); |
62 | | } |
63 | | static bool |
64 | | alloc_size_is_ok(gs_memory_type_ptr_t stype) |
65 | | { |
66 | | return (stype->ssize > 0 && stype->ssize < 0x200000); |
67 | | } |
68 | | # define ALLOC_CHECK_SIZE(mem,stype)\ |
69 | | BEGIN\ |
70 | | if (!alloc_size_is_ok(stype)) {\ |
71 | | mlprintf2(mem,"size of struct type "PRI_INTPTR" is 0x%lx!\n",\ |
72 | | (intptr_t)(stype), (ulong)((stype)->ssize));\ |
73 | | return 0;\ |
74 | | }\ |
75 | | END |
76 | | #else |
77 | 764M | # define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING |
78 | 275M | # define ALLOC_CHECK_SIZE(mem,stype) DO_NOTHING |
79 | | #endif |
80 | | |
81 | | /* |
82 | | * The structure descriptor for allocators. Even though allocators |
83 | | * are allocated outside GC space, they reference objects within it. |
84 | | */ |
85 | | public_st_ref_memory(); |
86 | | static |
87 | 763k | ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0; |
88 | 127k | ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes); |
89 | 127k | ENUM_PTR(3, gs_ref_memory_t, saved); |
90 | 763k | ENUM_PTR(4, gs_ref_memory_t, scan_limit); |
91 | 763k | ENUM_PTRS_END |
92 | 124k | static RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr) |
93 | 124k | { |
94 | 124k | RELOC_PTR(gs_ref_memory_t, streams); |
95 | 124k | RELOC_PTR(gs_ref_memory_t, names_array); |
96 | 124k | RELOC_PTR(gs_ref_memory_t, changes); |
97 | 124k | RELOC_PTR(gs_ref_memory_t, scan_limit); |
98 | | /* Don't relocate the saved pointer now -- see igc.c for details. */ |
99 | 124k | mptr->reloc_saved = RELOC_OBJ(mptr->saved); |
100 | 124k | } |
101 | 124k | RELOC_PTRS_END |
102 | | |
103 | | /* |
104 | | * Define the flags for alloc_obj, which implements all but the fastest |
105 | | * case of allocation. |
106 | | */ |
107 | | typedef enum { |
108 | | ALLOC_IMMOVABLE = 1, |
109 | | ALLOC_DIRECT = 2 /* called directly, without fast-case checks */ |
110 | | } alloc_flags_t; |
111 | | |
112 | | /* Forward references */ |
113 | | static void remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top); |
114 | | static obj_header_t *large_freelist_alloc(gs_ref_memory_t *mem, obj_size_t size); |
115 | | static obj_header_t *scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size); |
116 | | static size_t compute_free_objects(gs_ref_memory_t *); |
117 | | static obj_header_t *alloc_obj(gs_ref_memory_t *, obj_size_t, gs_memory_type_ptr_t, alloc_flags_t, client_name_t); |
118 | | static void consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem); |
119 | | static void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, obj_size_t size, clump_t *cp); |
120 | | static clump_t *alloc_acquire_clump(gs_ref_memory_t *, size_t, bool, client_name_t); |
121 | | static clump_t *alloc_add_clump(gs_ref_memory_t *, size_t, client_name_t); |
122 | | void alloc_close_clump(gs_ref_memory_t *); |
123 | | |
124 | | /* |
125 | | * Define the standard implementation (with garbage collection) |
126 | | * of Ghostscript's memory manager interface. |
127 | | */ |
128 | | /* Raw memory procedures */ |
129 | | static gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable); |
130 | | static gs_memory_proc_resize_object(i_resize_object); |
131 | | static gs_memory_proc_free_object(i_free_object); |
132 | | static gs_memory_proc_stable(i_stable); |
133 | | static gs_memory_proc_status(i_status); |
134 | | static gs_memory_proc_free_all(i_free_all); |
135 | | static gs_memory_proc_consolidate_free(i_consolidate_free); |
136 | | |
137 | | /* Object memory procedures */ |
138 | | static gs_memory_proc_alloc_bytes(i_alloc_bytes); |
139 | | static gs_memory_proc_alloc_struct(i_alloc_struct); |
140 | | static gs_memory_proc_alloc_struct(i_alloc_struct_immovable); |
141 | | static gs_memory_proc_alloc_byte_array(i_alloc_byte_array); |
142 | | static gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable); |
143 | | static gs_memory_proc_alloc_struct_array(i_alloc_struct_array); |
144 | | static gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable); |
145 | | static gs_memory_proc_object_size(i_object_size); |
146 | | static gs_memory_proc_object_type(i_object_type); |
147 | | static gs_memory_proc_alloc_string(i_alloc_string); |
148 | | static gs_memory_proc_alloc_string(i_alloc_string_immovable); |
149 | | static gs_memory_proc_resize_string(i_resize_string); |
150 | | static gs_memory_proc_free_string(i_free_string); |
151 | | static gs_memory_proc_register_root(i_register_root); |
152 | | static gs_memory_proc_unregister_root(i_unregister_root); |
153 | | static gs_memory_proc_enable_free(i_enable_free); |
154 | | static gs_memory_proc_set_object_type(i_set_object_type); |
155 | | static gs_memory_proc_defer_frees(i_defer_frees); |
156 | | |
157 | | /* We export the procedures for subclasses. */ |
158 | | const gs_memory_procs_t gs_ref_memory_procs = |
159 | | { |
160 | | /* Raw memory procedures */ |
161 | | i_alloc_bytes_immovable, |
162 | | i_resize_object, |
163 | | i_free_object, |
164 | | i_stable, |
165 | | i_status, |
166 | | i_free_all, |
167 | | i_consolidate_free, |
168 | | /* Object memory procedures */ |
169 | | i_alloc_bytes, |
170 | | i_alloc_struct, |
171 | | i_alloc_struct_immovable, |
172 | | i_alloc_byte_array, |
173 | | i_alloc_byte_array_immovable, |
174 | | i_alloc_struct_array, |
175 | | i_alloc_struct_array_immovable, |
176 | | i_object_size, |
177 | | i_object_type, |
178 | | i_alloc_string, |
179 | | i_alloc_string_immovable, |
180 | | i_resize_string, |
181 | | i_free_string, |
182 | | i_register_root, |
183 | | i_unregister_root, |
184 | | i_enable_free, |
185 | | i_set_object_type, |
186 | | i_defer_frees |
187 | | }; |
188 | | |
189 | | /* |
190 | | * Previous versions of this code used a simple linked list of |
191 | | * clumps. We now use a splay tree of clumps instead. |
192 | | * Splay trees are documented in "Algorithms and Data |
193 | | * Structures" by Jeffrey H Kingston. |
194 | | * |
195 | | * Essentially they are binary trees, ordered by address of the |
196 | | * 'cbase' pointer. The 'cunning' feature with them is that |
197 | | * when a node in the tree is accessed, we do a 'move to root' |
198 | | * operation. This involves performing various 'rotations' as |
199 | | * we move up the tree, the net effect of which tends to |
200 | | * lead to more balanced trees (see Kingston for analysis). |
201 | | * It also leads to better locality of reference in that |
202 | | * recently accessed nodes stay near the root. |
203 | | */ |
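A minimal usage sketch of the clump walker API defined below (hypothetical caller, not part of this file):

    static void
    visit_clumps_in_order(const gs_ref_memory_t *mem)
    {
        clump_splay_walker sw;
        clump_t *cp;

        /* clump_splay_walk_init() returns the clump with the lowest cbase;
         * clump_splay_walk_fwd() then steps in-order, returning NULL once
         * the whole tree has been visited. */
        for (cp = clump_splay_walk_init(&sw, mem); cp != NULL;
             cp = clump_splay_walk_fwd(&sw)) {
            /* ... examine cp ... */
        }
    }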
204 | | |
205 | | /* #define DEBUG_CLUMPS */ |
206 | | #ifdef DEBUG_CLUMPS |
207 | | #define SANITY_CHECK(cp) sanity_check(cp) |
208 | | #define SANITY_CHECK_MID(cp) sanity_check_mid(cp) |
209 | | |
210 | | static void |
211 | | broken_splay() |
212 | | { |
213 | | dlprintf("Broken splay tree!\n"); |
214 | | } |
215 | | |
216 | | void sanity_check_rec(clump_t *cp) |
217 | | { |
218 | | splay_dir_t from = SPLAY_FROM_ABOVE; |
219 | | |
220 | | while (cp) |
221 | | { |
222 | | if (from == SPLAY_FROM_ABOVE) |
223 | | { |
224 | | /* We have arrived from above. Step left. */ |
225 | | if (cp->left) |
226 | | { |
227 | | if (cp->left->cbase > cp->cbase || cp->left->parent != cp) |
228 | | broken_splay(); |
229 | | cp = cp->left; |
230 | | from = SPLAY_FROM_ABOVE; |
231 | | continue; |
232 | | } |
233 | | /* No left to step to, so imagine we have just arrived from there */ |
234 | | from = SPLAY_FROM_LEFT; |
235 | | } |
236 | | if (from == SPLAY_FROM_LEFT) |
237 | | { |
238 | | /* We have arrived from the left. Step right. */ |
239 | | if (cp->right) |
240 | | { |
241 | | if (cp->right->cbase < cp->cbase || cp->right->parent != cp) |
242 | | broken_splay(); |
243 | | cp = cp->right; |
244 | | from = SPLAY_FROM_ABOVE; |
245 | | continue; |
246 | | } |
247 | | /* No right to step to, so imagine we have just arrived from there. */ |
248 | | from = SPLAY_FROM_RIGHT; |
249 | | } |
250 | | if (from == SPLAY_FROM_RIGHT) |
251 | | { |
252 | | /* We have arrived from the right. Step up. */ |
253 | | if (cp->parent == NULL) |
254 | | break; |
255 | | if (cp->parent->left != cp && cp->parent->right != cp) |
256 | | broken_splay(); |
257 | | from = (cp->parent->left == cp ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT); |
258 | | cp = cp->parent; |
259 | | } |
260 | | } |
261 | | } |
262 | | |
263 | | void sanity_check(clump_t *cp) |
264 | | { |
265 | | sanity_check_rec(cp); |
266 | | } |
267 | | |
268 | | void sanity_check_mid(clump_t *cp) |
269 | | { |
270 | | clump_t *parent; |
271 | | |
272 | | while ((parent = cp->parent) != NULL) |
273 | | { |
274 | | if (parent->left == cp) |
275 | | { |
276 | | if (parent->right == cp) |
277 | | broken_splay(); |
278 | | } |
279 | | else if (parent->right != cp) |
280 | | broken_splay(); |
281 | | cp = parent; |
282 | | } |
283 | | |
284 | | sanity_check_rec(cp); |
285 | | } |
286 | | |
287 | | #else |
288 | 30.1M | #define SANITY_CHECK(cp) while (0) {} |
289 | 112M | #define SANITY_CHECK_MID(cp) while (0) {} |
290 | | #endif |
291 | | |
292 | | /* When initializing the walk at the root, we return the smallest |
293 | | * in-order node immediately, and set up so that we step right for |
294 | | * the next one. */ |
295 | | clump_t * |
296 | | clump_splay_walk_init(clump_splay_walker *sw, const gs_ref_memory_t *mem) |
297 | 928k | { |
298 | 928k | clump_t *cp = mem->root; |
299 | | |
300 | 928k | if (cp) |
301 | 928k | { |
302 | 928k | SANITY_CHECK(cp); |
303 | | |
304 | 928k | sw->from = SPLAY_FROM_LEFT; |
305 | 5.87M | while (cp->left) |
306 | 4.95M | { |
307 | 4.95M | cp = cp->left; |
308 | 4.95M | } |
309 | 928k | } |
310 | 928k | sw->cp = cp; |
311 | 928k | sw->end = NULL; |
312 | 928k | return cp; |
313 | 928k | } |
314 | | |
315 | | clump_t * |
316 | | clump_splay_walk_bwd_init(clump_splay_walker *sw, const gs_ref_memory_t *mem) |
317 | 70.1k | { |
318 | 70.1k | clump_t *cp = mem->root; |
319 | | |
320 | 70.1k | if (cp) |
321 | 70.1k | { |
322 | 70.1k | SANITY_CHECK(cp); |
323 | | |
324 | 70.1k | sw->from = SPLAY_FROM_RIGHT; |
325 | 193k | while (cp->right) |
326 | 123k | { |
327 | 123k | cp = cp->right; |
328 | 123k | } |
329 | 70.1k | } |
330 | 70.1k | sw->cp = cp; |
331 | 70.1k | sw->end = NULL; |
332 | 70.1k | return cp; |
333 | 70.1k | } |
334 | | |
335 | | /* When initializing a 'mid walk' (i.e. starting from a non-root |
336 | | * node), we return the given node first, and continue onwards |
337 | | * in in-order fashion. |
338 | | */ |
339 | | clump_t * |
340 | | clump_splay_walk_init_mid(clump_splay_walker *sw, clump_t *cp) |
341 | 98.1M | { |
342 | 98.1M | sw->from = SPLAY_FROM_LEFT; |
343 | 98.1M | sw->cp = cp; |
344 | 98.1M | sw->end = cp; |
345 | 98.1M | if (cp) |
346 | 98.0M | { |
347 | 98.0M | SANITY_CHECK_MID(cp); |
348 | 98.0M | } |
349 | 98.1M | return cp; |
350 | 98.1M | } |
351 | | |
352 | | clump_t * |
353 | | clump_splay_walk_fwd(clump_splay_walker *sw) |
354 | 107M | { |
355 | 107M | clump_t *cp = sw->cp; |
356 | 107M | int from = sw->from; |
357 | | |
358 | 107M | if (cp == NULL) |
359 | 20 | return NULL; |
360 | | |
361 | | /* We step through the tree, and stop when we arrive |
362 | | * at sw->end in an in order manner (i.e. by moving from |
363 | | * the left). */ |
364 | 255M | while (1) |
365 | 255M | { |
366 | 255M | if (from == SPLAY_FROM_ABOVE) |
367 | 102M | { |
368 | | /* We have arrived from above. Step left. */ |
369 | 102M | if (cp->left) |
370 | 54.8M | { |
371 | 54.8M | cp = cp->left; |
372 | 54.8M | from = SPLAY_FROM_ABOVE; |
373 | 54.8M | continue; |
374 | 54.8M | } |
375 | | /* No left to step to, so imagine we have just arrived from there */ |
376 | 47.8M | from = SPLAY_FROM_LEFT; |
377 | | /* Have we reached the stopping point? */ |
378 | 47.8M | if (cp == sw->end) |
379 | 193k | cp = NULL; |
380 | | /* We want to stop here, for inorder operation. So break out of the loop. */ |
381 | 47.8M | break; |
382 | 102M | } |
383 | 152M | if (from == SPLAY_FROM_LEFT) |
384 | 107M | { |
385 | | /* We have arrived from the left. Step right. */ |
386 | 107M | if (cp->right) |
387 | 46.6M | { |
388 | 46.6M | cp = cp->right; |
389 | 46.6M | from = SPLAY_FROM_ABOVE; |
390 | 46.6M | continue; |
391 | 46.6M | } |
392 | | /* No right to step to, so imagine we have just arrived from there. */ |
393 | 60.6M | from = SPLAY_FROM_RIGHT; |
394 | 60.6M | } |
395 | 106M | if (from == SPLAY_FROM_RIGHT) |
396 | 106M | { |
397 | | /* We have arrived from the right. Step up. */ |
398 | 106M | clump_t *old = cp; |
399 | 106M | cp = cp->parent; |
400 | 106M | if (cp == NULL) |
401 | 2.13M | { |
402 | | /* We've reached the root of the tree. Is this our stopping point? */ |
403 | 2.13M | if (sw->end == NULL) |
404 | 927k | break; |
405 | | /* If not, step on. */ |
406 | 1.21M | cp = old; |
407 | 1.21M | from = SPLAY_FROM_ABOVE; |
408 | 1.21M | } |
409 | 104M | else |
410 | 104M | { |
411 | 104M | from = (cp->left == old ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT); |
412 | 104M | if (from == SPLAY_FROM_LEFT) |
413 | 58.4M | { |
414 | | /* Have we reached the stopping point? */ |
415 | 58.4M | if (cp == sw->end) |
416 | 797k | cp = NULL; |
417 | 58.4M | break; |
418 | 58.4M | } |
419 | 104M | } |
420 | 106M | } |
421 | 106M | } |
422 | 107M | sw->cp = cp; |
423 | 107M | sw->from = from; |
424 | 107M | return cp; |
425 | 107M | } |
426 | | |
427 | | clump_t * |
428 | | clump_splay_walk_bwd(clump_splay_walker *sw) |
429 | 1.62M | { |
430 | 1.62M | clump_t *cp = sw->cp; |
431 | 1.62M | int from = sw->from; |
432 | | |
433 | 1.62M | if (cp == NULL) |
434 | 0 | return NULL; |
435 | | |
436 | | /* We step backwards through the tree, and stop when we arrive |
437 | | * at sw->end in a reverse in order manner (i.e. by moving from |
438 | | * the right). */ |
439 | 3.96M | while (1) |
440 | 3.96M | { |
441 | 3.96M | if (from == SPLAY_FROM_ABOVE) |
442 | 1.43M | { |
443 | | /* We have arrived from above. Step right. */ |
444 | 1.43M | if (cp->right) |
445 | 527k | { |
446 | 527k | cp = cp->right; |
447 | 527k | from = SPLAY_FROM_ABOVE; |
448 | 527k | continue; |
449 | 527k | } |
450 | | /* No right to step to, so imagine we have just arrived from there. */ |
451 | 905k | from = SPLAY_FROM_RIGHT; |
452 | | /* Have we reached our end? */ |
453 | 905k | if (cp == sw->end) |
454 | 0 | cp = NULL; |
455 | | /* Stop to run inorder operation */ |
456 | 905k | break; |
457 | 1.43M | } |
458 | 2.53M | if (from == SPLAY_FROM_RIGHT) |
459 | 1.62M | { |
460 | | /* We have arrived from the right. Step left. */ |
461 | 1.62M | if (cp->left) |
462 | 905k | { |
463 | 905k | cp = cp->left; |
464 | 905k | from = SPLAY_FROM_ABOVE; |
465 | 905k | continue; |
466 | 905k | } |
467 | | /* No left to step to, so imagine we have just arrived from there. */ |
468 | 721k | from = SPLAY_FROM_LEFT; |
469 | 721k | } |
470 | 1.62M | if (from == SPLAY_FROM_LEFT) |
471 | 1.62M | { |
472 | | /* We have arrived from the left. Step up. */ |
473 | 1.62M | clump_t *old = cp; |
474 | 1.62M | cp = cp->parent; |
475 | 1.62M | from = (cp == NULL || cp->left != old ? SPLAY_FROM_RIGHT : SPLAY_FROM_LEFT); |
476 | 1.62M | if (from == SPLAY_FROM_RIGHT) |
477 | 721k | { |
478 | 721k | if (cp == sw->end) |
479 | 70.1k | cp = NULL; |
480 | 721k | break; |
481 | 721k | } |
482 | 1.62M | } |
483 | 1.62M | } |
484 | 1.62M | sw->cp = cp; |
485 | 1.62M | sw->from = from; |
486 | 1.62M | return cp; |
487 | 1.62M | } |
488 | | |
489 | | static clump_t * |
490 | | clump_splay_remove(clump_t *cp, gs_ref_memory_t *imem) |
491 | 25.9M | { |
492 | 25.9M | clump_t *replacement; |
493 | | |
494 | 25.9M | if (cp->left == NULL) |
495 | 2.94M | { |
496 | | /* At most one child - easy */ |
497 | 2.94M | replacement = cp->right; |
498 | 2.94M | } |
499 | 23.0M | else if (cp->right == NULL) |
500 | 11.7M | { |
501 | | /* Strictly one child - easy */ |
502 | 11.7M | replacement = cp->left; |
503 | 11.7M | } |
504 | 11.2M | else |
505 | 11.2M | { |
506 | | /* 2 Children - tricky */ |
507 | | /* Find the in-order predecessor of cp */ |
508 | 11.2M | replacement = cp->left; |
509 | 12.5M | while (replacement->right) |
510 | 1.22M | replacement = replacement->right; |
511 | | /* Remove replacement - easy as just one child */ |
512 | 11.2M | (void)clump_splay_remove(replacement, imem); |
513 | | /* Replace cp with replacement */ |
514 | 11.2M | if (cp->left) |
515 | 10.8M | cp->left->parent = replacement; |
516 | 11.2M | cp->right->parent = replacement; |
517 | 11.2M | replacement->left = cp->left; |
518 | 11.2M | replacement->right = cp->right; |
519 | 11.2M | } |
520 | 25.9M | if (cp->parent) |
521 | 13.1M | { |
522 | 13.1M | if (cp->parent->left == cp) |
523 | 11.6M | cp->parent->left = replacement; |
524 | 1.50M | else |
525 | 1.50M | cp->parent->right = replacement; |
526 | 13.1M | } |
527 | 12.7M | else |
528 | 12.7M | imem->root = replacement; |
529 | 25.9M | if (replacement) |
530 | 23.4M | replacement->parent = cp->parent; |
531 | 25.9M | return replacement; |
532 | 25.9M | } |
533 | | |
534 | | /* Here we apply a function to all the nodes in a tree in |
535 | | * depth first order. This means that the given function |
536 | | * can safely alter: 1) the clump, 2) its children, |
537 | | * 3) its parent's child pointer that points to it |
538 | | * without fear of corruption. Specifically this means |
539 | | * that the function can free (and unlink) the node |
540 | | * if it wants. |
541 | | */ |
542 | | clump_t * |
543 | | clump_splay_app(clump_t *root, gs_ref_memory_t *imem, splay_app_result_t (*fn)(clump_t *, void *), void *arg) |
544 | 277k | { |
545 | 277k | clump_t *step_to; |
546 | 277k | clump_t *cp = root; |
547 | 277k | int from = SPLAY_FROM_ABOVE; |
548 | 277k | splay_app_result_t res; |
549 | | |
550 | 277k | SANITY_CHECK(cp); |
551 | | |
552 | 10.0M | while (cp) |
553 | 9.85M | { |
554 | 9.85M | if (from == SPLAY_FROM_ABOVE) |
555 | 5.06M | { |
556 | | /* We have arrived from above. Step left. */ |
557 | 5.06M | step_to = cp->left; |
558 | 5.06M | if (step_to) |
559 | 2.71M | { |
560 | 2.71M | from = SPLAY_FROM_ABOVE; |
561 | 2.71M | cp = step_to; |
562 | 2.71M | } |
563 | 2.34M | else |
564 | 2.34M | { |
565 | | /* No left to step to, so imagine we have just arrived from the left */ |
566 | 2.34M | from = SPLAY_FROM_LEFT; |
567 | 2.34M | } |
568 | 5.06M | } |
569 | 9.85M | if (from == SPLAY_FROM_LEFT) |
570 | 5.06M | { |
571 | | /* We have arrived from the left. Step right. */ |
572 | 5.06M | step_to = cp->right; |
573 | 5.06M | if (step_to) |
574 | 2.07M | { |
575 | 2.07M | from = SPLAY_FROM_ABOVE; |
576 | 2.07M | cp = step_to; |
577 | 2.07M | } |
578 | 2.99M | else |
579 | 2.99M | { |
580 | | /* No right to step to, so imagine we have just arrived from the right. */ |
581 | 2.99M | from = SPLAY_FROM_RIGHT; |
582 | 2.99M | } |
583 | 5.06M | } |
584 | 9.85M | if (from == SPLAY_FROM_RIGHT) |
585 | 5.06M | { |
586 | | /* We have arrived from the right. Step up. */ |
587 | 5.06M | step_to = cp->parent; |
588 | 5.06M | if (step_to) |
589 | 4.79M | { |
590 | 4.79M | from = (step_to->left == cp ? SPLAY_FROM_LEFT : SPLAY_FROM_RIGHT); |
591 | 4.79M | } |
592 | 5.06M | res = fn(cp, arg); |
593 | 5.06M | if (res & SPLAY_APP_STOP) |
594 | 43.5k | return cp; |
595 | 5.02M | cp = step_to; |
596 | 5.02M | } |
597 | 9.85M | } |
598 | 234k | return cp; |
599 | 277k | } |
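A minimal sketch of such a callback (hypothetical; free_all_not_allocator() further below is a real in-file example):

    static splay_app_result_t
    count_clumps(clump_t *cp, void *arg)
    {
        int *n = (int *)arg;

        /* Because clump_splay_app() visits depth first, this callback
         * could equally free and unlink cp here. */
        (*n)++;
        return SPLAY_APP_CONTINUE;
    }

    /* Usage: int n = 0; clump_splay_app(mem->root, mem, count_clumps, &n); */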
600 | | |
601 | | /* Move the given node to the root of the tree, by |
602 | | * performing a series of the following rotations. |
603 | | * The key observation here is that all these |
604 | | * rotations preserve the ordering of the tree, and |
605 | | * result in 'x' getting higher. |
606 | | * |
607 | | * Case 1: z x Case 1b: z x |
608 | | * # # # # # # # # |
609 | | * y D A y A y y D |
610 | | * # # => # # # # => # # |
611 | | * x C B z B x z C |
612 | | * # # # # # # # # |
613 | | * A B C D C D A B |
614 | | * |
615 | | * Case 2: z x Case 2b: z x |
616 | | * # # ## ## # # ## ## |
617 | | * y D y z A y z y |
618 | | * # # => # # # # # # => # # # # |
619 | | * A x A B C D x D A B C D |
620 | | * # # # # |
621 | | * B C B C |
622 | | * |
623 | | * Case 3: y x Case 3b: y x |
624 | | * # # # # # # # # |
625 | | * x C => A y A x => y C |
626 | | * # # # # # # # # |
627 | | * A B B C B C A B |
628 | | */ |
629 | | static void |
630 | | splay_move_to_root(clump_t *x, gs_ref_memory_t *mem) |
631 | 70.2M | { |
632 | 70.2M | clump_t *y, *z; |
633 | | |
634 | 70.2M | if (x == NULL) |
635 | 0 | return; |
636 | | |
637 | 141M | while ((y = x->parent) != NULL) { |
638 | 71.6M | if ((z = y->parent) != NULL) { |
639 | 40.2M | x->parent = z->parent; |
640 | 40.2M | if (x->parent) { |
641 | 13.9M | if (x->parent->left == z) |
642 | 7.16M | x->parent->left = x; |
643 | 6.73M | else |
644 | 6.73M | x->parent->right = x; |
645 | 13.9M | } |
646 | 40.2M | y->parent = x; |
647 | | /* Case 1, 1b, 2 or 2b */ |
648 | 40.2M | if (y->left == x) { |
649 | | /* Case 1 or 2b */ |
650 | 26.2M | if (z->left == y) { |
651 | | /* Case 1 */ |
652 | 11.8M | y->left = x->right; |
653 | 11.8M | if (y->left) |
654 | 9.20M | y->left->parent = y; |
655 | 11.8M | z->left = y->right; |
656 | 11.8M | if (z->left) |
657 | 8.64M | z->left->parent = z; |
658 | 11.8M | y->right = z; |
659 | 11.8M | z->parent = y; |
660 | 14.4M | } else { |
661 | | /* Case 2b */ |
662 | 14.4M | z->right = x->left; |
663 | 14.4M | if (z->right) |
664 | 2.19M | z->right->parent = z; |
665 | 14.4M | y->left = x->right; |
666 | 14.4M | if (y->left) |
667 | 2.62M | y->left->parent = y; |
668 | 14.4M | x->left = z; |
669 | 14.4M | z->parent = x; |
670 | 14.4M | } |
671 | 26.2M | x->right = y; |
672 | 26.2M | } else { |
673 | | /* Case 2 or 1b */ |
674 | 13.9M | if (z->left == y) { |
675 | | /* Case 2 */ |
676 | 3.60M | y->right = x->left; |
677 | 3.60M | if (y->right) |
678 | 2.35M | y->right->parent = y; |
679 | 3.60M | z->left = x->right; |
680 | 3.60M | if (z->left) |
681 | 2.32M | z->left->parent = z; |
682 | 3.60M | x->right = z; |
683 | 3.60M | z->parent = x; |
684 | 10.3M | } else { |
685 | | /* Case 1b */ |
686 | 10.3M | z->right = y->left; |
687 | 10.3M | if (z->right) |
688 | 7.97M | z->right->parent = z; |
689 | 10.3M | y->right = x->left; |
690 | 10.3M | if (y->right) |
691 | 7.92M | y->right->parent = y; |
692 | 10.3M | y->left = z; |
693 | 10.3M | z->parent = y; |
694 | 10.3M | } |
695 | 13.9M | x->left = y; |
696 | 13.9M | } |
697 | 40.2M | } else { |
698 | | /* Case 3 or 3b */ |
699 | 31.3M | x->parent = NULL; |
700 | 31.3M | y->parent = x; |
701 | 31.3M | if (y->left == x) { |
702 | | /* Case 3 */ |
703 | 14.5M | y->left = x->right; |
704 | 14.5M | if (y->left) |
705 | 12.0M | y->left->parent = y; |
706 | 14.5M | x->right = y; |
707 | 16.7M | } else { |
708 | | /* Case 3b */ |
709 | 16.7M | y->right = x->left; |
710 | 16.7M | if (y->right) |
711 | 12.7M | y->right->parent = y; |
712 | 16.7M | x->left = y; |
713 | 16.7M | } |
714 | 31.3M | } |
715 | 71.6M | } |
716 | 70.2M | mem->root = x; |
717 | 70.2M | } |
718 | | |
719 | | static void |
720 | | splay_insert(clump_t *cp, gs_ref_memory_t *mem) |
721 | 14.6M | { |
722 | 14.6M | clump_t *node = NULL; |
723 | 14.6M | clump_t **root = &mem->root; |
724 | | |
725 | 48.7M | while (*root) { |
726 | 34.1M | node = *root; |
727 | 34.1M | if (PTR_LT(cp->cbase, node->cbase)) { |
728 | 17.1M | root = &node->left; |
729 | 17.1M | } else { |
730 | 17.0M | root = &node->right; |
731 | 17.0M | } |
732 | 34.1M | } |
733 | 14.6M | *root = cp; |
734 | 14.6M | cp->left = NULL; |
735 | 14.6M | cp->right = NULL; |
736 | 14.6M | cp->parent = node; |
737 | 14.6M | splay_move_to_root(cp, mem); |
738 | 14.6M | } |
739 | | |
740 | | /* |
741 | | * Allocate and mostly initialize the state of an allocator (system, global, |
742 | | * or local). Does not initialize global or space. |
743 | | */ |
744 | | static void *ialloc_solo(gs_memory_t *, gs_memory_type_ptr_t, |
745 | | clump_t **); |
746 | | gs_ref_memory_t * |
747 | | ialloc_alloc_state(gs_memory_t * parent, uint clump_size) |
748 | 43.5k | { |
749 | 43.5k | clump_t *cp; |
750 | 43.5k | gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp); |
751 | | |
752 | 43.5k | if (iimem == 0) |
753 | 0 | return 0; |
754 | 43.5k | iimem->stable_memory = (gs_memory_t *)iimem; |
755 | 43.5k | iimem->procs = gs_ref_memory_procs; |
756 | 43.5k | iimem->gs_lib_ctx = parent->gs_lib_ctx; |
757 | 43.5k | iimem->non_gc_memory = parent; |
758 | 43.5k | iimem->thread_safe_memory = parent->thread_safe_memory; |
759 | 43.5k | iimem->clump_size = clump_size; |
760 | | #if defined(MEMENTO) || defined(SINGLE_OBJECT_MEMORY_BLOCKS_ONLY) |
761 | | iimem->large_size = 1; |
762 | | #else |
763 | 43.5k | iimem->large_size = ((clump_size / 4) & -obj_align_mod) + 1; |
764 | 43.5k | #endif |
765 | 43.5k | iimem->is_controlled = false; |
766 | 43.5k | iimem->gc_status.vm_threshold = clump_size * 3L; |
767 | 43.5k | iimem->gc_status.max_vm = MAX_MAX_VM; |
768 | 43.5k | iimem->gc_status.signal_value = 0; |
769 | 43.5k | iimem->gc_status.enabled = false; |
770 | 43.5k | iimem->gc_status.requested = 0; |
771 | 43.5k | iimem->gc_allocated = 0; |
772 | 43.5k | iimem->previous_status.allocated = 0; |
773 | 43.5k | iimem->previous_status.used = 0; |
774 | 43.5k | ialloc_reset(iimem); |
775 | 43.5k | iimem->root = cp; |
776 | 43.5k | ialloc_set_limit(iimem); |
777 | 43.5k | iimem->cc = NULL; |
778 | 43.5k | iimem->save_level = 0; |
779 | 43.5k | iimem->new_mask = 0; |
780 | 43.5k | iimem->test_mask = ~0; |
781 | 43.5k | iimem->streams = 0; |
782 | 43.5k | iimem->names_array = 0; |
783 | 43.5k | iimem->roots = 0; |
784 | 43.5k | iimem->num_contexts = 0; |
785 | 43.5k | iimem->saved = 0; |
786 | 43.5k | return iimem; |
787 | 43.5k | } |
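A usage sketch (hypothetical; avm_local is assumed to come from ivmspace.h, and 20000 is an arbitrary clump size). Note that ialloc_alloc_state() deliberately leaves the VM space unset, so the caller assigns it:

    static gs_ref_memory_t *
    make_local_allocator(gs_memory_t *parent)
    {
        gs_ref_memory_t *imem = ialloc_alloc_state(parent, 20000);

        if (imem == NULL)
            return NULL;
        imem->space = avm_local;    /* assumption: VM space id from ivmspace.h */
        return imem;
    }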
788 | | |
789 | | /* Allocate a 'solo' object with its own clump. */ |
790 | | static void * |
791 | | ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype, |
792 | | clump_t ** pcp) |
793 | 43.5k | { /* |
794 | | * We can't assume that the parent uses the same object header |
795 | | * that we do, but the GC requires that allocators have |
796 | | * such a header. Therefore, we prepend one explicitly. |
797 | | */ |
798 | 43.5k | clump_t *cp = |
799 | 43.5k | gs_raw_alloc_struct_immovable(parent, &st_clump, |
800 | 43.5k | "ialloc_solo(clump)"); |
801 | 43.5k | uint csize = |
802 | 43.5k | ROUND_UP(sizeof(clump_head_t) + sizeof(obj_header_t) + |
803 | 43.5k | pstype->ssize, |
804 | 43.5k | obj_align_mod); |
805 | 43.5k | byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo"); |
806 | 43.5k | obj_header_t *obj = (obj_header_t *) (cdata + sizeof(clump_head_t)); |
807 | | |
808 | 43.5k | if (cp == 0 || cdata == 0) { |
809 | 0 | gs_free_object(parent, cp, "ialloc_solo(allocation failure)"); |
810 | 0 | gs_free_object(parent, cdata, "ialloc_solo(allocation failure)"); |
811 | 0 | return 0; |
812 | 0 | } |
813 | 43.5k | alloc_init_clump(cp, cdata, cdata + csize, false, (clump_t *) NULL); |
814 | 43.5k | cp->cbot = cp->ctop; |
815 | 43.5k | cp->parent = cp->left = cp->right = 0; |
816 | 43.5k | cp->c_alone = true; |
817 | | /* Construct the object header "by hand". */ |
818 | 43.5k | obj->o_pad = 0; |
819 | 43.5k | obj->o_alone = 1; |
820 | 43.5k | obj->o_size = pstype->ssize; |
821 | 43.5k | obj->o_type = pstype; |
822 | 43.5k | *pcp = cp; |
823 | 43.5k | return (void *)(obj + 1); |
824 | 43.5k | } |
825 | | |
826 | | void |
827 | | ialloc_free_state(gs_ref_memory_t *iimem) |
828 | 0 | { |
829 | 0 | clump_t *cp; |
830 | 0 | gs_memory_t *mem; |
831 | 0 | if (iimem == NULL) |
832 | 0 | return; |
833 | 0 | cp = iimem->root; |
834 | 0 | mem = iimem->non_gc_memory; |
835 | 0 | if (cp == NULL) |
836 | 0 | return; |
837 | 0 | gs_free_object(mem, cp->chead, "ialloc_free_state(clump data)"); |
838 | 0 | gs_free_object(mem, cp, "ialloc_free_state(clump)"); |
839 | 0 | } |
840 | | |
841 | | /* |
842 | | * Add a clump to an externally controlled allocator. Such allocators |
843 | | * allocate all objects as immovable, are not garbage-collected, and |
844 | | * don't attempt to acquire additional memory on their own. |
845 | | */ |
846 | | int |
847 | | ialloc_add_clump(gs_ref_memory_t *imem, ulong space, client_name_t cname) |
848 | 0 | { |
849 | 0 | clump_t *cp; |
850 | | |
851 | | /* Allow acquisition of this clump. */ |
852 | 0 | imem->is_controlled = false; |
853 | 0 | imem->large_size = imem->clump_size; |
854 | 0 | imem->limit = imem->gc_status.max_vm = MAX_MAX_VM; |
855 | | |
856 | | /* Acquire the clump. */ |
857 | 0 | cp = alloc_add_clump(imem, space, cname); |
858 | | |
859 | | /* |
860 | | * Make all allocations immovable. Since the "movable" allocators |
861 | | * allocate within existing clumps, whereas the "immovable" ones |
862 | | * allocate in new clumps, we equate the latter to the former, even |
863 | | * though this seems backwards. |
864 | | */ |
865 | 0 | imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes; |
866 | 0 | imem->procs.alloc_struct_immovable = imem->procs.alloc_struct; |
867 | 0 | imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array; |
868 | 0 | imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array; |
869 | 0 | imem->procs.alloc_string_immovable = imem->procs.alloc_string; |
870 | | |
871 | | /* Disable acquisition of additional clumps. */ |
872 | 0 | imem->is_controlled = true; |
873 | 0 | imem->limit = 0; |
874 | |
875 | 0 | return (cp ? 0 : gs_note_error(gs_error_VMerror)); |
876 | 0 | } |
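A usage sketch (hypothetical size and client name): after this call succeeds, the allocator is "controlled", so every allocation is immovable and no further clumps will be acquired:

    static int
    setup_controlled_pool(gs_ref_memory_t *imem)
    {
        /* 64KB is an arbitrary illustrative budget; the second argument
         * is the clump size in bytes, despite being named 'space'. */
        return ialloc_add_clump(imem, 64 * 1024, "controlled_pool");
    }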
877 | | |
878 | | /* Prepare for a GC by clearing the stream list. */ |
879 | | /* This probably belongs somewhere else.... */ |
880 | | void |
881 | | ialloc_gc_prepare(gs_ref_memory_t * mem) |
882 | 120k | { /* |
883 | | * We have to unlink every stream from its neighbors, |
884 | | * so that referenced streams don't keep all streams around. |
885 | | */ |
886 | 120k | while (mem->streams != 0) { |
887 | 0 | stream *s = mem->streams; |
888 | |
889 | 0 | mem->streams = s->next; |
890 | 0 | s->prev = s->next = 0; |
891 | 0 | } |
892 | 120k | } |
893 | | |
894 | | /* Initialize after a save. */ |
895 | | void |
896 | | ialloc_reset(gs_ref_memory_t * mem) |
897 | 70.1k | { |
898 | 70.1k | mem->root = 0; |
899 | 70.1k | mem->cc = NULL; |
900 | 70.1k | mem->allocated = 0; |
901 | 70.1k | mem->changes = 0; |
902 | 70.1k | mem->scan_limit = 0; |
903 | 70.1k | mem->total_scanned = 0; |
904 | 70.1k | mem->total_scanned_after_compacting = 0; |
905 | 70.1k | ialloc_reset_free(mem); |
906 | 70.1k | } |
907 | | |
908 | | /* Initialize after a save or GC. */ |
909 | | void |
910 | | ialloc_reset_free(gs_ref_memory_t * mem) |
911 | 190k | { |
912 | 190k | int i; |
913 | 190k | obj_header_t **p; |
914 | | |
915 | 190k | mem->lost.objects = 0; |
916 | 190k | mem->lost.refs = 0; |
917 | 190k | mem->lost.strings = 0; |
918 | 190k | mem->cfreed.cp = 0; |
919 | 19.6M | for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++) |
920 | 19.4M | *p = 0; |
921 | 190k | mem->largest_free_size = 0; |
922 | 190k | } |
923 | | |
924 | | /* |
925 | | * Set an arbitrary limit so that the amount of allocated VM does not grow |
926 | | * indefinitely even when GC is disabled. Benchmarks have shown that |
927 | | * the resulting GC's are infrequent enough not to degrade performance |
928 | | * significantly. |
929 | | */ |
930 | | #define FORCE_GC_LIMIT 8000000 |
931 | | |
932 | | /* Set the allocation limit after a change in one or more of */ |
933 | | /* vm_threshold, max_vm, or enabled, or after a GC. */ |
934 | | void |
935 | | ialloc_set_limit(register gs_ref_memory_t * mem) |
936 | 65.0M | { /* |
937 | | * The following code is intended to set the limit so that |
938 | | * we stop allocating when allocated + previous_status.allocated |
939 | | * exceeds the lesser of max_vm or (if GC is enabled) |
940 | | * gc_allocated + vm_threshold. |
941 | | */ |
942 | 65.0M | size_t max_allocated = |
943 | 65.0M | (mem->gc_status.max_vm > mem->previous_status.allocated ? |
944 | 65.0M | mem->gc_status.max_vm - mem->previous_status.allocated : |
945 | 65.0M | 0); |
946 | | |
947 | 65.0M | if (mem->gc_status.enabled) { |
948 | 3.97M | size_t limit = mem->gc_allocated + mem->gc_status.vm_threshold; |
949 | | |
950 | 3.97M | if (limit < mem->previous_status.allocated) |
951 | 40 | mem->limit = 0; |
952 | 3.97M | else { |
953 | 3.97M | limit -= mem->previous_status.allocated; |
954 | 3.97M | mem->limit = min(limit, max_allocated); |
955 | 3.97M | } |
956 | 3.97M | } else |
957 | 61.0M | mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT); |
958 | 65.0M | if_debug7m('0', (const gs_memory_t *)mem, |
959 | 65.0M | "[0]space=%d, max_vm=%"PRIdSIZE", prev.alloc=%"PRIdSIZE", enabled=%d, " |
960 | 65.0M | "gc_alloc=%"PRIdSIZE", threshold=%"PRIdSIZE" => limit=%"PRIdSIZE"\n", |
961 | 65.0M | mem->space, mem->gc_status.max_vm, |
962 | 65.0M | mem->previous_status.allocated, |
963 | 65.0M | mem->gc_status.enabled, mem->gc_allocated, |
964 | 65.0M | mem->gc_status.vm_threshold, mem->limit); |
965 | 65.0M | } |
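A worked example of the computation above, with made-up numbers:

    /* max_vm = 10MB, previous_status.allocated = 2MB, GC enabled,
     * gc_allocated = 3MB, vm_threshold = 1MB. Then max_allocated =
     * 10MB - 2MB = 8MB, and limit = (3MB + 1MB) - 2MB = 2MB, so
     * mem->limit = min(2MB, 8MB) = 2MB: allocation is cut off once
     * allocated + previous_status.allocated exceeds gc_allocated +
     * vm_threshold = 4MB, exactly as the comment above describes. */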
966 | | |
967 | | struct free_data |
968 | | { |
969 | | gs_ref_memory_t *imem; |
970 | | clump_t *allocator; |
971 | | }; |
972 | | |
973 | | static splay_app_result_t |
974 | | free_all_not_allocator(clump_t *cp, void *arg) |
975 | 1.66M | { |
976 | 1.66M | struct free_data *fd = (struct free_data *)arg; |
977 | | |
978 | 1.66M | if (cp->cbase + sizeof(obj_header_t) != (byte *)fd->imem) |
979 | 1.58M | alloc_free_clump(cp, fd->imem); |
980 | 87.1k | else |
981 | 87.1k | fd->allocator = cp; |
982 | | |
983 | 1.66M | return SPLAY_APP_CONTINUE; |
984 | 1.66M | } |
985 | | |
986 | | static splay_app_result_t |
987 | | free_all_allocator(clump_t *cp, void *arg) |
988 | 43.5k | { |
989 | 43.5k | struct free_data *fd = (struct free_data *)arg; |
990 | | |
991 | 43.5k | if (cp->cbase + sizeof(obj_header_t) != (byte *)fd->imem) |
992 | 0 | return SPLAY_APP_CONTINUE; |
993 | | |
994 | 43.5k | fd->allocator = cp; |
995 | 43.5k | alloc_free_clump(cp, fd->imem); |
996 | 43.5k | return SPLAY_APP_STOP; |
997 | 43.5k | } |
998 | | |
999 | | /* |
1000 | | * Free all the memory owned by the allocator, except the allocator itself. |
1001 | | * Note that this only frees memory at the current save level: the client |
1002 | | * is responsible for restoring to the outermost level if desired. |
1003 | | */ |
1004 | | static void |
1005 | | i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname) |
1006 | 113k | { |
1007 | 113k | gs_ref_memory_t * imem = (gs_ref_memory_t *)mem; |
1008 | 113k | struct free_data fd; |
1009 | | |
1010 | 113k | fd.imem = imem; |
1011 | 113k | fd.allocator = NULL; |
1012 | | |
1013 | 113k | if (free_mask & FREE_ALL_DATA && imem->root != NULL) { |
1014 | | /* Free every clump except the allocator */ |
1015 | 113k | clump_splay_app(imem->root, imem, free_all_not_allocator, &fd); |
1016 | | |
1017 | | /* Reinstate the allocator as the sole clump */ |
1018 | 113k | imem->root = fd.allocator; |
1019 | 113k | if (fd.allocator) |
1020 | 87.1k | fd.allocator->parent = fd.allocator->left = fd.allocator->right = NULL; |
1021 | 113k | } |
1022 | 113k | if (free_mask & FREE_ALL_ALLOCATOR) { |
1023 | | /* Walk the tree to find the allocator. */ |
1024 | 43.5k | clump_splay_app(imem->root, imem, free_all_allocator, &fd); |
1025 | 43.5k | } |
1026 | 113k | } |
1027 | | |
1028 | | /* ================ Accessors ================ */ |
1029 | | |
1030 | | /* Get the size of an object from the header. */ |
1031 | | static size_t |
1032 | | i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj) |
1033 | 143k | { |
1034 | 143k | return pre_obj_contents_size((const obj_header_t *)obj - 1); |
1035 | 143k | } |
1036 | | |
1037 | | /* Get the type of a structure from the header. */ |
1038 | | static gs_memory_type_ptr_t |
1039 | | i_object_type(const gs_memory_t * mem, const void /*obj_header_t */ *obj) |
1040 | 150k | { |
1041 | 150k | return ((const obj_header_t *)obj - 1)->o_type; |
1042 | 150k | } |
1043 | | |
1044 | | /* Get the GC status of a memory. */ |
1045 | | void |
1046 | | gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat) |
1047 | 65.1M | { |
1048 | 65.1M | *pstat = mem->gc_status; |
1049 | 65.1M | } |
1050 | | |
1051 | | /* Set the GC status of a memory. */ |
1052 | | void |
1053 | | gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat) |
1054 | 64.9M | { |
1055 | 64.9M | mem->gc_status = *pstat; |
1056 | 64.9M | ialloc_set_limit(mem); |
1057 | 64.9M | } |
1058 | | |
1059 | | /* Set VM threshold. Value passed as int64_t since it is signed */ |
1060 | | void |
1061 | | gs_memory_set_vm_threshold(gs_ref_memory_t * mem, int64_t val) |
1062 | 128k | { |
1063 | 128k | gs_memory_gc_status_t stat; |
1064 | 128k | gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory; |
1065 | | |
1066 | 128k | if (val < MIN_VM_THRESHOLD) |
1067 | 0 | val = MIN_VM_THRESHOLD; |
1068 | 128k | else if (val > MAX_VM_THRESHOLD) |
1069 | 0 | val = MAX_VM_THRESHOLD; |
1070 | 128k | gs_memory_gc_status(mem, &stat); |
1071 | 128k | stat.vm_threshold = val; |
1072 | 128k | gs_memory_set_gc_status(mem, &stat); |
1073 | 128k | gs_memory_gc_status(stable, &stat); |
1074 | 128k | stat.vm_threshold = val; |
1075 | 128k | gs_memory_set_gc_status(stable, &stat); |
1076 | 128k | } |
1077 | | |
1078 | | /* Set VM reclaim. */ |
1079 | | void |
1080 | | gs_memory_set_vm_reclaim(gs_ref_memory_t * mem, bool enabled) |
1081 | 128k | { |
1082 | 128k | gs_memory_gc_status_t stat; |
1083 | 128k | gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory; |
1084 | | |
1085 | 128k | gs_memory_gc_status(mem, &stat); |
1086 | 128k | stat.enabled = enabled; |
1087 | 128k | gs_memory_set_gc_status(mem, &stat); |
1088 | 128k | gs_memory_gc_status(stable, &stat); |
1089 | 128k | stat.enabled = enabled; |
1090 | 128k | gs_memory_set_gc_status(stable, &stat); |
1091 | 128k | } |
1092 | | |
1093 | | /* ================ Objects ================ */ |
1094 | | |
1095 | | /* Allocate a small object quickly if possible. */ |
1096 | | /* The size must be substantially less than max_uint. */ |
1097 | | /* ptr must be declared as obj_header_t *. */ |
1098 | | /* pfl must be declared as obj_header_t **. */ |
1099 | | #define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\ |
1100 | 257M | if ( size <= max_freelist_size &&\ |
1101 | 257M | *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\ |
1102 | 257M | )\ |
1103 | 257M | { ptr = *pfl;\ |
1104 | 235M | *pfl = *(obj_header_t **)ptr;\ |
1105 | 235M | ptr[-1].o_size = (obj_size_t)size;\ |
1106 | 235M | ptr[-1].o_type = pstype;\ |
1107 | 235M | /* If debugging, clear the block in an attempt to */\ |
1108 | 235M | /* track down uninitialized data errors. */\ |
1109 | 257M | gs_alloc_fill(ptr, gs_alloc_fill_alloc, size); |
1110 | | #define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\ |
1111 | 235M | }\ |
1112 | 257M | else if (size > max_freelist_size &&\ |
1113 | 22.0M | (ptr = large_freelist_alloc(imem, size)) != 0)\ |
1114 | 22.0M | { ptr[-1].o_type = pstype;\ |
1115 | 1.32M | /* If debugging, clear the block in an attempt to */\ |
1116 | 1.32M | /* track down uninitialized data errors. */\ |
1117 | 257M | gs_alloc_fill(ptr, gs_alloc_fill_alloc, size); |
1118 | | #define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\ |
1119 | 1.32M | }\ |
1120 | 22.0M | else if ( imem->cc && !imem->cc->c_alone && \ |
1121 | 20.7M | (imem->cc->ctop - (byte *)(ptr = (obj_header_t *)imem->cc->cbot))\ |
1122 | 20.6M | >= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\ |
1123 | 20.7M | size < imem->large_size\ |
1124 | 20.7M | )\ |
1125 | 20.7M | { imem->cc->cbot = (byte *)ptr + obj_size_round(size);\ |
1126 | 19.4M | ptr->o_pad = 0;\ |
1127 | 19.4M | ptr->o_alone = 0;\ |
1128 | 19.4M | ptr->o_size = (obj_size_t)size;\ |
1129 | 19.4M | ptr->o_type = pstype;\ |
1130 | 19.4M | ptr++;\ |
1131 | 19.4M | /* If debugging, clear the block in an attempt to */\ |
1132 | 19.4M | /* track down uninitialized data errors. */\ |
1133 | 22.0M | gs_alloc_fill(ptr, gs_alloc_fill_alloc, size); |
1134 | | #define ELSE_ALLOC\ |
1135 | 19.4M | }\ |
1136 | | else |
1137 | | |
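A worked example of the fast-path tiers above (assuming 8-byte object alignment, i.e. obj_align_mask == 7 and log2_obj_align_mod == 3):

    /* A request of size 20 indexes freelists[(20 + 7) >> 3] == freelists[3].
     * If that list is non-empty, IF_FREELIST_ALLOC unlinks its head in O(1)
     * and rewrites the header's o_size/o_type. Otherwise the request falls
     * through to the large freelist, then to LIFO allocation at the bottom
     * of the current clump, and finally to alloc_obj(). */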
1138 | | static byte * |
1139 | | i_alloc_bytes(gs_memory_t * mem, size_t ssize, client_name_t cname) |
1140 | 6.44M | { |
1141 | 6.44M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1142 | 6.44M | obj_header_t *obj; |
1143 | 6.44M | obj_header_t **pfl; |
1144 | 6.44M | obj_size_t size = (obj_size_t)ssize; |
1145 | | |
1146 | | #ifdef MEMENTO |
1147 | | if (Memento_failThisEvent()) |
1148 | | return NULL; |
1149 | | #endif |
1150 | | |
1151 | 6.44M | if ((size_t)size != ssize) |
1152 | 0 | return NULL; |
1153 | | |
1154 | 6.44M | IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl) |
1155 | 528k | alloc_trace(":+bf", imem, cname, NULL, size, obj); |
1156 | 1.15M | ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes) |
1157 | 1.15M | alloc_trace(":+bF", imem, cname, NULL, size, obj); |
1158 | 3.82M | ELSEIF_LIFO_ALLOC(obj, imem, (uint)size, &st_bytes) |
1159 | 3.82M | alloc_trace(":+b ", imem, cname, NULL, size, obj); |
1160 | 3.82M | ELSE_ALLOC |
1161 | 936k | { |
1162 | 936k | obj = alloc_obj(imem, size, &st_bytes, 0, cname); |
1163 | 936k | if (obj == 0) |
1164 | 0 | return 0; |
1165 | 936k | alloc_trace(":+b.", imem, cname, NULL, size, obj); |
1166 | 936k | } |
1167 | | #if IGC_PTR_STABILITY_CHECK |
1168 | | obj[-1].d.o.space_id = imem->space_id; |
1169 | | #endif |
1170 | 6.44M | return (byte *)Memento_label(obj, cname); |
1171 | 6.44M | } |
1172 | | static byte * |
1173 | | i_alloc_bytes_immovable(gs_memory_t * mem, size_t ssize, client_name_t cname) |
1174 | 298k | { |
1175 | 298k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1176 | 298k | obj_header_t *obj; |
1177 | 298k | obj_size_t size = (obj_size_t)ssize; |
1178 | | |
1179 | | #ifdef MEMENTO |
1180 | | if (Memento_failThisEvent()) |
1181 | | return NULL; |
1182 | | #endif |
1183 | | |
1184 | 298k | if ((size_t)size != ssize) |
1185 | 0 | return NULL; |
1186 | | |
1187 | 298k | obj = alloc_obj(imem, size, &st_bytes, |
1188 | 298k | ALLOC_IMMOVABLE | ALLOC_DIRECT, cname); |
1189 | 298k | if (obj == 0) |
1190 | 0 | return 0; |
1191 | 298k | alloc_trace("|+b.", imem, cname, NULL, size, obj); |
1192 | 298k | return (byte *)Memento_label(obj, cname); |
1193 | 298k | } |
1194 | | static void * |
1195 | | i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype, |
1196 | | client_name_t cname) |
1197 | 250M | { |
1198 | 250M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1199 | 250M | obj_size_t size = pstype->ssize; |
1200 | 250M | obj_header_t *obj; |
1201 | 250M | obj_header_t **pfl; |
1202 | | |
1203 | | #ifdef MEMENTO |
1204 | | if (Memento_failThisEvent()) |
1205 | | return NULL; |
1206 | | #endif |
1207 | | |
1208 | 250M | ALLOC_CHECK_SIZE(mem,pstype); |
1209 | 250M | IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl) |
1210 | 234M | alloc_trace(":+<f", imem, cname, pstype, size, obj); |
1211 | 234M | ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype) |
1212 | 165k | alloc_trace(":+<F", imem, cname, pstype, size, obj); |
1213 | 15.6M | ELSEIF_LIFO_ALLOC(obj, imem, size, pstype) |
1214 | 15.6M | alloc_trace(":+< ", imem, cname, pstype, size, obj); |
1215 | 15.6M | ELSE_ALLOC |
1216 | 326k | { |
1217 | 326k | obj = alloc_obj(imem, size, pstype, 0, cname); |
1218 | 326k | if (obj == 0) |
1219 | 0 | return 0; |
1220 | 326k | alloc_trace(":+<.", imem, cname, pstype, size, obj); |
1221 | 326k | } |
1222 | | #if IGC_PTR_STABILITY_CHECK |
1223 | | obj[-1].d.o.space_id = imem->space_id; |
1224 | | #endif |
1225 | 250M | return Memento_label(obj, cname); |
1226 | 250M | } |
1227 | | static void * |
1228 | | i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype, |
1229 | | client_name_t cname) |
1230 | 11.0M | { |
1231 | 11.0M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1232 | 11.0M | obj_size_t size = pstype->ssize; |
1233 | 11.0M | obj_header_t *obj; |
1234 | | |
1235 | | #ifdef MEMENTO |
1236 | | if (Memento_failThisEvent()) |
1237 | | return NULL; |
1238 | | #endif |
1239 | | |
1240 | 11.0M | ALLOC_CHECK_SIZE(mem,pstype); |
1241 | 11.0M | obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname); |
1242 | 11.0M | alloc_trace("|+<.", imem, cname, pstype, size, obj); |
1243 | 11.0M | return Memento_label(obj, cname); |
1244 | 11.0M | } |
1245 | | |
1246 | | static inline bool |
1247 | | alloc_array_check_size(size_t num_elements, size_t elt_size, size_t *lsize) |
1248 | 15.8M | { |
1249 | 15.8M | int shift0, shift1; |
1250 | 15.8M | size_t m, n; |
1251 | | |
1252 | | /* Avoid the loops in the overwhelming number of cases. */ |
1253 | 15.8M | if ((num_elements | elt_size) >= 65536) { |
1254 | | /* Slightly conservative, but it'll work for our purposes. */ |
1255 | | /* m is the maximum unsigned value representable in shift0 bits */ |
1256 | 4.90k | for (m=0, shift0 = 0; m < num_elements; m = (m<<1)+1, shift0++); |
1257 | | /* n is the maximum unsigned value representable in shift1 bits */ |
1258 | 1.24k | for (n=0, shift1 = 0; n < elt_size; n = (n<<1)+1, shift1++); |
1259 | | /* A shift0-bit unsigned number multiplied by a shift1-bit |
1260 | | * unsigned number is guaranteed to fit in shift0+shift1 bits. */ |
1261 | 208 | if (shift0+shift1 > 8*sizeof(size_t)) |
1262 | 0 | return false; /* Overflow */ |
1263 | 208 | } |
1264 | | |
1265 | 15.8M | *lsize = num_elements * elt_size; |
1266 | 15.8M | return true; |
1267 | 15.8M | } |
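A worked example of the overflow guard (made-up values, on a 32-bit size_t):

    /* num_elements = 70000 and elt_size = 70000 each need 17 bits
     * (65535 < 70000 <= 131071), so shift0 = shift1 = 17. Their product
     * (4.9e9) can need up to 34 bits; since shift0 + shift1 = 34 exceeds
     * 8*sizeof(size_t) = 32, the function reports overflow instead of
     * letting the multiplication wrap. */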
1268 | | |
1269 | | static byte * |
1270 | | i_alloc_byte_array(gs_memory_t * mem, size_t num_elements, size_t elt_size, |
1271 | | client_name_t cname) |
1272 | 2.15M | { |
1273 | 2.15M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1274 | 2.15M | obj_header_t *obj; |
1275 | 2.15M | size_t slsize; |
1276 | 2.15M | obj_size_t lsize; |
1277 | | #ifdef MEMENTO |
1278 | | if (Memento_failThisEvent()) |
1279 | | return NULL; |
1280 | | #endif |
1281 | 2.15M | if (alloc_array_check_size(num_elements, elt_size, &slsize) == false) |
1282 | 0 | return NULL; |
1283 | 2.15M | lsize = (obj_size_t)slsize; |
1284 | 2.15M | if ((size_t)lsize != slsize) |
1285 | 0 | return NULL; |
1286 | 2.15M | obj = alloc_obj(imem, lsize, |
1287 | 2.15M | &st_bytes, ALLOC_DIRECT, cname); |
1288 | | |
1289 | 2.15M | if_debug6m('A', mem, "[a%d:+b.]%s -bytes-*(%"PRIuSIZE"=%"PRIuSIZE"*%"PRIuSIZE") = "PRI_INTPTR"\n", |
1290 | 2.15M | alloc_trace_space(imem), client_name_string(cname), |
1291 | 2.15M | num_elements * elt_size, |
1292 | 2.15M | num_elements, elt_size, (intptr_t)obj); |
1293 | 2.15M | return (byte *)Memento_label(obj, cname); |
1294 | 2.15M | } |
1295 | | static byte * |
1296 | | i_alloc_byte_array_immovable(gs_memory_t * mem, size_t num_elements, |
1297 | | size_t elt_size, client_name_t cname) |
1298 | 0 | { |
1299 | 0 | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1300 | 0 | obj_header_t *obj; |
1301 | 0 | size_t slsize; |
1302 | 0 | obj_size_t lsize; |
1303 | | #ifdef MEMENTO |
1304 | | if (Memento_failThisEvent()) |
1305 | | return NULL; |
1306 | | #endif |
1307 | 0 | if (alloc_array_check_size(num_elements, elt_size, &slsize) == false) |
1308 | 0 | return NULL; |
1309 | 0 | lsize = (obj_size_t)slsize; |
1310 | 0 | if ((size_t)lsize != slsize) |
1311 | 0 | return NULL; |
1312 | 0 | obj = alloc_obj(imem, lsize, |
1313 | 0 | &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT, |
1314 | 0 | cname); |
1315 | |
1316 | 0 | if_debug6m('A', mem, "[a%d|+b.]%s -bytes-*(%"PRIuSIZE"=%"PRIuSIZE"*%"PRIuSIZE") = "PRI_INTPTR"\n", |
1317 | 0 | alloc_trace_space(imem), client_name_string(cname), |
1318 | 0 | num_elements * elt_size, |
1319 | 0 | num_elements, elt_size, (intptr_t)obj); |
1320 | 0 | return (byte *)Memento_label(obj, cname); |
1321 | 0 | } |
1322 | | static void * |
1323 | | i_alloc_struct_array(gs_memory_t * mem, size_t num_elements, |
1324 | | gs_memory_type_ptr_t pstype, client_name_t cname) |
1325 | 13.6M | { |
1326 | 13.6M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1327 | 13.6M | obj_header_t *obj; |
1328 | 13.6M | size_t slsize; |
1329 | 13.6M | obj_size_t lsize; |
1330 | | #ifdef MEMENTO |
1331 | | if (Memento_failThisEvent()) |
1332 | | return NULL; |
1333 | | #endif |
1334 | | |
1335 | 13.6M | ALLOC_CHECK_SIZE(mem,pstype); |
1336 | | #ifdef DEBUG |
1337 | | if (pstype->enum_ptrs == basic_enum_ptrs) { |
1338 | | dmprintf2(mem, " i_alloc_struct_array: called with incorrect structure type (not element), struct='%s', client='%s'\n", |
1339 | | pstype->sname, cname); |
1340 | | return NULL; /* fail */ |
1341 | | } |
1342 | | #endif |
1343 | 13.6M | if (alloc_array_check_size(num_elements, pstype->ssize, &slsize) == false) |
1344 | 0 | return NULL; |
1345 | 13.6M | lsize = (obj_size_t)slsize; |
1346 | 13.6M | if ((size_t)lsize != slsize) |
1347 | 11 | return NULL; |
1348 | 13.6M | obj = alloc_obj(imem, lsize, pstype, ALLOC_DIRECT, cname); |
1349 | 13.6M | if_debug7m('A', mem, "[a%d:+<.]%s %s*(%"PRIuSIZE"=%"PRIuSIZE"*%u) = "PRI_INTPTR"\n", |
1350 | 13.6M | alloc_trace_space(imem), client_name_string(cname), |
1351 | 13.6M | struct_type_name_string(pstype), |
1352 | 13.6M | num_elements * pstype->ssize, |
1353 | 13.6M | num_elements, pstype->ssize, (intptr_t)obj); |
1354 | 13.6M | return (char *)Memento_label(obj, cname); |
1355 | 13.6M | } |
1356 | | static void * |
1357 | | i_alloc_struct_array_immovable(gs_memory_t * mem, size_t num_elements, |
1358 | | gs_memory_type_ptr_t pstype, client_name_t cname) |
1359 | 8.71k | { |
1360 | 8.71k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1361 | 8.71k | obj_header_t *obj; |
1362 | 8.71k | size_t slsize; |
1363 | 8.71k | obj_size_t lsize; |
1364 | | #ifdef MEMENTO |
1365 | | if (Memento_failThisEvent()) |
1366 | | return NULL; |
1367 | | #endif |
1368 | | |
1369 | 8.71k | ALLOC_CHECK_SIZE(mem,pstype); |
1370 | 8.71k | if (alloc_array_check_size(num_elements, pstype->ssize, &slsize) == false) |
1371 | 0 | return NULL; |
1372 | 8.71k | lsize = (obj_size_t)slsize; |
1373 | 8.71k | if ((size_t)lsize != slsize) |
1374 | 0 | return NULL; |
1375 | 8.71k | obj = alloc_obj(imem, lsize, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname); |
1376 | 8.71k | if_debug7m('A', mem, "[a%d|+<.]%s %s*(%"PRIuSIZE"=%"PRIuSIZE"*%u) = "PRI_INTPTR"\n", |
1377 | 8.71k | alloc_trace_space(imem), client_name_string(cname), |
1378 | 8.71k | struct_type_name_string(pstype), |
1379 | 8.71k | num_elements * pstype->ssize, |
1380 | 8.71k | num_elements, pstype->ssize, (intptr_t)obj); |
1381 | 8.71k | return (char *)Memento_label(obj, cname); |
1382 | 8.71k | } |
1383 | | static void * |
1384 | | i_resize_object(gs_memory_t * mem, void *obj, size_t new_num_elements, |
1385 | | client_name_t cname) |
1386 | 17.4k | { |
1387 | 17.4k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1388 | 17.4k | obj_header_t *pp = (obj_header_t *) obj - 1; |
1389 | 17.4k | gs_memory_type_ptr_t pstype = pp->o_type; |
1390 | 17.4k | size_t old_size = pre_obj_contents_size(pp); |
1391 | 17.4k | size_t new_size = pstype->ssize * new_num_elements; |
1392 | 17.4k | size_t old_size_rounded = obj_align_round(old_size); |
1393 | 17.4k | size_t new_size_rounded = obj_align_round(new_size); |
1394 | 17.4k | void *new_obj = NULL; |
1395 | | |
1396 | | #ifdef MEMENTO |
1397 | | if (Memento_failThisEvent()) |
1398 | | return NULL; |
1399 | | #endif |
1400 | | |
1401 | 17.4k | if (new_size_rounded != (obj_size_t)new_size_rounded) |
1402 | 0 | return NULL; |
1403 | | |
1404 | 17.4k | if (old_size_rounded == new_size_rounded) { |
1405 | 17.4k | pp->o_size = (obj_size_t)new_size; |
1406 | 17.4k | new_obj = obj; |
1407 | 17.4k | } else |
1408 | 0 | if (imem->cc && (byte *)obj + old_size_rounded == imem->cc->cbot && |
1409 | 0 | imem->cc->ctop - (byte *)obj >= new_size_rounded ) { |
1410 | 0 | imem->cc->cbot = (byte *)obj + new_size_rounded; |
1411 | 0 | pp->o_size = (obj_size_t)new_size; |
1412 | 0 | new_obj = obj; |
1413 | 0 | } else /* try and trim the object -- but only if room for a dummy header */ |
1414 | 0 | if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) { |
1415 | 0 | trim_obj(imem, obj, (obj_size_t)new_size, (clump_t *)0); |
1416 | 0 | new_obj = obj; |
1417 | 0 | } |
1418 | 17.4k | if (new_obj) { |
1419 | 17.4k | if_debug8m('A', mem, "[a%d:%c%c ]%s %s(%"PRIuSIZE"=>%"PRIuSIZE") "PRI_INTPTR"\n", |
1420 | 17.4k | alloc_trace_space(imem), |
1421 | 17.4k | (new_size > old_size ? '>' : '<'), |
1422 | 17.4k | (pstype == &st_bytes ? 'b' : '<'), |
1423 | 17.4k | client_name_string(cname), |
1424 | 17.4k | struct_type_name_string(pstype), |
1425 | 17.4k | old_size, new_size, (intptr_t)obj); |
1426 | 17.4k | return Memento_label(new_obj, cname); |
1427 | 17.4k | } |
1428 | | /* Punt. */ |
1429 | 0 | new_obj = gs_alloc_struct_array(mem, new_num_elements, void, |
1430 | 0 | pstype, cname); |
1431 | 0 | if (new_obj == 0) |
1432 | 0 | return 0; |
1433 | 0 | memcpy(new_obj, obj, min(old_size, new_size)); |
1434 | 0 | gs_free_object(mem, obj, cname); |
1435 | 0 | return Memento_label(new_obj, cname); |
1436 | 0 | } |
1437 | | static void |
1438 | | i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname) |
1439 | 267M | { |
1440 | 267M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1441 | 267M | obj_header_t *pp; |
1442 | 267M | gs_memory_type_ptr_t pstype; |
1443 | 267M | gs_memory_struct_type_t saved_stype; |
1444 | | |
1445 | 267M | struct_proc_finalize((*finalize)); |
1446 | 267M | size_t size, rounded_size; |
1447 | | |
1448 | 267M | if (ptr == 0) |
1449 | 1.78M | return; |
1450 | 265M | pp = (obj_header_t *) ptr - 1; |
1451 | 265M | pstype = pp->o_type; |
1452 | | #ifdef DEBUG |
1453 | | if (gs_debug_c('?')) { |
1454 | | clump_locator_t cld; |
1455 | | |
1456 | | if (pstype == &st_free) { |
1457 | | mlprintf2(mem, "%s: object "PRI_INTPTR" already free!\n", |
1458 | | client_name_string(cname), (intptr_t)ptr); |
1459 | | return; /*gs_abort(); */ |
1460 | | } |
1461 | | /* Check that this allocator owns the object being freed. */ |
1462 | | cld.memory = imem; |
1463 | | while ((cld.cp = cld.memory->root), |
1464 | | !clump_locate_ptr(ptr, &cld) |
1465 | | ) { |
1466 | | if (!cld.memory->saved) { |
1467 | | mlprintf3(mem, "%s: freeing "PRI_INTPTR", not owned by memory "PRI_INTPTR"!\n", |
1468 | | client_name_string(cname), (intptr_t)ptr, |
1469 | | (intptr_t)mem); |
1470 | | return; /*gs_abort(); */ |
1471 | | } |
1472 | | /****** HACK: we know the saved state is the first ****** |
1473 | | ****** member of an alloc_save_t. ******/ |
1474 | | cld.memory = (gs_ref_memory_t *) cld.memory->saved; |
1475 | | } |
1476 | | /* Check that the object is in the allocated region. */ |
1477 | | if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase, |
1478 | | cld.cp->cbot)) |
1479 | | ) { |
1480 | | mlprintf5(mem, "%s: freeing "PRI_INTPTR",\n\toutside clump "PRI_INTPTR" cbase="PRI_INTPTR", cbot="PRI_INTPTR"!\n", |
1481 | | client_name_string(cname), (intptr_t) ptr, |
1482 | | (intptr_t) cld.cp, (intptr_t) cld.cp->cbase, |
1483 | | (intptr_t) cld.cp->cbot); |
1484 | | return; /*gs_abort(); */ |
1485 | | } |
1486 | | } |
1487 | | #endif |
1488 | 265M | size = pre_obj_contents_size(pp); |
1489 | 265M | rounded_size = obj_align_round(size); |
1490 | 265M | finalize = pstype->finalize; |
1491 | 265M | if (finalize != 0) { |
1492 | | |
1493 | | /* Unfortunately, device finalize procedures will clobber the
1494 | | stype, which is used for later debugging with "A" debug
1495 | | tracing, so we save the stype in a local. */
1496 | 15.8M | if (gs_debug['a'] || gs_debug['A']) |
1497 | 0 | saved_stype = *pstype; |
1498 | | |
1499 | 15.8M | if_debug3m('u', mem, "[u]finalizing %s "PRI_INTPTR" (%s)\n", |
1500 | 15.8M | struct_type_name_string(pstype), |
1501 | 15.8M | (intptr_t)ptr, client_name_string(cname)); |
1502 | 15.8M | (*finalize) (mem, ptr); |
1503 | | |
1504 | 15.8M | if (gs_debug['a'] || gs_debug['A']) |
1505 | 0 | pstype = &saved_stype; |
1506 | 15.8M | } |
1507 | 265M | if (imem->cc && (byte *) ptr + rounded_size == imem->cc->cbot) { |
1508 | 11.0M | alloc_trace(":-o ", imem, cname, pstype, size, ptr); |
1509 | 11.0M | gs_alloc_fill(ptr, gs_alloc_fill_free, size); |
1510 | 11.0M | imem->cc->cbot = (byte *) pp; |
1511 | | /* IFF this object is adjacent to (or below) the byte after the |
1512 | | * highest free object, do the consolidation within this clump. */ |
1513 | 11.0M | if ((byte *)pp <= imem->cc->int_freed_top) { |
1514 | 1.13M | consolidate_clump_free(imem->cc, imem); |
1515 | 1.13M | } |
1516 | 11.0M | return; |
1517 | 11.0M | } |
1518 | 254M | if (pp->o_alone) { |
1519 | | /* |
1520 | | * We gave this object its own clump. Free the entire clump, |
1521 | | * unless it belongs to an older save level, in which case |
1522 | | * we mustn't overwrite it. |
1523 | | */ |
1524 | 12.3M | clump_locator_t cl; |
1525 | | |
1526 | | #ifdef DEBUG |
1527 | | { |
1528 | | clump_locator_t cld; |
1529 | | |
1530 | | cld.memory = imem; |
1531 | | cld.cp = 0; |
1532 | | if (gs_debug_c('a')) |
1533 | | alloc_trace( |
1534 | | (clump_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"), |
1535 | | imem, cname, pstype, size, ptr); |
1536 | | } |
1537 | | #endif |
1538 | 12.3M | cl.memory = imem; |
1539 | 12.3M | cl.cp = 0; |
1540 | 12.3M | if (clump_locate_ptr(ptr, &cl)) { |
1541 | 12.3M | if (!imem->is_controlled) |
1542 | 12.3M | alloc_free_clump(cl.cp, imem); |
1543 | 12.3M | return; |
1544 | 12.3M | } |
1545 | | /* Don't overwrite even if gs_alloc_debug is set. */ |
1546 | 12.3M | } |
1547 | 242M | if (rounded_size >= sizeof(obj_header_t *)) { |
1548 | | /* |
1549 | | * Put the object on a freelist, unless it belongs to |
1550 | | * an older save level, in which case we mustn't |
1551 | | * overwrite it. |
1552 | | */ |
1553 | 242M | imem->cfreed.memory = imem; |
1554 | 242M | if (clump_locate(ptr, &imem->cfreed)) { |
1555 | 242M | obj_header_t **pfl; |
1556 | | |
1557 | 242M | if (size > max_freelist_size) { |
1558 | 1.65M | pfl = &imem->freelists[LARGE_FREELIST_INDEX]; |
1559 | 1.65M | if (rounded_size > imem->largest_free_size) |
1560 | 210k | imem->largest_free_size = rounded_size; |
1561 | 240M | } else { |
1562 | 240M | pfl = &imem->freelists[(size + obj_align_mask) >> |
1563 | 240M | log2_obj_align_mod]; |
1564 | 240M | } |
1565 | | /* keep track of highest object on a freelist */ |
1566 | | /* If we're dealing with a block in the currently open clump
1567 | | (in imem->cc), update that; otherwise, update the clump in
1568 | | the clump list (in imem->cfreed.cp).
1569 | | */
1570 | 242M | if (imem->cc && imem->cfreed.cp->chead == imem->cc->chead) { |
1571 | 2.08M | if ((byte *)pp >= imem->cc->int_freed_top) { |
1572 | 1.20M | imem->cc->int_freed_top = (byte *)ptr + rounded_size; |
1573 | 1.20M | } |
1574 | 2.08M | } |
1575 | 240M | else { |
1576 | 240M | if ((byte *)pp >= imem->cfreed.cp->int_freed_top) { |
1577 | 389k | imem->cfreed.cp->int_freed_top = (byte *)ptr + rounded_size; |
1578 | 389k | } |
1579 | 240M | } |
1580 | 242M | pp->o_type = &st_free; /* don't confuse GC */ |
1581 | 242M | o_set_unmarked(pp); |
1582 | 242M | gs_alloc_fill(ptr, gs_alloc_fill_free, size); |
1583 | 242M | *(obj_header_t **) ptr = *pfl; |
1584 | 242M | *pfl = (obj_header_t *) ptr; |
1585 | 242M | alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"), |
1586 | 242M | imem, cname, pstype, size, ptr); |
1587 | 242M | return; |
1588 | 242M | } |
1589 | | /* Don't overwrite even if gs_alloc_debug is set. */ |
1590 | 242M | } else { |
1591 | 0 | pp->o_type = &st_free; /* don't confuse GC */ |
1592 | 0 | gs_alloc_fill(ptr, gs_alloc_fill_free, size); |
1593 | 0 | } |
1594 | 242M | alloc_trace(":-o#", imem, cname, pstype, size, ptr); |
1595 | 3.66k | imem->lost.objects += obj_size_round(size); |
1596 | 3.66k | } |
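
/* Standalone sketch of the freelist bucket arithmetic used above.
 * The parameters are assumptions for illustration (they depend on the
 * build): 8-byte object alignment, so log2_obj_align_mod == 3, plus a
 * single LARGE bucket for anything above max_freelist_size. Bucket i
 * then holds free blocks whose size rounds up to i alignment units. */
#include <assert.h>

enum { LOG2_ALIGN = 3, ALIGN_MASK = (1 << LOG2_ALIGN) - 1 };
enum { MAX_FREELIST_SIZE = 800, LARGE_FREELIST_INDEX = 101 }; /* assumed */

static int freelist_index(unsigned size)
{
    if (size > MAX_FREELIST_SIZE)
        return LARGE_FREELIST_INDEX;  /* one shared best-fit list */
    return (int)((size + ALIGN_MASK) >> LOG2_ALIGN); /* exact-size list */
}

int main(void)
{
    assert(freelist_index(1) == 1);    /* 1..8 bytes  -> bucket 1 */
    assert(freelist_index(8) == 1);
    assert(freelist_index(9) == 2);    /* 9..16 bytes -> bucket 2 */
    assert(freelist_index(801) == LARGE_FREELIST_INDEX);
    return 0;
}
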
1597 | | static byte * |
1598 | | i_alloc_string(gs_memory_t * mem, size_t nbytes, client_name_t cname) |
1599 | 82.7M | { |
1600 | 82.7M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1601 | 82.7M | byte *str; |
1602 | 82.7M | clump_splay_walker sw; |
1603 | | |
1604 | | /* |
1605 | | * Cycle through the clumps at the current save level, starting |
1606 | | * with the currently open one. |
1607 | | */ |
1608 | 82.7M | clump_t *cp = clump_splay_walk_init_mid(&sw, imem->cc); |
1609 | | |
1610 | | #ifdef MEMENTO |
1611 | | if (Memento_failThisEvent()) |
1612 | | return NULL; |
1613 | | #endif |
1614 | 82.7M | if (cp == 0) { |
1615 | | /* Open an arbitrary clump. */ |
1616 | 4 | imem->cc = clump_splay_walk_init(&sw, imem); |
1617 | 4 | alloc_open_clump(imem); |
1618 | 4 | } |
1619 | 91.1M | top: |
1620 | 91.1M | if (imem->cc && !imem->cc->c_alone && imem->cc->ctop - imem->cc->cbot > nbytes) { |
1621 | 82.7M | if_debug4m('A', mem, "[a%d:+> ]%s(%"PRIuSIZE") = "PRI_INTPTR"\n", |
1622 | 82.7M | alloc_trace_space(imem), client_name_string(cname), nbytes, |
1623 | 82.7M | (intptr_t)(imem->cc->ctop - nbytes)); |
1624 | 82.7M | str = imem->cc->ctop -= nbytes; |
1625 | 82.7M | gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes); |
1626 | 82.7M | return str; |
1627 | 82.7M | } |
1628 | | /* Try the next clump. */ |
1629 | 8.43M | cp = clump_splay_walk_fwd(&sw); |
1630 | | |
1631 | 8.43M | if (cp != NULL) |
1632 | 8.33M | { |
1633 | 8.33M | alloc_close_clump(imem); |
1634 | 8.33M | imem->cc = cp; |
1635 | 8.33M | alloc_open_clump(imem); |
1636 | 8.33M | goto top; |
1637 | 8.33M | } |
1638 | 107k | if (nbytes > string_space_quanta(SIZE_MAX - sizeof(clump_head_t)) * |
1639 | 107k | string_data_quantum |
1640 | 107k | ) { /* Can't represent the size in a uint! */ |
1641 | 0 | return 0; |
1642 | 0 | } |
1643 | 107k | if (nbytes >= imem->large_size) { /* Give it a clump all its own. */ |
1644 | 21.1k | return i_alloc_string_immovable(mem, nbytes, cname); |
1645 | 86.7k | } else { /* Add another clump. */ |
1646 | 86.7k | cp = alloc_acquire_clump(imem, (ulong) imem->clump_size, true, "clump"); |
1647 | | |
1648 | 86.7k | if (cp == 0) |
1649 | 0 | return 0; |
1650 | 86.7k | alloc_close_clump(imem); |
1651 | 86.7k | imem->cc = clump_splay_walk_init_mid(&sw, cp); |
1652 | 86.7k | gs_alloc_fill(imem->cc->cbase, gs_alloc_fill_free, |
1653 | 86.7k | imem->cc->climit - imem->cc->cbase); |
1654 | 86.7k | goto top; |
1655 | 86.7k | } |
1656 | 107k | } |
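
/* Minimal sketch of the clump's two-ended layout that the string
 * allocator relies on: objects are carved upward from cbot while
 * strings are carved downward from ctop, so the gap between the two
 * pointers is the clump's remaining free space. mini_clump is an
 * illustrative stand-in, not the real clump_t. */
#include <stddef.h>

typedef struct {
    unsigned char *cbot; /* next byte for object allocation (grows up) */
    unsigned char *ctop; /* first byte of the string area (grows down) */
} mini_clump;

static void *clump_alloc_obj(mini_clump *cp, size_t n)
{
    void *p;

    if ((size_t)(cp->ctop - cp->cbot) <= n)
        return NULL;        /* would collide with the strings */
    p = cp->cbot;
    cp->cbot += n;
    return p;
}

static unsigned char *clump_alloc_string(mini_clump *cp, size_t n)
{
    if ((size_t)(cp->ctop - cp->cbot) <= n)
        return NULL;        /* would collide with the objects */
    cp->ctop -= n;          /* mirrors str = imem->cc->ctop -= nbytes */
    return cp->ctop;
}
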
1657 | | static byte * |
1658 | | i_alloc_string_immovable(gs_memory_t * mem, size_t nbytes, client_name_t cname) |
1659 | 21.1k | { |
1660 | 21.1k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1661 | 21.1k | byte *str; |
1662 | 21.1k | size_t asize; |
1663 | 21.1k | clump_t *cp; |
1664 | | |
1665 | | #ifdef MEMENTO |
1666 | | if (Memento_failThisEvent()) |
1667 | | return NULL; |
1668 | | #endif |
1669 | | /* Give it a clump all its own. */ |
1670 | 21.1k | asize = string_clump_space(nbytes) + sizeof(clump_head_t); |
1671 | 21.1k | cp = alloc_acquire_clump(imem, asize, true, "large string clump"); |
1672 | | |
1673 | 21.1k | if (cp == 0) |
1674 | 0 | return 0; |
1675 | 21.1k | cp->c_alone = true; |
1676 | | |
1677 | 21.1k | str = cp->ctop = cp->climit - nbytes; |
1678 | 21.1k | if_debug4m('a', mem, "[a%d|+>L]%s(%"PRIuSIZE") = "PRI_INTPTR"\n", |
1679 | 21.1k | alloc_trace_space(imem), client_name_string(cname), nbytes, |
1680 | 21.1k | (intptr_t)str); |
1681 | 21.1k | gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes); |
1682 | | |
1683 | 21.1k | return Memento_label(str, cname); |
1684 | 21.1k | } |
1685 | | |
1686 | | static byte * |
1687 | | i_resize_string(gs_memory_t * mem, byte * data, size_t old_num, size_t new_num, |
1688 | | client_name_t cname) |
1689 | 922k | { |
1690 | 922k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1691 | 922k | byte *ptr; |
1692 | | |
1693 | 922k | if (old_num == new_num) /* same size returns the same string */ |
1694 | 421k | return data; |
1695 | | |
1696 | 500k | if ( imem->cc && data == imem->cc->ctop && /* bottom-most string */ |
1697 | 500k | (new_num < old_num || |
1698 | 500k | imem->cc->ctop - imem->cc->cbot > new_num - old_num) |
1699 | 500k | ) { /* Resize in place. */ |
1700 | 500k | ptr = data + old_num - new_num; |
1701 | 500k | if_debug6m('A', mem, "[a%d:%c> ]%s(%"PRIuSIZE"->%"PRIuSIZE") "PRI_INTPTR"\n", |
1702 | 500k | alloc_trace_space(imem), |
1703 | 500k | (new_num > old_num ? '>' : '<'), |
1704 | 500k | client_name_string(cname), old_num, new_num, |
1705 | 500k | (intptr_t)ptr); |
1706 | 500k | imem->cc->ctop = ptr; |
1707 | 500k | memmove(ptr, data, min(old_num, new_num)); |
1708 | | #ifdef DEBUG |
1709 | | if (new_num > old_num) |
1710 | | gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc, |
1711 | | new_num - old_num); |
1712 | | else |
1713 | | gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num); |
1714 | | #endif |
1715 | 500k | } else |
1716 | 28 | if (new_num < old_num) { |
1717 | | /* trim the string and create a free space hole */ |
1718 | 2 | ptr = data; |
1719 | 2 | imem->lost.strings += old_num - new_num; |
1720 | 2 | gs_alloc_fill(data + new_num, gs_alloc_fill_free, |
1721 | 2 | old_num - new_num); |
1722 | 2 | if_debug5m('A', mem, "[a%d:<> ]%s(%"PRIuSIZE"->%"PRIuSIZE") "PRI_INTPTR"\n", |
1723 | 2 | alloc_trace_space(imem), client_name_string(cname), |
1724 | 2 | old_num, new_num, (intptr_t)ptr); |
1725 | 26 | } else { /* Punt. */ |
1726 | 26 | ptr = gs_alloc_string(mem, new_num, cname); |
1727 | 26 | if (ptr == 0) |
1728 | 0 | return 0; |
1729 | 26 | memcpy(ptr, data, min(old_num, new_num)); |
1730 | 26 | gs_free_string(mem, data, old_num, cname); |
1731 | 26 | } |
1732 | | |
1733 | 500k | return ptr; |
1734 | 500k | } |
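
/* Sketch of the in-place case above: because the bottom-most string
 * sits exactly at ctop, it can be grown or shrunk just by moving ctop
 * and shifting min(old, new) bytes of contents. mini_clump is again an
 * illustrative stand-in for the real clump_t. */
#include <stddef.h>
#include <string.h>

typedef struct { unsigned char *cbot, *ctop; } mini_clump;

static unsigned char *
resize_bottom_string(mini_clump *cp, unsigned char *data,
                     size_t old_num, size_t new_num)
{
    unsigned char *ptr;

    if (data != cp->ctop)
        return NULL;        /* only the bottom-most string qualifies */
    if (new_num > old_num &&
        (size_t)(cp->ctop - cp->cbot) <= new_num - old_num)
        return NULL;        /* no room to grow in place */
    ptr = data + old_num - new_num;   /* strings grow downward */
    cp->ctop = ptr;
    memmove(ptr, data, old_num < new_num ? old_num : new_num);
    return ptr;
}
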
1735 | | |
1736 | | static void |
1737 | | i_free_string(gs_memory_t * mem, byte * data, size_t nbytes, |
1738 | | client_name_t cname) |
1739 | 1.28M | { |
1740 | 1.28M | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1741 | | |
1742 | 1.28M | if (data) { |
1743 | 1.28M | if (imem->cc && data == imem->cc->ctop) { |
1744 | 1.22M | if_debug4m('A', mem, "[a%d:-> ]%s(%"PRIuSIZE") "PRI_INTPTR"\n", |
1745 | 1.22M | alloc_trace_space(imem), client_name_string(cname), nbytes, |
1746 | 1.22M | (intptr_t)data); |
1747 | 1.22M | imem->cc->ctop += nbytes; |
1748 | 1.22M | } else { |
1749 | 60.7k | if_debug4m('A', mem, "[a%d:->#]%s(%"PRIuSIZE") "PRI_INTPTR"\n", |
1750 | 60.7k | alloc_trace_space(imem), client_name_string(cname), nbytes, |
1751 | 60.7k | (intptr_t)data); |
1752 | 60.7k | imem->lost.strings += nbytes; |
1753 | 60.7k | } |
1754 | 1.28M | gs_alloc_fill(data, gs_alloc_fill_free, nbytes); |
1755 | 1.28M | } |
1756 | 1.28M | } |
1757 | | |
1758 | | static gs_memory_t * |
1759 | | i_stable(gs_memory_t *mem) |
1760 | 533M | { |
1761 | 533M | return mem->stable_memory; |
1762 | 533M | } |
1763 | | |
1764 | | static void |
1765 | | i_status(gs_memory_t * mem, gs_memory_status_t * pstat) |
1766 | 217k | { |
1767 | 217k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
1768 | 217k | size_t unused = imem->lost.refs + imem->lost.strings; |
1769 | 217k | size_t inner = 0; |
1770 | 217k | clump_splay_walker sw; |
1771 | 217k | clump_t *cp; |
1772 | | |
1773 | 217k | alloc_close_clump(imem); |
1774 | | /* Add up unallocated space within each clump. */ |
1775 | | /* Also keep track of space allocated to inner clumps, */ |
1776 | | /* which are included in previous_status.allocated. */ |
1777 | 6.10M | for (cp = clump_splay_walk_init(&sw, imem); cp != NULL; cp = clump_splay_walk_fwd(&sw)) |
1778 | 5.88M | { |
1779 | 5.88M | unused += cp->ctop - cp->cbot; |
1780 | 5.88M | if (cp->outer) |
1781 | 376k | inner += cp->cend - (byte *) cp->chead; |
1782 | 5.88M | } |
1783 | 217k | unused += compute_free_objects(imem); |
1784 | 217k | pstat->used = imem->allocated + inner - unused + |
1785 | 217k | imem->previous_status.used; |
1786 | 217k | pstat->allocated = imem->allocated + |
1787 | 217k | imem->previous_status.allocated; |
1788 | 217k | pstat->max_used = 0; /* unknown for this allocator */ |
1789 | 217k | pstat->limit = imem->limit; |
1790 | 217k | pstat->is_thread_safe = false; /* this allocator is not thread safe */ |
1791 | 217k | } |
1792 | | |
1793 | | static void |
1794 | | i_enable_free(gs_memory_t * mem, bool enable) |
1795 | 318k | { |
1796 | 318k | if (enable) |
1797 | 159k | mem->procs.free_object = i_free_object, |
1798 | 159k | mem->procs.free_string = i_free_string; |
1799 | 159k | else |
1800 | 159k | mem->procs.free_object = gs_ignore_free_object, |
1801 | 159k | mem->procs.free_string = gs_ignore_free_string; |
1802 | 318k | } |
1803 | | |
1804 | | static void i_set_object_type(gs_memory_t *mem, void *ptr, gs_memory_type_ptr_t type) |
1805 | 0 | { |
1806 | 0 | obj_header_t *pp; |
1807 | |
1808 | 0 | if (ptr == 0) |
1809 | 0 | return; |
1810 | 0 | pp = (obj_header_t *) ptr - 1; |
1811 | 0 | pp->o_type = type; |
1812 | 0 | } |
1813 | | |
1814 | | static void i_defer_frees(gs_memory_t *mem, int defer) |
1815 | 0 | { |
1816 | 0 | } |
1817 | | |
1818 | | /* ------ Internal procedures ------ */ |
1819 | | |
1820 | | /* Compute the amount of free object space by scanning free lists. */ |
1821 | | static size_t |
1822 | | compute_free_objects(gs_ref_memory_t * mem) |
1823 | 217k | { |
1824 | 217k | size_t unused = mem->lost.objects; |
1825 | 217k | int i; |
1826 | | |
1827 | | /* Add up space on free lists. */ |
1828 | 22.4M | for (i = 0; i < num_freelists; i++) { |
1829 | 22.2M | const obj_header_t *pfree; |
1830 | | |
1831 | 22.6M | for (pfree = mem->freelists[i]; pfree != 0; |
1832 | 22.2M | pfree = *(const obj_header_t * const *)pfree |
1833 | 22.2M | ) |
1834 | 448k | unused += obj_align_round(pfree[-1].o_size); |
1835 | 22.2M | } |
1836 | 217k | return unused; |
1837 | 217k | } |
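
/* Sketch of the intrusive freelist being walked above: a free block's
 * first word is reused as the link to the next free block, and the
 * size lives in the header immediately below the block. mini_hdr is a
 * simplified stand-in for obj_header_t. */
#include <stddef.h>

typedef struct { size_t o_size; } mini_hdr;

static size_t align_up(size_t n) { return (n + 7) & ~(size_t)7; }

/* head points at the first free block's contents, not its header,
 * exactly as mem->freelists[i] does. */
static size_t sum_freelist(void *head)
{
    size_t total = 0;
    void *p;

    for (p = head; p != NULL; p = *(void **)p) { /* link in 1st word */
        mini_hdr *h = (mini_hdr *)p - 1;         /* header precedes it */
        total += align_up(h->o_size);
    }
    return total;
}
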
1838 | | |
1839 | | /* Allocate an object from the large-block freelist. */ |
1840 | | static obj_header_t * /* rets obj if allocated, else 0 */ |
1841 | | large_freelist_alloc(gs_ref_memory_t *mem, obj_size_t size) |
1842 | 8.15M | { |
1843 | | /* Scan the large-object freelist. We'll grab an object up to 1/8 bigger */
1844 | | /* right away; otherwise use the best fit found over the entire scan. */
1845 | 8.15M | obj_size_t aligned_size = obj_align_round(size); |
1846 | 8.15M | size_t aligned_min_size = aligned_size + sizeof(obj_header_t); |
1847 | 8.15M | size_t aligned_max_size = |
1848 | 8.15M | aligned_min_size + obj_align_round(aligned_min_size / 8); |
1849 | 8.15M | obj_header_t *best_fit = 0; |
1850 | 8.15M | obj_header_t **best_fit_prev = NULL; /* initialize to avoid an indeterminate value */
1851 | 8.15M | obj_size_t best_fit_size = (obj_size_t)SIZE_MAX; |
1852 | 8.15M | obj_header_t *pfree; |
1853 | 8.15M | obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX]; |
1854 | 8.15M | size_t largest_size = 0; |
1855 | | |
1856 | 8.15M | if (aligned_size > mem->largest_free_size) |
1857 | 6.48M | return 0; /* definitely no block large enough */ |
1858 | | |
1859 | 7.05M | while ((pfree = *ppfprev) != 0) { |
1860 | 6.85M | obj_size_t free_size = obj_align_round(pfree[-1].o_size); |
1861 | | |
1862 | 6.85M | if (free_size == aligned_size || |
1863 | 6.85M | (free_size >= aligned_min_size && free_size < best_fit_size) |
1864 | 6.85M | ) { |
1865 | 1.57M | best_fit = pfree; |
1866 | 1.57M | best_fit_prev = ppfprev; |
1867 | 1.57M | best_fit_size = pfree[-1].o_size; |
1868 | 1.57M | if (best_fit_size <= aligned_max_size) |
1869 | 1.46M | break; /* good enough fit to spare scan of entire list */ |
1870 | 1.57M | } |
1871 | 5.38M | ppfprev = (obj_header_t **) pfree; |
1872 | 5.38M | if (free_size > largest_size) |
1873 | 412k | largest_size = free_size; |
1874 | 5.38M | } |
1875 | 1.67M | if (best_fit == 0) { |
1876 | | /* |
1877 | | * No single free clump is large enough, but since we scanned the |
1878 | | * entire list, we now have an accurate updated value for |
1879 | | * largest_free_size. |
1880 | | */ |
1881 | 154k | mem->largest_free_size = largest_size; |
1882 | 154k | return 0; |
1883 | 154k | } |
1884 | | |
1885 | | /* Remove from freelist & return excess memory to free */ |
1886 | 1.51M | *best_fit_prev = *(obj_header_t **)best_fit; |
1887 | 1.51M | trim_obj(mem, best_fit, aligned_size, (clump_t *)0); |
1888 | | |
1889 | | /* Pre-init block header; o_alone & o_type are already init'd */ |
1890 | 1.51M | best_fit[-1].o_size = size; |
1891 | | |
1892 | 1.51M | return best_fit; |
1893 | 1.67M | } |
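
/* Sketch of the "good enough" best-fit policy above: accept the first
 * block within roughly 1/8 of the request immediately, otherwise keep
 * the best fit seen over the whole list. An array stands in for the
 * linked freelist; min_ok/max_ok mirror aligned_min/max_size. */
#include <stddef.h>

static int
best_fit(const size_t *sizes, int n, size_t want, size_t hdr)
{
    size_t min_ok = want + hdr;          /* room for a dummy header */
    size_t max_ok = min_ok + min_ok / 8; /* "good enough" ceiling */
    size_t best_size = (size_t)-1;
    int best = -1, i;

    for (i = 0; i < n; i++) {
        size_t s = sizes[i];

        if (s == want || (s >= min_ok && s < best_size)) {
            best = i;
            best_size = s;
            if (best_size <= max_ok)
                break;                   /* spare the rest of the scan */
        }
    }
    return best;    /* -1: no fit; the caller must grow the heap */
}
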
1894 | | |
1895 | | /* Allocate an object. This handles all but the fastest, simplest case. */ |
1896 | | static obj_header_t * |
1897 | | alloc_obj(gs_ref_memory_t *mem, obj_size_t lsize, gs_memory_type_ptr_t pstype, |
1898 | | alloc_flags_t flags, client_name_t cname) |
1899 | 28.4M | { |
1900 | 28.4M | obj_header_t *ptr; |
1901 | | |
1902 | 28.4M | if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) { |
1903 | | /* |
1904 | | * Give the object a clump all its own. Note that this case does |
1905 | | * not occur if is_controlled is true. |
1906 | | */ |
1907 | 13.1M | obj_size_t asize = |
1908 | 13.1M | ((lsize + obj_align_mask) & -obj_align_mod) + |
1909 | 13.1M | sizeof(obj_header_t); |
1910 | 13.1M | clump_t *cp = |
1911 | 13.1M | alloc_acquire_clump(mem, asize + sizeof(clump_head_t), false, |
1912 | 13.1M | "large object clump"); |
1913 | | |
1914 | 13.1M | if (asize < lsize) |
1915 | 0 | return 0; |
1916 | 13.1M | if (cp == 0) |
1917 | 11 | return 0; |
1918 | 13.1M | cp->c_alone = true; |
1919 | 13.1M | ptr = (obj_header_t *) cp->cbot; |
1920 | 13.1M | cp->cbot += asize; |
1921 | 13.1M | ptr->o_pad = 0; |
1922 | 13.1M | ptr->o_alone = 1; |
1923 | 13.1M | ptr->o_size = (obj_size_t)lsize; |
1924 | 15.2M | } else { |
1925 | | /* |
1926 | | * Cycle through the clumps at the current save level, starting |
1927 | | * with the currently open one. |
1928 | | */ |
1929 | 15.2M | clump_splay_walker sw; |
1930 | 15.2M | clump_t *cp = clump_splay_walk_init_mid(&sw, mem->cc); |
1931 | 15.2M | obj_size_t asize = obj_size_round(lsize); |
1932 | 15.2M | bool allocate_success = false; |
1933 | | |
1934 | 15.2M | if (lsize > max_freelist_size && (flags & ALLOC_DIRECT)) { |
1935 | | /* We haven't checked the large block freelist yet. */ |
1936 | 2.69M | if ((ptr = large_freelist_alloc(mem, lsize)) != 0) { |
1937 | 194k | --ptr; /* must point to header */ |
1938 | 194k | goto done; |
1939 | 194k | } |
1940 | 2.69M | } |
1941 | | |
1942 | 15.0M | if (cp == 0) { |
1943 | | /* Open an arbitrary clump. */ |
1944 | 44.1k | mem->cc = clump_splay_walk_init(&sw, mem); |
1945 | 44.1k | alloc_open_clump(mem); |
1946 | 44.1k | } |
1947 | | |
1948 | 15.0M | #define CAN_ALLOC_AT_END(cp)\ |
1949 | 88.4M | ((cp) && !((cp)->c_alone) && (cp)->ctop - (byte *) (ptr = (obj_header_t *) (cp)->cbot)\ |
1950 | 72.8M | > asize + sizeof(obj_header_t)) |
1951 | | |
1952 | 88.4M | do { |
1953 | 88.4M | if (CAN_ALLOC_AT_END(mem->cc)) { |
1954 | 14.1M | allocate_success = true; |
1955 | 14.1M | break; |
1956 | 74.3M | } else if (mem->is_controlled) { |
1957 | | /* Try consolidating free space. */ |
1958 | 0 | gs_consolidate_free((gs_memory_t *)mem); |
1959 | 0 | if (CAN_ALLOC_AT_END(mem->cc)) { |
1960 | 0 | allocate_success = true; |
1961 | 0 | break; |
1962 | 0 | } |
1963 | 0 | } |
1964 | | /* No luck, go on to the next clump. */ |
1965 | 74.3M | cp = clump_splay_walk_fwd(&sw); |
1966 | 74.3M | if (cp == NULL) |
1967 | 927k | break; |
1968 | | |
1969 | 73.4M | alloc_close_clump(mem); |
1970 | 73.4M | mem->cc = cp; |
1971 | 73.4M | alloc_open_clump(mem); |
1972 | 73.4M | } |
1973 | 73.4M | while (1); |
1974 | | |
1975 | | #ifdef CONSOLIDATE_BEFORE_ADDING_CLUMP |
1976 | | if (!allocate_success) { |
1977 | | /* |
1978 | | * Try consolidating free space before giving up. |
1979 | | * It's not clear this is a good idea, since it requires quite |
1980 | | * a lot of computation and doesn't seem to improve things much. |
1981 | | */ |
1982 | | if (!mem->is_controlled) { /* already did this if controlled */ |
1983 | | clump_t *cp; |
1984 | | |
1985 | | alloc_close_clump(mem); |
1986 | | for (cp = clump_splay_walk_init_mid(&sw, cp_orig); cp != NULL; cp = clump_splay_walk_fwd(&sw)) |
1987 | | { |
1988 | | consolidate_clump_free(cp, mem); |
1989 | | if (CAN_ALLOC_AT_END(cp)) { |
1990 | | mem->cc = cp; |
1991 | | alloc_open_clump(mem); |
1992 | | allocate_success = true; |
1993 | | break; |
1994 | | } |
1995 | | } |
1996 | | } |
1997 | | } |
1998 | | #endif |
1999 | | |
2000 | 0 | #undef CAN_ALLOC_AT_END |
2001 | | |
2002 | 15.0M | if (!allocate_success) { |
2003 | | /* Add another clump. */ |
2004 | 927k | clump_t *cp = |
2005 | 927k | alloc_add_clump(mem, mem->clump_size, "clump"); |
2006 | | |
2007 | 927k | if (cp) { |
2008 | | /* mem->cc == cp */ |
2009 | 927k | ptr = (obj_header_t *)cp->cbot; |
2010 | 927k | allocate_success = true; |
2011 | 927k | } |
2012 | 927k | } |
2013 | | |
2014 | | /* |
2015 | | * If no success, try to scavenge from low free memory. This is |
2016 | | * only enabled for controlled memory (currently only async |
2017 | | * renderer) because it's too much work to prevent it from |
2018 | | * examining outer save levels in the general case. |
2019 | | */ |
2020 | 15.0M | if (allocate_success) |
2021 | 15.0M | mem->cc->cbot = (byte *) ptr + asize; |
2022 | 0 | else if (!mem->is_controlled || |
2023 | 0 | (ptr = scavenge_low_free(mem, lsize)) == 0) |
2024 | 0 | return 0; /* allocation failed */ |
2025 | 15.0M | ptr->o_pad = 0; |
2026 | 15.0M | ptr->o_alone = 0; |
2027 | 15.0M | ptr->o_size = lsize; |
2028 | 15.0M | } |
2029 | 28.4M | done: |
2030 | 28.4M | ptr->o_type = pstype; |
2031 | | # if IGC_PTR_STABILITY_CHECK |
2032 | | ptr->d.o.space_id = mem->space_id; |
2033 | | # endif |
2034 | 28.4M | ptr++; |
2035 | 28.4M | gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize); |
2036 | 28.4M | return Memento_label(ptr, cname); |
2037 | 28.4M | } |
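
/* The routing logic above, reduced to a standalone decision function
 * (illustrative names, not Ghostscript API): large or immovable
 * requests get a clump of their own; otherwise the allocator bumps a
 * clump at this save level if any has room, and finally adds a fresh
 * clump. best_gap stands for the largest ctop - cbot gap found while
 * walking the clumps. */
#include <stddef.h>

typedef enum {
    ALLOC_OWN_CLUMP,    /* lsize >= large_size or ALLOC_IMMOVABLE set */
    ALLOC_BUMP_CLUMP,   /* fits between cbot and ctop of some clump */
    ALLOC_NEW_CLUMP     /* nothing fits: acquire another clump */
} alloc_route;

static alloc_route
route_alloc(size_t lsize, size_t large_size, int immovable, size_t best_gap)
{
    if (lsize >= large_size || immovable)
        return ALLOC_OWN_CLUMP;
    if (best_gap > lsize)
        return ALLOC_BUMP_CLUMP;
    return ALLOC_NEW_CLUMP;
}
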
2038 | | |
2039 | | /* |
2040 | | * Consolidate free objects that are contiguous with the free space at
2041 | | * cbot into the cbot area. Also keep track of the end of the highest
2042 | | * internal free object (int_freed_top).
2043 | | */ |
2044 | | static void |
2045 | | consolidate_clump_free(clump_t *cp, gs_ref_memory_t *mem) |
2046 | 1.13M | { |
2047 | 1.13M | obj_header_t *begin_free = 0; |
2048 | | |
2049 | 1.13M | cp->int_freed_top = cp->cbase; /* below all objects in clump */ |
2050 | 31.3M | SCAN_CLUMP_OBJECTS(cp) |
2051 | 31.3M | DO_ALL |
2052 | 31.3M | if (pre->o_type == &st_free) { |
2053 | 1.49M | if (begin_free == 0) |
2054 | 877k | begin_free = pre; |
2055 | 29.8M | } else { |
2056 | 29.8M | if (begin_free) |
2057 | 46.5k | cp->int_freed_top = (byte *)pre; /* first byte following internal free */ |
2058 | 29.8M | begin_free = 0; |
2059 | 29.8M | } |
2060 | 31.3M | END_OBJECTS_SCAN |
2061 | 1.13M | if (begin_free) { |
2062 | | /* We found free objects at the top of the object area. */ |
2063 | | /* Remove the free objects from the freelists. */ |
2064 | 830k | remove_range_from_freelist(mem, begin_free, cp->cbot); |
2065 | 830k | if_debug4m('a', (const gs_memory_t *)mem, |
2066 | 830k | "[a]resetting clump "PRI_INTPTR" cbot from "PRI_INTPTR" to "PRI_INTPTR" (%lu free)\n", |
2067 | 830k | (intptr_t)cp, (intptr_t)cp->cbot, (intptr_t)begin_free, |
2068 | 830k | (intptr_t)((byte *)cp->cbot - (byte *)begin_free)); |
2069 | 830k | cp->cbot = (byte *) begin_free; |
2070 | 830k | } |
2071 | 1.13M | } |
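
/* Sketch of the consolidation pass above on a simplified clump: walk
 * the objects in address order, remember where the current run of
 * free objects began, and if a free run reaches the allocation
 * frontier, cbot can be pulled back to its start. The record array is
 * an illustrative stand-in for SCAN_CLUMP_OBJECTS. */

typedef struct { int is_free; } rec;

/* Returns the index where a trailing run of free objects begins,
 * or n if the last object is live (nothing to reclaim). */
static int trailing_free_run(const rec *objs, int n)
{
    int begin_free = n, i;

    for (i = 0; i < n; i++) {
        if (objs[i].is_free) {
            if (begin_free == n)
                begin_free = i; /* a run starts here */
        } else {
            begin_free = n;     /* a live object breaks the run */
        }
    }
    return begin_free;
}
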
2072 | | |
2073 | | static splay_app_result_t |
2074 | | consolidate(clump_t *cp, void *arg) |
2075 | 0 | { |
2076 | 0 | gs_ref_memory_t *mem = (gs_ref_memory_t *)arg; |
2077 | |
2078 | 0 | consolidate_clump_free(cp, mem); |
2079 | 0 | if (cp->cbot == cp->cbase && cp->ctop == cp->climit) { |
2080 | | /* The entire clump is free. */ |
2081 | 0 | if (!mem->is_controlled) { |
2082 | 0 | alloc_free_clump(cp, mem); |
2083 | 0 | if (mem->cc == cp) |
2084 | 0 | mem->cc = NULL; |
2085 | 0 | } |
2086 | 0 | } |
2087 | |
2088 | 0 | return SPLAY_APP_CONTINUE; |
2089 | 0 | } |
2090 | | |
2091 | | /* Consolidate free objects. */ |
2092 | | void |
2093 | | ialloc_consolidate_free(gs_ref_memory_t *mem) |
2094 | 0 | { |
2095 | 0 | alloc_close_clump(mem); |
2096 | | |
2097 | | /* We used to visit clumps in reverse order to encourage LIFO behavior, |
2098 | | * but with binary trees this is not possible (unless you want to |
2099 | | * either change the tree during the process, recurse, or otherwise |
2100 | | * hold the state). */ |
2101 | 0 | clump_splay_app(mem->root, mem, consolidate, mem); |
2102 | | |
2103 | | /* NOTE: Previously, if we freed the current clump, we'd move to the
2104 | | * bigger of its children. We now just move to the root. */
2105 | 0 | if (mem->cc == NULL) |
2106 | 0 | mem->cc = mem->root; |
2107 | |
2108 | 0 | alloc_open_clump(mem); |
2109 | 0 | } |
2110 | | static void |
2111 | | i_consolidate_free(gs_memory_t *mem) |
2112 | 0 | { |
2113 | 0 | ialloc_consolidate_free((gs_ref_memory_t *)mem); |
2114 | 0 | } |
2115 | | |
2116 | | typedef struct |
2117 | | { |
2118 | | uint need_free; |
2119 | | obj_header_t *found_pre; |
2120 | | gs_ref_memory_t *mem; |
2121 | | obj_size_t request_size; |
2122 | | } scavenge_data; |
2123 | | |
2124 | | static splay_app_result_t |
2125 | | scavenge(clump_t *cp, void *arg) |
2126 | 0 | { |
2127 | 0 | scavenge_data *sd = (scavenge_data *)arg; |
2128 | 0 | obj_header_t *begin_free = NULL; |
2129 | 0 | obj_size_t found_free = 0; |
2130 | |
2131 | 0 | sd->found_pre = NULL; |
2132 | |
2133 | 0 | SCAN_CLUMP_OBJECTS(cp) |
2134 | 0 | DO_ALL |
2135 | 0 | if (pre->o_type == &st_free) { |
2136 | 0 | if (begin_free == 0) { |
2137 | 0 | found_free = 0; |
2138 | 0 | begin_free = pre; |
2139 | 0 | } |
2140 | 0 | found_free += pre_obj_rounded_size(pre); |
2141 | 0 | if (begin_free != 0 && found_free >= sd->need_free) |
2142 | 0 | break; |
2143 | 0 | } else |
2144 | 0 | begin_free = 0; |
2145 | 0 | END_OBJECTS_SCAN_NO_ABORT |
2146 | |
2147 | 0 | if (begin_free != 0 && found_free >= sd->need_free) { |
2148 | | /* Fish found pieces out of various freelists */ |
2149 | 0 | remove_range_from_freelist(sd->mem, (char*)begin_free, |
2150 | 0 | (char*)begin_free + found_free); |
2151 | | |
2152 | | /* Prepare found object */ |
2153 | 0 | sd->found_pre = begin_free; |
2154 | 0 | sd->found_pre->o_type = &st_free; /* don't confuse GC if gets lost */ |
2155 | 0 | sd->found_pre->o_size = found_free - sizeof(obj_header_t); |
2156 | | |
2157 | | /* Chop off excess tail piece & toss it back into free pool */ |
2158 | 0 | trim_obj(sd->mem, sd->found_pre + 1, sd->request_size, cp); |
2159 | 0 | return SPLAY_APP_STOP; |
2160 | 0 | } |
2161 | | |
2162 | 0 | return SPLAY_APP_CONTINUE; |
2163 | 0 | } |
2164 | | |
2165 | | /* Try to free up the given amount of space from free space below the clump base. */
2166 | | static obj_header_t * /* returns uninitialized object hdr, NULL if none found */ |
2167 | | scavenge_low_free(gs_ref_memory_t *mem, obj_size_t request_size) |
2168 | 0 | { |
2169 | | /* find the first range of memory that can be glued back together to fill the request */
2170 | 0 | scavenge_data sd; |
2171 | 0 | obj_size_t request_size_rounded = obj_size_round(request_size); |
2172 | |
2173 | 0 | sd.found_pre = 0; |
2174 | 0 | sd.need_free = request_size_rounded + sizeof(obj_header_t); /* room for GC's dummy hdr */ |
2175 | 0 | sd.mem = mem; |
2176 | 0 | sd.request_size = request_size; |
2177 | |
2178 | 0 | clump_splay_app(mem->root, mem, scavenge, &sd); |
2179 | 0 | return sd.found_pre; |
2180 | 0 | } |
2181 | | |
2182 | | /* Remove a range of memory from mem's freelists */
2183 | | static void |
2184 | | remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top) |
2185 | 830k | { |
2186 | 830k | int num_free[num_freelists]; |
2187 | 830k | int smallest = num_freelists, largest = -1; |
2188 | 830k | const obj_header_t *cur; |
2189 | 830k | obj_size_t size; |
2190 | 830k | int i; |
2191 | 830k | obj_size_t removed = 0; |
2192 | | |
2193 | | /* |
2194 | | * Scan the range from bottom to top (it contains only free objects),
2195 | | * counting the number of objects of each size.
2196 | | */ |
2197 | | |
2198 | 2.15M | for (cur = bottom; cur != top; |
2199 | 1.31M | cur = (const obj_header_t *) |
2200 | 1.31M | ((const byte *)cur + obj_size_round(size)) |
2201 | 1.31M | ) { |
2202 | 1.31M | size = cur->o_size; |
2203 | 1.31M | i = (size > max_freelist_size ? LARGE_FREELIST_INDEX : |
2204 | 1.31M | (size + obj_align_mask) >> log2_obj_align_mod); |
2205 | 1.31M | if (i < smallest) { |
2206 | | /* |
2207 | | * 0-length free blocks aren't kept on any list, because |
2208 | | * they don't have room for a pointer. |
2209 | | */ |
2210 | 891k | if (i == 0) |
2211 | 0 | continue; |
2212 | 891k | if (smallest < num_freelists) |
2213 | 60.7k | memset(&num_free[i], 0, (smallest - i) * sizeof(int)); |
2214 | 830k | else |
2215 | 830k | num_free[i] = 0; |
2216 | 891k | smallest = i; |
2217 | 891k | } |
2218 | 1.31M | if (i > largest) { |
2219 | 877k | if (largest >= 0) |
2220 | 46.7k | memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int)); |
2221 | 877k | largest = i; |
2222 | 877k | } |
2223 | 1.31M | num_free[i]++; |
2224 | 1.31M | } |
2225 | | |
2226 | | /* |
2227 | | * Remove free objects from the freelists, adjusting lost.objects by |
2228 | | * subtracting the size of the region being processed minus the amount |
2229 | | * of space reclaimed. |
2230 | | */ |
2231 | | |
2232 | 5.02M | for (i = smallest; i <= largest; i++) { |
2233 | 4.19M | int count = num_free[i]; |
2234 | 4.19M | obj_header_t *pfree; |
2235 | 4.19M | obj_header_t **ppfprev; |
2236 | | |
2237 | 4.19M | if (!count) |
2238 | 3.23M | continue; |
2239 | 960k | ppfprev = &mem->freelists[i]; |
2240 | 2.87M | for (;;) { |
2241 | 2.87M | pfree = *ppfprev; |
2242 | 2.87M | if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) { |
2243 | | /* We're removing an object. */ |
2244 | 1.31M | *ppfprev = *(obj_header_t **) pfree; |
2245 | 1.31M | removed += obj_align_round(pfree[-1].o_size); |
2246 | 1.31M | if (!--count) |
2247 | 960k | break; |
2248 | 1.31M | } else |
2249 | 1.55M | ppfprev = (obj_header_t **) pfree; |
2250 | 2.87M | } |
2251 | 960k | } |
2252 | 830k | mem->lost.objects -= (char*)top - (char*)bottom - removed; |
2253 | 830k | } |
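
/* Sketch of the pointer-to-pointer unlink loop above: walking a singly
 * linked freelist through the address of each link lets a matching
 * node be spliced out without tracking a separate "previous" node.
 * (The real code first counts the objects per bucket so each list walk
 * can stop early.) node and in_range are simplified stand-ins. */
#include <stddef.h>

typedef struct node { struct node *next; } node;

static int in_range(const node *p, const void *bot, const void *top)
{
    return (const void *)p >= bot && (const void *)p < top;
}

static void remove_range(node **head, const void *bot, const void *top)
{
    node **pp = head;
    node *p;

    while ((p = *pp) != NULL) {
        if (in_range(p, bot, top))
            *pp = p->next;      /* splice out; pp stays put */
        else
            pp = &p->next;      /* advance the link pointer */
    }
}
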
2254 | | |
2255 | | /* Trim a memory object down to a given size */ |
2256 | | static void |
2257 | | trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, obj_size_t size, clump_t *cp) |
2258 | | /* Obj must have rounded size == req'd size, or have enough room for */ |
2259 | | /* trailing dummy obj_header */ |
2260 | 1.51M | { |
2261 | 1.51M | obj_size_t rounded_size = obj_align_round(size); |
2262 | 1.51M | obj_header_t *pre_obj = obj - 1; |
2263 | 1.51M | obj_header_t *excess_pre = (obj_header_t*)((char*)obj + rounded_size); |
2264 | 1.51M | obj_size_t old_rounded_size = obj_align_round(pre_obj->o_size); |
2265 | 1.51M | obj_size_t excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t); |
2266 | | |
2267 | | /* trim object's size to desired */ |
2268 | 1.51M | pre_obj->o_size = size; |
2269 | 1.51M | if (old_rounded_size == rounded_size) |
2270 | 1.41M | return; /* nothing more to do here */ |
2271 | | /* |
2272 | | * If the object is alone in its clump, move cbot to point to the end |
2273 | | * of the object. |
2274 | | */ |
2275 | 102k | if (pre_obj->o_alone) { |
2276 | 0 | if (!cp) { |
2277 | 0 | mem->cfreed.memory = mem; |
2278 | 0 | if (clump_locate(obj, &mem->cfreed)) { |
2279 | 0 | cp = mem->cfreed.cp; |
2280 | 0 | } |
2281 | 0 | } |
2282 | 0 | if (cp) { |
2283 | | #ifdef DEBUG |
2284 | | if (cp->cbot != (byte *)obj + old_rounded_size) { |
2285 | | lprintf3("resizing "PRI_INTPTR", old size %u, new size %u, cbot wrong!\n", |
2286 | | (intptr_t)obj, old_rounded_size, size); |
2287 | | /* gs_abort */ |
2288 | | } else |
2289 | | #endif |
2290 | 0 | { |
2291 | 0 | cp->cbot = (byte *)excess_pre; |
2292 | 0 | return; |
2293 | 0 | } |
2294 | 0 | } |
2295 | | /* |
2296 | | * Something very weird is going on. This probably shouldn't |
2297 | | * ever happen, but if it does.... |
2298 | | */ |
2299 | 0 | pre_obj->o_pad = 0; |
2300 | 0 | pre_obj->o_alone = 0; |
2301 | 0 | } |
2302 | | /* make excess into free obj */ |
2303 | 102k | excess_pre->o_type = &st_free; /* don't confuse GC */ |
2304 | 102k | excess_pre->o_size = excess_size; |
2305 | 102k | excess_pre->o_pad = 0; |
2306 | 102k | excess_pre->o_alone = 0; |
2307 | 102k | if (excess_size >= obj_align_mod) { |
2308 | | /* Put excess object on a freelist */ |
2309 | 102k | obj_header_t **pfl; |
2310 | | |
2311 | 102k | if (mem->cc && (byte *)excess_pre >= mem->cc->int_freed_top) |
2312 | 33.9k | mem->cc->int_freed_top = (byte *)excess_pre + excess_size; |
2313 | 102k | if (excess_size <= max_freelist_size) |
2314 | 56.2k | pfl = &mem->freelists[(excess_size + obj_align_mask) >> |
2315 | 56.2k | log2_obj_align_mod]; |
2316 | 45.9k | else { |
2317 | 45.9k | uint rounded_size = obj_align_round(excess_size); |
2318 | | |
2319 | 45.9k | pfl = &mem->freelists[LARGE_FREELIST_INDEX]; |
2320 | 45.9k | if (rounded_size > mem->largest_free_size) |
2321 | 0 | mem->largest_free_size = rounded_size; |
2322 | 45.9k | } |
2323 | 102k | *(obj_header_t **) (excess_pre + 1) = *pfl; |
2324 | 102k | *pfl = excess_pre + 1; |
2325 | 102k | mem->cfreed.memory = mem; |
2326 | 102k | } else { |
2327 | | /* excess piece will be "lost" memory */ |
2328 | 15 | mem->lost.objects += excess_size + sizeof(obj_header_t); |
2329 | 15 | } |
2330 | 102k | } |
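
/* Standalone sketch of the trim operation above: shrink a block to
 * its requested size and turn the excess tail into an independent
 * free block with its own header, ready for a freelist. The header
 * shape is a simplified assumption; as in trim_obj, the caller must
 * guarantee that when the rounded sizes differ there is room for the
 * trailing header. */
#include <stddef.h>

typedef struct { size_t size; int is_free; } hdr;

#define ALIGN_UP(n) (((n) + 7u) & ~(size_t)7u)

/* Returns the header of the carved-off free block, or NULL if the
 * block's rounded size was unchanged. */
static hdr *trim_block(hdr *h, size_t new_size)
{
    size_t old_r = ALIGN_UP(h->size);
    size_t new_r = ALIGN_UP(new_size);
    hdr *excess;

    h->size = new_size;                   /* trim to the desired size */
    if (old_r == new_r)
        return NULL;                      /* nothing to carve off */
    excess = (hdr *)((char *)(h + 1) + new_r);
    excess->size = old_r - new_r - sizeof(hdr);
    excess->is_free = 1;                  /* don't confuse a scanner */
    return excess;
}
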
2331 | | |
2332 | | /* ================ Roots ================ */ |
2333 | | |
2334 | | /* Register a root. */ |
2335 | | static int |
2336 | | i_register_root(gs_memory_t * mem, gs_gc_root_t ** rpp, gs_ptr_type_t ptype, |
2337 | | void **up, client_name_t cname) |
2338 | 320k | { |
2339 | 320k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
2340 | 320k | gs_gc_root_t *rp; |
2341 | | |
2342 | 320k | if (rpp == NULL || *rpp == NULL) { |
2343 | 0 | rp = gs_raw_alloc_struct_immovable(imem->non_gc_memory, &st_gc_root_t, |
2344 | 0 | "i_register_root"); |
2345 | 0 | if (rp == 0) |
2346 | 0 | return_error(gs_error_VMerror); |
2347 | 0 | rp->free_on_unregister = true; |
2348 | 0 | if (rpp && *rpp == NULL) |
2349 | 0 | *rpp = rp; |
2350 | 320k | } else { |
2351 | 320k | rp = *rpp; |
2352 | 320k | rp->free_on_unregister = false; |
2353 | 320k | } |
2354 | 320k | if_debug3m('8', mem, "[8]register root(%s) "PRI_INTPTR" -> "PRI_INTPTR"\n", |
2355 | 320k | client_name_string(cname), (intptr_t)rp, (intptr_t)up); |
2356 | 320k | rp->ptype = ptype; |
2357 | 320k | rp->p = up; |
2358 | 320k | rp->next = imem->roots; |
2359 | 320k | imem->roots = rp; |
2360 | 320k | return 0; |
2361 | 320k | } |
2362 | | |
2363 | | /* Unregister a root. */ |
2364 | | static void |
2365 | | i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname) |
2366 | 294k | { |
2367 | 294k | gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem; |
2368 | 294k | gs_gc_root_t **rpp = &imem->roots; |
2369 | | |
2370 | 294k | if_debug2m('8', mem, "[8]unregister root(%s) "PRI_INTPTR"\n", |
2371 | 294k | client_name_string(cname), (intptr_t)rp); |
2372 | 294k | while (*rpp != rp) |
2373 | 0 | rpp = &(*rpp)->next; |
2374 | 294k | *rpp = (*rpp)->next; |
2375 | 294k | if (rp->free_on_unregister) |
2376 | 0 | gs_free_object(imem->non_gc_memory, rp, "i_unregister_root"); |
2377 | 294k | } |
2378 | | |
2379 | | /* ================ clumps ================ */ |
2380 | | |
2381 | | public_st_clump(); |
2382 | | |
2383 | | /* Insert a clump in the chain. This is exported for the GC and for */ |
2384 | | /* the forget_save operation. */ |
2385 | | void |
2386 | | alloc_link_clump(clump_t * cp, gs_ref_memory_t * imem) |
2387 | 14.6M | { |
2388 | 14.6M | splay_insert(cp, imem); |
2389 | 14.6M | SANITY_CHECK(cp); |
2390 | 14.6M | } |
2391 | | |
2392 | | /* Add a clump for ordinary allocation. */ |
2393 | | static clump_t * |
2394 | | alloc_add_clump(gs_ref_memory_t * mem, size_t csize, client_name_t cname) |
2395 | 927k | { |
2396 | 927k | clump_t *cp = alloc_acquire_clump(mem, csize, true, cname); |
2397 | | |
2398 | 927k | if (cp) { |
2399 | 927k | alloc_close_clump(mem); |
2400 | 927k | mem->cc = cp; |
2401 | 927k | gs_alloc_fill(mem->cc->cbase, gs_alloc_fill_free, |
2402 | 927k | mem->cc->climit - mem->cc->cbase); |
2403 | 927k | } |
2404 | 927k | return cp; |
2405 | 927k | } |
2406 | | |
2407 | | /* Acquire a clump. If we would exceed MaxLocalVM (if relevant), */ |
2408 | | /* or if we would exceed the VMThreshold and psignal is NULL, */ |
2409 | | /* return 0; if we would exceed the VMThreshold but psignal is valid, */ |
2410 | | /* just set the signal and return successfully. */ |
2411 | | static clump_t * |
2412 | | alloc_acquire_clump(gs_ref_memory_t * mem, size_t csize, bool has_strings, |
2413 | | client_name_t cname) |
2414 | 14.2M | { |
2415 | 14.2M | gs_memory_t *parent = mem->non_gc_memory; |
2416 | 14.2M | clump_t *cp; |
2417 | 14.2M | byte *cdata; |
2418 | | |
2419 | 14.2M | #if ARCH_SIZEOF_LONG > ARCH_SIZEOF_INT |
2420 | | /* If csize is larger than max_uint, punt. */ |
2421 | 14.2M | if (csize != (uint) csize) |
2422 | 0 | return 0; |
2423 | 14.2M | #endif |
2424 | 14.2M | cp = gs_raw_alloc_struct_immovable(parent, &st_clump, cname); |
2425 | | |
2426 | | /* gc_status.signal_value is initialised to zero when the
2427 | | * allocator is created; only the PostScript interpreter
2428 | | * (which implements garbage collection) ever sets it to
2429 | | * anything other than zero.
2430 | | */
2431 | 14.2M | if( mem->gc_status.signal_value != 0) { |
2432 | | /* we have a garbage collector */ |
2433 | 13.9M | if (mem->allocated >= mem->limit) { |
2434 | 2.26k | mem->gc_status.requested += csize; |
2435 | 2.26k | if (mem->limit >= mem->gc_status.max_vm) { |
2436 | 0 | gs_free_object(parent, cp, cname); |
2437 | 0 | return 0; |
2438 | 0 | } |
2439 | 2.26k | if_debug4m('0', (const gs_memory_t *)mem, |
2440 | 2.26k | "[0]signaling space=%d, allocated=%"PRIdSIZE", limit=%"PRIdSIZE", requested=%"PRIdSIZE"\n", |
2441 | 2.26k | mem->space, mem->allocated, |
2442 | 2.26k | mem->limit, mem->gc_status.requested); |
2443 | 2.26k | mem->gs_lib_ctx->gcsignal = mem->gc_status.signal_value; |
2444 | 2.26k | } |
2445 | 13.9M | } |
2446 | 14.2M | cdata = gs_alloc_bytes_immovable(parent, csize, cname); |
2447 | 14.2M | if (cp == 0 || cdata == 0) { |
2448 | 11 | gs_free_object(parent, cdata, cname); |
2449 | 11 | gs_free_object(parent, cp, cname); |
2450 | 11 | mem->gc_status.requested = csize; |
2451 | 11 | return 0; |
2452 | 11 | } |
2453 | 14.2M | alloc_init_clump(cp, cdata, cdata + csize, has_strings, (clump_t *) 0); |
2454 | 14.2M | alloc_link_clump(cp, mem); |
2455 | 14.2M | mem->allocated += st_clump.ssize + csize; |
2456 | 14.2M | SANITY_CHECK(cp); |
2457 | 14.2M | return cp; |
2458 | 14.2M | } |
2459 | | |
2460 | | /* Initialize the pointers in a clump. This is exported for save/restore. */ |
2461 | | /* The bottom pointer must be aligned, but the top pointer need not */ |
2462 | | /* be aligned. */ |
2463 | | void |
2464 | | alloc_init_clump(clump_t * cp, byte * bot, byte * top, bool has_strings, |
2465 | | clump_t * outer) |
2466 | 14.6M | { |
2467 | 14.6M | byte *cdata = bot; |
2468 | | |
2469 | 14.6M | if (outer != 0) |
2470 | 390k | outer->inner_count++; |
2471 | 14.6M | cp->chead = (clump_head_t *) cdata; |
2472 | 14.6M | cdata += sizeof(clump_head_t); |
2473 | 14.6M | cp->cbot = cp->cbase = cp->int_freed_top = cdata; |
2474 | 14.6M | cp->cend = top; |
2475 | 14.6M | cp->rcur = 0; |
2476 | 14.6M | cp->rtop = 0; |
2477 | 14.6M | cp->outer = outer; |
2478 | 14.6M | cp->inner_count = 0; |
2479 | 14.6M | cp->has_refs = false; |
2480 | 14.6M | cp->sbase = cdata; |
2481 | 14.6M | cp->c_alone = false; /* should be set correctly by caller */ |
2482 | 14.6M | if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) { |
2483 | | /* |
2484 | | * We allocate a large enough string marking and reloc table |
2485 | | * to cover the entire clump. |
2486 | | */ |
2487 | 1.42M | uint nquanta = string_space_quanta(top - cdata); |
2488 | | |
2489 | 1.42M | cp->climit = cdata + nquanta * string_data_quantum; |
2490 | 1.42M | cp->smark = cp->climit; |
2491 | 1.42M | cp->smark_size = string_quanta_mark_size(nquanta); |
2492 | 1.42M | cp->sreloc = |
2493 | 1.42M | (string_reloc_offset *) (cp->smark + cp->smark_size); |
2494 | 1.42M | cp->sfree1 = (uint *) cp->sreloc; |
2495 | 13.2M | } else { |
2496 | | /* No strings, don't need the string GC tables. */ |
2497 | 13.2M | cp->climit = cp->cend; |
2498 | 13.2M | cp->sfree1 = 0; |
2499 | 13.2M | cp->smark = 0; |
2500 | 13.2M | cp->smark_size = 0; |
2501 | 13.2M | cp->sreloc = 0; |
2502 | 13.2M | } |
2503 | 14.6M | cp->ctop = cp->climit; |
2504 | 14.6M | alloc_init_free_strings(cp); |
2505 | 14.6M | } |
2506 | | |
2507 | | /* Initialize the string freelists in a clump. */ |
2508 | | void |
2509 | | alloc_init_free_strings(clump_t * cp) |
2510 | 14.6M | { |
2511 | 14.6M | if (cp->sfree1) |
2512 | 1.42M | memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp)); |
2513 | 14.6M | cp->sfree = 0; |
2514 | 14.6M | } |
2515 | | |
2516 | | /* Close up the current clump. */ |
2517 | | /* This is exported for save/restore and the GC. */ |
2518 | | void |
2519 | | alloc_close_clump(gs_ref_memory_t * mem) |
2520 | 83.1M | { |
2521 | | #ifdef DEBUG |
2522 | | if (gs_debug_c('A')) { |
2523 | | dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem)); |
2524 | | dmprintf_clump((const gs_memory_t *)mem, "closing clump", mem->cc); |
2525 | | } |
2526 | | #endif |
2527 | 83.1M | } |
2528 | | |
2529 | | /* Reopen the current clump after a GC or restore. */ |
2530 | | void |
2531 | | alloc_open_clump(gs_ref_memory_t * mem) |
2532 | 81.9M | { |
2533 | | #ifdef DEBUG |
2534 | | if (gs_debug_c('A')) { |
2535 | | dmlprintf1((const gs_memory_t *)mem, "[a%d]", alloc_trace_space(mem)); |
2536 | | dmprintf_clump((const gs_memory_t *)mem, "opening clump", mem->cc); |
2537 | | } |
2538 | | #endif |
2539 | 81.9M | } |
2540 | | |
2541 | | #ifdef DEBUG |
2542 | | static splay_app_result_t |
2543 | | check_in_clump(clump_t *cp, void *arg) |
2544 | | { |
2545 | | clump_t **cpp = (clump_t **)arg; |
2546 | | |
2547 | | if (*cpp != cp) |
2548 | | return SPLAY_APP_CONTINUE; |
2549 | | *cpp = NULL; |
2550 | | |
2551 | | return SPLAY_APP_STOP; |
2552 | | } |
2553 | | #endif |
2554 | | |
2555 | | /* Remove a clump from the chain. This is exported for the GC. */ |
2556 | | void |
2557 | | alloc_unlink_clump(clump_t * cp, gs_ref_memory_t * mem) |
2558 | 14.6M | { |
2559 | 14.6M | SANITY_CHECK_MID(cp); |
2560 | | #ifdef DEBUG |
2561 | | if (gs_alloc_debug) { /* Check to make sure this clump belongs to this allocator. */ |
2562 | | clump_t *found = cp; |
2563 | | clump_splay_app(mem->root, mem, check_in_clump, &found); |
2564 | | |
2565 | | if (found != NULL) { |
2566 | | mlprintf2((const gs_memory_t *)mem, "unlink_clump "PRI_INTPTR" not owned by memory "PRI_INTPTR"!\n", |
2567 | | (intptr_t)cp, (intptr_t)mem); |
2568 | | return; /*gs_abort(); */ |
2569 | | } |
2570 | | } |
2571 | | #endif |
2572 | 14.6M | (void)clump_splay_remove(cp, mem); |
2573 | 14.6M | if (mem->cc == cp) { |
2574 | 70.4k | mem->cc = NULL; |
2575 | 70.4k | } |
2576 | 14.6M | } |
2577 | | |
2578 | | /* |
2579 | | * Free a clump. This is exported for the GC. Since we eventually use |
2580 | | * this to free the clump containing the allocator itself, we must be |
2581 | | * careful not to reference anything in the allocator after freeing the |
2582 | | * clump data. |
2583 | | */ |
2584 | | void |
2585 | | alloc_free_clump(clump_t * cp, gs_ref_memory_t * mem) |
2586 | 14.6M | { |
2587 | 14.6M | gs_memory_t *parent = mem->non_gc_memory; |
2588 | 14.6M | byte *cdata = (byte *)cp->chead; |
2589 | 14.6M | ulong csize = (byte *)cp->cend - cdata; |
2590 | | |
2591 | 14.6M | alloc_unlink_clump(cp, mem); |
2592 | 14.6M | mem->allocated -= st_clump.ssize; |
2593 | 14.6M | if (mem->cfreed.cp == cp) |
2594 | 49.5k | mem->cfreed.cp = 0; |
2595 | 14.6M | if (cp->outer == 0) { |
2596 | 14.2M | mem->allocated -= csize; |
2597 | 14.2M | gs_free_object(parent, cdata, "alloc_free_clump(data)"); |
2598 | 14.2M | } else { |
2599 | 390k | cp->outer->inner_count--; |
2600 | 390k | gs_alloc_fill(cdata, gs_alloc_fill_free, csize); |
2601 | 390k | } |
2602 | 14.6M | gs_free_object(parent, cp, "alloc_free_clump(clump struct)"); |
2603 | 14.6M | } |
2604 | | |
2605 | | /* Find the clump for a pointer. */ |
2606 | | /* Note that this only searches the current save level. */ |
2607 | | /* Since a given save level can't contain both a clump and an inner clump */ |
2608 | | /* of that clump, we can stop when is_within_clump succeeds, and just test */ |
2609 | | /* is_in_inner_clump then. */ |
2610 | | bool |
2611 | | clump_locate_ptr(const void *ptr, clump_locator_t * clp) |
2612 | 57.6M | { |
2613 | 57.6M | clump_t *cp = clp->memory->root; |
2614 | | |
2615 | 148M | while (cp) |
2616 | 146M | { |
2617 | 146M | if (PTR_LT(ptr, cp->cbase)) |
2618 | 47.8M | { |
2619 | 47.8M | cp = cp->left; |
2620 | 47.8M | continue; |
2621 | 47.8M | } |
2622 | 98.2M | if (PTR_GE(ptr, cp->cend)) |
2623 | 42.6M | { |
2624 | 42.6M | cp = cp->right; |
2625 | 42.6M | continue; |
2626 | 42.6M | } |
2627 | | /* Found it! */ |
2628 | 55.5M | splay_move_to_root(cp, clp->memory); |
2629 | 55.5M | clp->cp = cp; |
2630 | 55.5M | return !ptr_is_in_inner_clump(ptr, cp); |
2631 | 98.2M | } |
2632 | 2.10M | return false; |
2633 | 57.6M | } |
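
/* Sketch of the address lookup above: the clumps form a binary search
 * tree keyed by address range, so finding the clump that contains a
 * pointer is an ordinary BST descent on [base, end). The real tree is
 * a splay tree and additionally rotates the hit to the root; cnode is
 * a simplified stand-in. */
#include <stddef.h>

typedef struct cnode {
    const char *base, *end;     /* [base, end) owned by this clump */
    struct cnode *left, *right;
} cnode;

static cnode *locate(cnode *root, const void *ptr)
{
    const char *p = (const char *)ptr;

    while (root) {
        if (p < root->base)
            root = root->left;  /* this clump lies above ptr */
        else if (p >= root->end)
            root = root->right; /* this clump lies below ptr */
        else
            return root;        /* base <= p < end: found */
    }
    return NULL;
}
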
2634 | | |
2635 | | bool ptr_is_within_mem_clumps(const void *ptr, gs_ref_memory_t *mem) |
2636 | 180k | { |
2637 | 180k | clump_t *cp = mem->root; |
2638 | | |
2639 | 1.12M | while (cp) |
2640 | 947k | { |
2641 | 947k | if (PTR_LT(ptr, cp->cbase)) |
2642 | 747k | { |
2643 | 747k | cp = cp->left; |
2644 | 747k | continue; |
2645 | 747k | } |
2646 | 200k | if (PTR_GE(ptr, cp->cend)) |
2647 | 200k | { |
2648 | 200k | cp = cp->right; |
2649 | 200k | continue; |
2650 | 200k | } |
2651 | | /* Found it! */ |
2652 | 0 | splay_move_to_root(cp, mem); |
2653 | 0 | return true; |
2654 | 200k | } |
2655 | 180k | return false; |
2656 | 180k | } |
2657 | | |
2658 | | /* ------ Debugging ------ */ |
2659 | | |
2660 | | #ifdef DEBUG |
2661 | | |
2662 | | #include "string_.h" |
2663 | | |
2664 | | static inline bool |
2665 | | obj_in_control_region(const void *obot, const void *otop, |
2666 | | const dump_control_t *pdc) |
2667 | | { |
2668 | | return |
2669 | | ((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) && |
2670 | | (pdc->top == NULL || PTR_LT(obot, pdc->top))); |
2671 | | } |
2672 | | |
2673 | | const dump_control_t dump_control_default = |
2674 | | { |
2675 | | dump_do_default, NULL, NULL |
2676 | | }; |
2677 | | const dump_control_t dump_control_all = |
2678 | | { |
2679 | | dump_do_strings | dump_do_type_addresses | dump_do_pointers | |
2680 | | dump_do_pointed_strings | dump_do_contents, NULL, NULL |
2681 | | }; |
2682 | | |
2683 | | const dump_control_t dump_control_no_contents = |
2684 | | { |
2685 | | dump_do_strings | dump_do_type_addresses | dump_do_pointers | |
2686 | | dump_do_pointed_strings, NULL, NULL |
2687 | | }; |
2688 | | |
2689 | | /* |
2690 | | * Internal procedure to dump a block of memory, in hex and optionally |
2691 | | * also as characters. |
2692 | | */ |
2693 | | static void |
2694 | | debug_indent(const gs_memory_t *mem, int indent) |
2695 | | { |
2696 | | int i; |
2697 | | |
2698 | | for (i = indent; i > 0; --i) |
2699 | | dmputc(mem, ' '); |
2700 | | } |
2701 | | static void |
2702 | | debug_dump_contents(const gs_memory_t *mem, const byte * bot, |
2703 | | const byte * top, int indent, bool as_chars) |
2704 | | { |
2705 | | const byte *block; |
2706 | | |
2707 | | #define block_size 16 |
2708 | | |
2709 | | if (bot >= top) |
2710 | | return; |
2711 | | for (block = bot - ((bot - (byte *) 0) & (block_size - 1)); |
2712 | | block < top; block += block_size |
2713 | | ) { |
2714 | | int i; |
2715 | | char label[12]; |
2716 | | |
2717 | | /* Check for repeated blocks. */ |
2718 | | if (block >= bot + block_size && |
2719 | | block <= top - (block_size * 2) && |
2720 | | !memcmp(block, block - block_size, block_size) && |
2721 | | !memcmp(block, block + block_size, block_size) |
2722 | | ) { |
2723 | | if (block < bot + block_size * 2 || |
2724 | | memcmp(block, block - block_size * 2, block_size) |
2725 | | ) { |
2726 | | debug_indent(mem, indent); |
2727 | | dmputs(mem, " ...\n"); |
2728 | | } |
2729 | | continue; |
2730 | | } |
2731 | | gs_snprintf(label, sizeof(label), PRI_INTPTR":", (intptr_t)block); |
2732 | | debug_indent(mem, indent); |
2733 | | dmputs(mem, label); |
2734 | | for (i = 0; i < block_size; ++i) { |
2735 | | const char *sepr = ((i & 3) == 0 && i != 0 ? " " : " "); |
2736 | | |
2737 | | dmputs(mem, sepr); |
2738 | | if (block + i >= bot && block + i < top) |
2739 | | dmprintf1(mem, "%02x", block[i]); |
2740 | | else |
2741 | | dmputs(mem, " "); |
2742 | | } |
2743 | | dmputc(mem, '\n'); |
2744 | | if (as_chars) { |
2745 | | debug_indent(mem, indent + strlen(label)); |
2746 | | for (i = 0; i < block_size; ++i) { |
2747 | | byte ch; |
2748 | | |
2749 | | if ((i & 3) == 0 && i != 0) |
2750 | | dmputc(mem, ' '); |
2751 | | if (block + i >= bot && block + i < top && |
2752 | | (ch = block[i]) >= 32 && ch <= 126 |
2753 | | ) |
2754 | | dmprintf1(mem, " %c", ch); |
2755 | | else |
2756 | | dmputs(mem, " "); |
2757 | | } |
2758 | | dmputc(mem, '\n'); |
2759 | | } |
2760 | | } |
2761 | | #undef block_size |
2762 | | } |
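
/* Standalone sketch of the repeated-block elision used by the dump
 * above: a 16-byte line is replaced by "..." when it equals both of
 * its neighbours, and the "..." itself is printed only once per run
 * of identical lines. Simplified (whole lines only, hex only). */
#include <stdio.h>
#include <string.h>

#define BS 16 /* bytes per dump line */

static void dump(const unsigned char *buf, size_t len)
{
    size_t off;
    int i;

    for (off = 0; off + BS <= len; off += BS) {
        /* Interior line identical to both neighbours? Elide it. */
        if (off >= BS && off + 2 * BS <= len &&
            !memcmp(buf + off, buf + off - BS, BS) &&
            !memcmp(buf + off, buf + off + BS, BS)) {
            if (off < 2 * BS || memcmp(buf + off, buf + off - 2 * BS, BS))
                puts("  ...");  /* first elided line of this run */
            continue;
        }
        printf("%08lx:", (unsigned long)off);
        for (i = 0; i < BS; i++)
            printf(" %02x", buf[off + i]);
        putchar('\n');
    }
}
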
2763 | | |
2764 | | /* Print one object with the given options. */ |
2765 | | /* Relevant options: type_addresses, no_types, pointers, pointed_strings, */ |
2766 | | /* contents. */ |
2767 | | void |
2768 | | debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control) |
2769 | | { |
2770 | | const obj_header_t *pre = ((const obj_header_t *)obj) - 1; |
2771 | | ulong size = pre_obj_contents_size(pre); |
2772 | | const gs_memory_struct_type_t *type = pre->o_type; |
2773 | | dump_options_t options = control->options; |
2774 | | |
2775 | | dmprintf3(mem, " pre="PRI_INTPTR"(obj="PRI_INTPTR") size=%lu", |
2776 | | (intptr_t) pre, (intptr_t) obj, size); |
2777 | | switch (options & (dump_do_type_addresses | dump_do_no_types)) { |
2778 | | case dump_do_type_addresses + dump_do_no_types: /* addresses only */ |
2779 | | dmprintf1(mem, " type="PRI_INTPTR"", (intptr_t) type); |
2780 | | break; |
2781 | | case dump_do_type_addresses: /* addresses & names */ |
2782 | | dmprintf2(mem, " type=%s("PRI_INTPTR")", struct_type_name_string(type), |
2783 | | (intptr_t)type); |
2784 | | break; |
2785 | | case 0: /* names only */ |
2786 | | dmprintf1(mem, " type=%s", struct_type_name_string(type)); |
2787 | | case dump_do_no_types: /* nothing */ |
2788 | | ; |
2789 | | } |
2790 | | if (options & dump_do_marks) { |
2791 | | dmprintf2(mem, " smark/back=%u (0x%x)", pre->o_smark, pre->o_smark); |
2792 | | } |
2793 | | dmputc(mem, '\n'); |
2794 | | if (type == &st_free) |
2795 | | return; |
2796 | | if (options & dump_do_pointers) { |
2797 | | struct_proc_enum_ptrs((*proc)) = type->enum_ptrs; |
2798 | | uint index = 0; |
2799 | | enum_ptr_t eptr; |
2800 | | gs_ptr_type_t ptype; |
2801 | | |
2802 | | if (proc != gs_no_struct_enum_ptrs) { |
2803 | | if (proc != 0) { |
2804 | | for (; (ptype = (*proc)(mem, pre + 1, size, index, &eptr, type, NULL)) != 0; |
2805 | | ++index |
2806 | | ) { |
2807 | | const void *ptr = eptr.ptr; |
2808 | | |
2809 | | dmprintf1(mem, " ptr %u: ", index); |
2810 | | if (ptype == ptr_string_type || ptype == ptr_const_string_type) { |
2811 | | const gs_const_string *str = (const gs_const_string *)&eptr; |
2812 | | if (!str) |
2813 | | dmprintf(mem, "0x0"); |
2814 | | else |
2815 | | dmprintf2(mem, PRI_INTPTR "(%u)", (intptr_t)str->data, str->size); |
2816 | | if (options & dump_do_pointed_strings) { |
2817 | | dmputs(mem, " =>\n"); |
2818 | | if (!str) |
2819 | | dmprintf(mem, "(null)\n"); |
2820 | | else |
2821 | | debug_dump_contents(mem, str->data, str->data + str->size, 6, |
2822 | | true); |
2823 | | } else { |
2824 | | dmputc(mem, '\n'); |
2825 | | } |
2826 | | } else { |
2827 | | dmprintf1(mem, (PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ? |
2828 | | "("PRI_INTPTR")\n" : PRI_INTPTR "\n"), (intptr_t) ptr); |
2829 | | } |
2830 | | } |
2831 | | } else { /* proc == 0 */ |
2832 | | dmprintf(mem, "previous line should be a ref\n"); |
2833 | | } |
2834 | | } /* proc != gs_no_struct_enum_ptrs */ |
2835 | | } |
2836 | | if (options & dump_do_contents) { |
2837 | | debug_dump_contents(mem, (const byte *)obj, (const byte *)obj + size, |
2838 | | 0, false); |
2839 | | } |
2840 | | } |
2841 | | |
2842 | | /* Print the contents of a clump with the given options. */ |
2843 | | /* Relevant options: all. */ |
2844 | | void |
2845 | | debug_dump_clump(const gs_memory_t *mem, const clump_t * cp, const dump_control_t * control) |
2846 | | { |
2847 | | dmprintf1(mem, "clump at "PRI_INTPTR":\n", (intptr_t) cp); |
2848 | | dmprintf3(mem, " chead="PRI_INTPTR" cbase="PRI_INTPTR" sbase="PRI_INTPTR"\n", |
2849 | | (intptr_t)cp->chead, (intptr_t)cp->cbase, (intptr_t)cp->sbase); |
2850 | | dmprintf3(mem, " rcur="PRI_INTPTR" rtop="PRI_INTPTR" cbot="PRI_INTPTR"\n", |
2851 | | (intptr_t)cp->rcur, (intptr_t)cp->rtop, (intptr_t)cp->cbot); |
2852 | | dmprintf4(mem, " ctop="PRI_INTPTR" climit="PRI_INTPTR" smark="PRI_INTPTR", size=%u\n", |
2853 | | (intptr_t)cp->ctop, (intptr_t)cp->climit, (intptr_t)cp->smark, |
2854 | | cp->smark_size); |
2855 | | dmprintf2(mem, " sreloc="PRI_INTPTR" cend="PRI_INTPTR"\n", |
2856 | | (intptr_t)cp->sreloc, (intptr_t)cp->cend); |
2857 | | dmprintf6(mem, "left="PRI_INTPTR" right="PRI_INTPTR" parent="PRI_INTPTR" outer="PRI_INTPTR" inner_count=%u has_refs=%s\n", |
2858 | | (intptr_t)cp->left, (intptr_t)cp->right, (intptr_t)cp->parent, (intptr_t)cp->outer, |
2859 | | cp->inner_count, (cp->has_refs ? "true" : "false")); |
2860 | | |
2861 | | dmprintf2(mem, " sfree1="PRI_INTPTR" sfree="PRI_INTPTR"\n", |
2862 | | (intptr_t)cp->sfree1, (intptr_t)cp->sfree); |
2863 | | if (control->options & dump_do_strings) { |
2864 | | debug_dump_contents(mem, (control->bottom == 0 ? cp->ctop : |
2865 | | max(control->bottom, cp->ctop)), |
2866 | | (control->top == 0 ? cp->climit : |
2867 | | min(control->top, cp->climit)), |
2868 | | 0, true); |
2869 | | } |
2870 | | SCAN_CLUMP_OBJECTS(cp) |
2871 | | DO_ALL |
2872 | | if (obj_in_control_region(pre + 1, |
2873 | | (const byte *)(pre + 1) + size, |
2874 | | control) |
2875 | | ) |
2876 | | debug_print_object(mem, pre + 1, control); |
2877 | | END_OBJECTS_SCAN_NO_ABORT |
2878 | | } |
2879 | | void |
2880 | | debug_print_clump(const gs_memory_t *mem, const clump_t * cp) |
2881 | | { |
2882 | | dump_control_t control; |
2883 | | |
2884 | | control = dump_control_default; |
2885 | | debug_dump_clump(mem, cp, &control); |
2886 | | } |
2887 | | |
2888 | | /* Print the contents of all clumps managed by an allocator. */ |
2889 | | /* Relevant options: all. */ |
2890 | | void |
2891 | | debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control) |
2892 | | { |
2893 | | const clump_t *cp; |
2894 | | clump_splay_walker sw; |
2895 | | |
2896 | | for (cp = clump_splay_walk_init(&sw, mem); cp != NULL; cp = clump_splay_walk_fwd(&sw)) |
2897 | | { |
2898 | | if (obj_in_control_region(cp->cbase, cp->cend, control)) |
2899 | | debug_dump_clump((const gs_memory_t *)mem, cp, control); |
2900 | | } |
2901 | | } |
2902 | | |
2903 | | void |
2904 | | debug_dump_allocator(const gs_ref_memory_t *mem) |
2905 | | { |
2906 | | debug_dump_memory(mem, &dump_control_no_contents); |
2907 | | } |
2908 | | |
2909 | | /* Find all the objects that contain a given pointer. */ |
2910 | | void |
2911 | | debug_find_pointers(const gs_ref_memory_t *mem, const void *target) |
2912 | | { |
2913 | | clump_splay_walker sw; |
2914 | | dump_control_t control; |
2915 | | const clump_t *cp; |
2916 | | |
2917 | | control.options = 0; |
2918 | | for (cp = clump_splay_walk_init(&sw, mem); cp; cp = clump_splay_walk_fwd(&sw)) |
2919 | | { |
2920 | | SCAN_CLUMP_OBJECTS(cp); |
2921 | | DO_ALL |
2922 | | struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs; |
2923 | | uint index = 0; |
2924 | | enum_ptr_t eptr; |
2925 | | |
2926 | | if (proc) /* NB: doesn't trace refs -- fix me. */
2927 | | for (; (*proc)((const gs_memory_t *)mem, pre + 1, size, index, |
2928 | | &eptr, pre->o_type, NULL); |
2929 | | ++index) |
2930 | | if (eptr.ptr == target) { |
2931 | | dmprintf1((const gs_memory_t *)mem, "Index %d in", index); |
2932 | | debug_print_object((const gs_memory_t *)mem, pre + 1, &control); |
2933 | | } |
2934 | | END_OBJECTS_SCAN_NO_ABORT |
2935 | | } |
2936 | | } |
2937 | | static void ddct(const gs_memory_t *mem, clump_t *cp, clump_t *parent, int depth) |
2938 | | { |
2939 | | int i; |
2940 | | |
2941 | | if (cp == NULL) |
2942 | | return; |
2943 | | for (i = 0; i < depth; i++) |
2944 | | dmlprintf(mem, " "); |
2945 | | |
2946 | | dmlprintf7(mem, "Clump "PRI_INTPTR":"PRI_INTPTR" parent="PRI_INTPTR" left="PRI_INTPTR":"PRI_INTPTR" right="PRI_INTPTR":"PRI_INTPTR"\n", |
2947 | | (intptr_t)cp, (intptr_t)cp->cbase, (intptr_t)cp->parent, |
2948 | | (intptr_t)cp->left, (intptr_t)(cp->left ? cp->left->cbase : NULL), |
2949 | | (intptr_t)cp->right, (intptr_t)(cp->right ? cp->right->cbase : NULL)); |
2950 | | if (cp->parent != parent) |
2951 | | dmlprintf(mem, "Parent pointer mismatch!\n"); |
2952 | | ddct(mem, cp->left, cp, depth+1); |
2953 | | ddct(mem, cp->right, cp, depth+1); |
2954 | | } |
2955 | | void |
2956 | | debug_dump_clump_tree(const gs_ref_memory_t *mem) |
2957 | | { |
2958 | | ddct((const gs_memory_t *)mem, mem->root, NULL, 0); |
2959 | | } |
2960 | | |
2961 | | #endif /* DEBUG */ |