Line | Count | Source |
1 | | /* Copyright (C) 2001-2024 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Garbage collector for Ghostscript */ |
18 | | #include "memory_.h" |
19 | | #include "ghost.h" |
20 | | #include "ierrors.h" |
21 | | #include "gsexit.h" |
22 | | #include "gsmdebug.h" |
23 | | #include "gsstruct.h" |
24 | | #include "iastate.h" |
25 | | #include "isave.h" |
26 | | #include "isstate.h" |
27 | | #include "idict.h" |
28 | | #include "ipacked.h" |
29 | | #include "istruct.h" |
30 | | #include "igc.h" |
31 | | #include "igcstr.h" |
32 | | #include "inamedef.h" |
33 | | #include "opdef.h" /* for marking oparray names */ |
34 | | |
35 | | /* Define whether to force all garbage collections to be global. */ |
36 | | static const bool I_FORCE_GLOBAL_GC = false; |
37 | | |
38 | | /* Define whether to bypass the collector entirely. */ |
39 | | static const bool I_BYPASS_GC = false; |
40 | | |
41 | | /* Define an entry on the mark stack. */ |
42 | | typedef struct { |
43 | | void *ptr; |
44 | | uint index; |
45 | | bool is_refs; |
46 | | } ms_entry; |
47 | | |
48 | | /* Define (a segment of) the mark stack. */ |
49 | | /* entries[0] has ptr = 0 to indicate the bottom of the stack. */ |
50 | | /* count additional entries follow this structure. */ |
51 | | typedef struct gc_mark_stack_s gc_mark_stack; |
52 | | struct gc_mark_stack_s { |
53 | | gc_mark_stack *prev; |
54 | | gc_mark_stack *next; |
55 | | uint count; |
56 | | bool on_heap; /* if true, allocated during GC */ |
57 | | ms_entry entries[1]; |
58 | | }; |
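 | | /* A minimal sketch (editor's assumption, not part of the source): |
 | | computing the total depth of a chained mark stack. It mirrors |
 | | depth_dots() below: entries[0] of each segment is a sentinel, so a |
 | | full earlier segment contributes count - 1 live entries. */ |
 | | #if 0 |
 | | static int |
 | | ms_depth(const ms_entry * sp, const gc_mark_stack * pms) |
 | | { |
 | |     int depth = sp - pms->entries - 1; |
 | |  |
 | |     while ((pms = pms->prev) != 0) |
 | |         depth += pms->count - 1; |
 | |     return depth; |
 | | } |
 | | #endif |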
59 | | |
60 | | /* Define the mark stack sizing parameters. */ |
61 | 339k | #define ms_size_default 100 /* default, allocated on C stack */ |
62 | | /* This should probably be defined as a parameter somewhere.... */ |
63 | | #define ms_size_desired /* for additional allocation */\ |
64 | 27.9k | ((max_ushort - sizeof(gc_mark_stack)) / sizeof(ms_entry) - 10) |
65 | 141M | #define ms_size_min 50 /* min size for segment in free block */ |
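 | | /* Illustrative arithmetic (the sizes are platform-dependent |
 | | assumptions): on a 64-bit build with sizeof(gc_mark_stack) == 40 |
 | | and sizeof(ms_entry) == 24, ms_size_desired works out to |
 | | (65535 - 40) / 24 - 10 = 2718 entries per heap-allocated segment. */ |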
66 | | |
67 | | /* Forward references */ |
68 | | |
69 | | static void gc_init_mark_stack(gc_mark_stack *, uint); |
70 | | static void gc_objects_clear_marks(const gs_memory_t *mem, clump_t *); |
71 | | static void gc_unmark_names(name_table *, op_array_table *, op_array_table *); |
72 | | static int gc_trace(gs_gc_root_t *, gc_state_t *, gc_mark_stack *); |
73 | | static int gc_rescan_clump(clump_t *, gc_state_t *, gc_mark_stack *); |
74 | | static int gc_trace_clump(const gs_memory_t *mem, clump_t *, gc_state_t *, gc_mark_stack *); |
75 | | static bool gc_trace_finish(gc_state_t *); |
76 | | static void gc_clear_reloc(clump_t *); |
77 | | static void gc_objects_set_reloc(gc_state_t * gcst, clump_t *); |
78 | | static void gc_do_reloc(clump_t *, gs_ref_memory_t *, gc_state_t *); |
79 | | static void gc_objects_compact(clump_t *, gc_state_t *); |
80 | | static void gc_free_empty_clumps(gs_ref_memory_t *); |
81 | | |
82 | | /* Forward references for pointer types */ |
83 | | static ptr_proc_unmark(ptr_struct_unmark); |
84 | | static ptr_proc_mark(ptr_struct_mark); |
85 | | static ptr_proc_unmark(ptr_string_unmark); |
86 | | static ptr_proc_mark(ptr_string_mark); |
87 | | static ptr_proc_unmark(ptr_name_index_unmark); |
88 | | static ptr_proc_mark(ptr_name_index_mark); |
89 | | /*ptr_proc_unmark(ptr_ref_unmark); *//* in igc.h */ |
90 | | /*ptr_proc_mark(ptr_ref_mark); *//* in igc.h */ |
91 | | static ptr_proc_reloc(igc_reloc_struct_ptr, void); |
92 | | |
93 | | ptr_proc_reloc(igc_reloc_ref_ptr, ref_packed); /* in igcref.c */ |
94 | | refs_proc_reloc(igc_reloc_refs); /* in igcref.c */ |
95 | | |
96 | | /* Define this GC's procedure vector. */ |
97 | | static const gc_procs_with_refs_t igc_procs = { |
98 | | igc_reloc_struct_ptr, igc_reloc_string, igc_reloc_const_string, |
99 | | igc_reloc_param_string, igc_reloc_ref_ptr, igc_reloc_refs |
100 | | }; |
101 | | |
102 | | /* Pointer type descriptors. */ |
103 | | /* Note that the trace/mark routine has special knowledge of ptr_ref_type */ |
104 | | /* and ptr_struct_type -- it assumes that no other types have embedded */ |
105 | | /* pointers. Note also that the reloc procedures for string, ref and name */ |
106 | | /* pointers are never called. */ |
107 | | typedef ptr_proc_reloc((*ptr_proc_reloc_t), void); |
108 | | const gs_ptr_procs_t ptr_struct_procs = |
109 | | {ptr_struct_unmark, ptr_struct_mark, (ptr_proc_reloc_t) igc_reloc_struct_ptr}; |
110 | | const gs_ptr_procs_t ptr_string_procs = |
111 | | {ptr_string_unmark, ptr_string_mark, NULL}; |
112 | | const gs_ptr_procs_t ptr_const_string_procs = |
113 | | {ptr_string_unmark, ptr_string_mark, NULL}; |
114 | | const gs_ptr_procs_t ptr_ref_procs = |
115 | | {ptr_ref_unmark, ptr_ref_mark, NULL}; |
116 | | const gs_ptr_procs_t ptr_name_index_procs = |
117 | | {ptr_name_index_unmark, ptr_name_index_mark, NULL}; |
118 | | |
119 | | /* ------ Main program ------ */ |
120 | | |
121 | | /* Top level of garbage collector. */ |
122 | | #ifdef DEBUG |
123 | | static void |
124 | | end_phase(const gs_memory_t *mem, const char *str) |
125 | | { |
126 | | if (gs_debug_c('6')) { |
127 | | dmlprintf1(mem, "[6]---------------- end %s ----------------\n", |
128 | | (const char *)str); |
129 | | dmflush(mem); |
130 | | } |
131 | | } |
132 | | static const char *const depth_dots_string = ".........."; |
133 | | static const char * |
134 | | depth_dots(const ms_entry * sp, const gc_mark_stack * pms) |
135 | | { |
136 | | int depth = sp - pms->entries - 1; |
137 | | const gc_mark_stack *pss = pms; |
138 | | |
139 | | while ((pss = pss->prev) != 0) |
140 | | depth += pss->count - 1; |
141 | | return depth_dots_string + (depth >= 10 ? 0 : 10 - depth); |
142 | | } |
143 | | static void |
144 | | gc_validate_spaces(gs_ref_memory_t **spaces, int max_space, gc_state_t *gcst) |
145 | | { |
146 | | int i; |
147 | | gs_ref_memory_t *mem; |
148 | | |
149 | | for (i = 1; i <= max_space; ++i) |
150 | | if ((mem = spaces[i]) != 0) |
151 | | ialloc_validate_memory(mem, gcst); |
152 | | } |
153 | | #else /* !DEBUG */ |
154 | 5.42M | # define end_phase(mem,str) DO_NOTHING |
155 | | #endif /* DEBUG */ |
156 | | |
157 | | void |
158 | | gs_gc_reclaim(vm_spaces * pspaces, bool global) |
159 | 339k | { |
160 | 339k | #define nspaces ((i_vm_max + 1) * 2) /* * 2 for stable allocators */ |
161 | | |
162 | 339k | vm_spaces spaces; |
163 | 339k | gs_ref_memory_t *space_memories[nspaces]; |
164 | 339k | gs_gc_root_t space_roots[nspaces]; |
165 | 339k | int max_trace; /* max space_ to trace */ |
166 | 339k | int min_collect; /* min space_ to collect */ |
167 | 339k | int min_collect_vm_space; /* min VM space to collect */ |
168 | 339k | int ispace; |
169 | 339k | gs_ref_memory_t *mem; |
170 | 339k | clump_t *cp; |
171 | 339k | gs_gc_root_t *rp; |
172 | 339k | gc_state_t state; |
173 | 339k | struct _msd { |
174 | 339k | gc_mark_stack stack; |
175 | 339k | ms_entry body[ms_size_default]; |
176 | 339k | } ms_default; |
177 | 339k | gc_mark_stack *mark_stack = &ms_default.stack; |
178 | 339k | const gs_memory_t *cmem; |
179 | 339k | clump_splay_walker sw; |
180 | | |
181 | | /* Optionally force global GC for debugging. */ |
182 | | |
183 | 339k | if (I_FORCE_GLOBAL_GC) |
184 | 0 | global = true; |
185 | | |
186 | | /* Determine which spaces we are tracing and collecting. */ |
187 | | |
188 | 339k | spaces = *pspaces; |
189 | 339k | cmem = space_system->stable_memory; |
190 | 339k | space_memories[1] = space_system; |
191 | 339k | space_memories[2] = space_global; |
192 | 339k | min_collect = max_trace = 2; |
193 | 339k | min_collect_vm_space = i_vm_global; |
194 | 339k | if (space_global->stable_memory != (gs_memory_t *)space_global) |
195 | 339k | space_memories[++max_trace] = |
196 | 339k | (gs_ref_memory_t *)space_global->stable_memory; |
197 | 339k | if (space_global != space_local) { |
198 | 339k | space_memories[++max_trace] = space_local; |
199 | 339k | min_collect = max_trace; |
200 | 339k | min_collect_vm_space = i_vm_local; |
201 | 339k | if (space_local->stable_memory != (gs_memory_t *)space_local) |
202 | 339k | space_memories[++max_trace] = |
203 | 339k | (gs_ref_memory_t *)space_local->stable_memory; |
204 | 339k | } |
205 | 339k | if (global) |
206 | 325k | min_collect = min_collect_vm_space = 1; |
207 | | |
208 | 339k | #define for_spaces(i, n)\ |
209 | 13.4M | for (i = 1; i <= n; ++i) |
210 | 339k | #define for_collected_spaces(i)\ |
211 | 17.9M | for (i = min_collect; i <= max_trace; ++i) |
212 | 339k | #define for_space_mems(i, mem)\ |
213 | 48.1M | for (mem = space_memories[i]; mem != 0; mem = &mem->saved->state) |
214 | 339k | #define for_mem_clumps(mem, cp, sw)\ |
215 | 387M | for (cp = clump_splay_walk_init(sw, mem); cp != 0; cp = clump_splay_walk_fwd(sw)) |
216 | 339k | #define for_space_clumps(i, mem, cp, sw)\ |
217 | 25.6M | for_space_mems(i, mem) for_mem_clumps(mem, cp, sw) |
218 | 339k | #define for_clumps(i, n, mem, cp, sw)\ |
219 | 1.69M | for_spaces(i, n) for_space_clumps(i, mem, cp, sw) |
220 | 339k | #define for_collected_clumps(i, mem, cp, sw)\ |
221 | 679k | for_collected_spaces(i) for_space_clumps(i, mem, cp, sw) |
222 | 339k | #define for_roots(i, n, mem, rp)\ |
223 | 1.01M | for_spaces(i, n)\ |
224 | 15.8M | for (mem = space_memories[i], rp = mem->roots; rp != 0; rp = rp->next) |
225 | | |
226 | | /* Initialize the state. */ |
227 | | |
228 | 339k | state.procs = &igc_procs; |
229 | 339k | state.loc.memory = space_global; /* any one will do */ |
230 | | |
231 | 339k | state.loc.cp = 0; |
232 | 339k | state.spaces = spaces; |
233 | 339k | state.min_collect = min_collect_vm_space << r_space_shift; |
234 | 339k | state.relocating_untraced = false; |
235 | 339k | state.heap = state.loc.memory->non_gc_memory; |
236 | 339k | state.ntable = state.heap->gs_lib_ctx->gs_name_table; |
237 | | |
238 | | /* Register the allocators themselves as roots, */ |
239 | | /* so we mark and relocate the change and save lists properly. */ |
240 | | |
241 | 1.69M | for_spaces(ispace, max_trace) { |
242 | 1.69M | rp = &space_roots[ispace]; |
243 | 1.69M | gs_register_struct_root((gs_memory_t *)space_memories[ispace], |
244 | 1.69M | &rp, |
245 | 1.69M | (void **)&space_memories[ispace], |
246 | 1.69M | "gc_top_level"); |
247 | 1.69M | } |
248 | | |
249 | 339k | end_phase(state.heap,"register space roots"); |
250 | | |
251 | | #ifdef DEBUG |
252 | | |
253 | | /* Pre-validate the state. This shouldn't be necessary.... */ |
254 | | |
255 | | gc_validate_spaces(space_memories, max_trace, &state); |
256 | | |
257 | | end_phase(state.heap,"pre-validate pointers"); |
258 | | |
259 | | #endif |
260 | | |
261 | 339k | if (I_BYPASS_GC) { /* Don't collect at all. */ |
262 | 0 | goto no_collect; |
263 | 0 | } |
264 | | |
265 | | /* Clear marks in spaces to be collected. */ |
266 | | |
267 | 339k | for_collected_spaces(ispace) |
268 | 68.5M | for_space_clumps(ispace, mem, cp, &sw) { |
269 | 68.5M | gc_objects_clear_marks((const gs_memory_t *)mem, cp); |
270 | 68.5M | gc_strings_set_marks(cp, false); |
271 | 68.5M | } |
272 | | |
273 | 339k | end_phase(state.heap,"clear clump marks"); |
274 | | |
275 | | /* Clear the marks of roots. We must do this explicitly, */ |
276 | | /* since some roots are not in any clump. */ |
277 | | |
278 | 3.57M | for_roots(ispace, max_trace, mem, rp) { |
279 | 3.57M | enum_ptr_t eptr; |
280 | | |
281 | 3.57M | eptr.ptr = *rp->p; |
282 | 3.57M | if_debug_root('6', (const gs_memory_t *)mem, "[6]unmarking root", rp); |
283 | 3.57M | (*rp->ptype->unmark)(&eptr, &state); |
284 | 3.57M | } |
285 | | |
286 | 339k | end_phase(state.heap,"clear root marks"); |
287 | | |
288 | 339k | if (global) { |
289 | 325k | op_array_table *global_ops = get_global_op_array(cmem); |
290 | 325k | op_array_table *local_ops = get_local_op_array(cmem); |
291 | 325k | gc_unmark_names(state.ntable, global_ops, local_ops); |
292 | 325k | } |
293 | | |
294 | | /* Initialize the (default) mark stack. */ |
295 | | |
296 | 339k | gc_init_mark_stack(&ms_default.stack, ms_size_default); |
297 | 339k | ms_default.stack.prev = 0; |
298 | 339k | ms_default.stack.on_heap = false; |
299 | | |
300 | | /* Add all large-enough free blocks to the mark stack. */ |
301 | | /* Also initialize the rescan pointers. */ |
302 | | |
303 | 339k | { |
304 | 339k | gc_mark_stack *end = mark_stack; |
305 | | |
306 | 70.8M | for_clumps(ispace, max_trace, mem, cp, &sw) { |
307 | 70.8M | uint avail = cp->ctop - cp->cbot; |
308 | | |
309 | 70.8M | if (avail >= sizeof(gc_mark_stack) + sizeof(ms_entry) * |
310 | 70.8M | ms_size_min && |
311 | 70.8M | !cp->inner_count |
312 | 70.8M | ) { |
313 | 8.99M | gc_mark_stack *pms = (gc_mark_stack *) cp->cbot; |
314 | | |
315 | 8.99M | gc_init_mark_stack(pms, (avail - sizeof(gc_mark_stack)) / |
316 | 8.99M | sizeof(ms_entry)); |
317 | 8.99M | end->next = pms; |
318 | 8.99M | pms->prev = end; |
319 | 8.99M | pms->on_heap = false; |
320 | 8.99M | if_debug2m('6', (const gs_memory_t *)mem, |
321 | 8.99M | "[6]adding free "PRI_INTPTR"(%u) to mark stack\n", |
322 | 8.99M | (intptr_t)pms, pms->count); |
323 | 8.99M | } |
324 | 70.8M | cp->rescan_bot = cp->cend; |
325 | 70.8M | cp->rescan_top = cp->cbase; |
326 | 70.8M | } |
327 | 339k | } |
328 | | |
329 | | /* Mark reachable objects. */ |
330 | | |
331 | 339k | { |
332 | 339k | int more = 0; |
333 | | |
334 | | /* Mark from roots. */ |
335 | | |
336 | 3.57M | for_roots(ispace, max_trace, mem, rp) { |
337 | 3.57M | if_debug_root('6', (const gs_memory_t *)mem, "[6]marking root", rp); |
338 | 3.57M | more |= gc_trace(rp, &state, mark_stack); |
339 | 3.57M | } |
340 | | |
341 | 339k | end_phase(state.heap,"mark"); |
342 | | |
343 | | /* If this is a local GC, mark from non-local clumps. */ |
344 | | |
345 | 339k | if (!global) |
346 | 339k | for_clumps(ispace, min_collect - 1, mem, cp, &sw) |
347 | 2.27M | more |= gc_trace_clump((const gs_memory_t *)mem, cp, &state, mark_stack); |
348 | | |
349 | | /* Handle mark stack overflow. */ |
350 | | |
351 | 339k | while (more < 0) { /* stack overflowed */ |
352 | 10 | more = 0; |
353 | 594k | for_clumps(ispace, max_trace, mem, cp, &sw) |
354 | 1.31M | more |= gc_rescan_clump(cp, &state, mark_stack); |
355 | 10 | } |
356 | | |
357 | 339k | end_phase(state.heap,"mark overflow"); |
358 | 339k | } |
359 | | |
360 | | /* Free the mark stack. */ |
361 | | |
362 | 339k | { |
363 | 339k | gc_mark_stack *pms = mark_stack; |
364 | | |
365 | 687k | while (pms->next) |
366 | 347k | pms = pms->next; |
367 | 1.02M | while (pms) { |
368 | 687k | gc_mark_stack *prev = pms->prev; |
369 | | |
370 | 687k | if (pms->on_heap) |
371 | 7.53k | gs_free_object(state.heap, pms, "gc mark stack"); |
372 | 679k | else |
373 | 687k | gs_alloc_fill(pms, gs_alloc_fill_free, |
374 | 687k | sizeof(*pms) + sizeof(ms_entry) * pms->count); |
375 | 687k | pms = prev; |
376 | 687k | } |
377 | 339k | } |
378 | | |
379 | 339k | end_phase(state.heap,"free mark stack"); |
380 | | |
381 | 339k | if (global) { |
382 | 325k | gc_trace_finish(&state); |
383 | 325k | names_trace_finish(state.ntable, &state); |
384 | | |
385 | 325k | end_phase(state.heap,"finish trace"); |
386 | 325k | } |
387 | | |
388 | | /* Filter the save change lists, removing elements |
389 | | that point to unmarked blocks of refs. */ |
390 | 339k | { |
391 | 339k | int i; |
392 | | |
393 | 1.65M | for_collected_spaces(i) { |
394 | 1.65M | gs_ref_memory_t *mem = space_memories[i]; |
395 | | |
396 | 1.65M | alloc_save__filter_changes(mem); |
397 | 1.65M | } |
398 | 339k | } |
399 | | /* Clear marks and relocation in spaces that are only being traced. */ |
400 | | /* We have to clear the marks first, because we want the */ |
401 | | /* relocation to wind up as o_untraced, not o_unmarked. */ |
402 | | |
403 | 339k | for_clumps(ispace, min_collect - 1, mem, cp, &sw) |
404 | 2.27M | gc_objects_clear_marks((const gs_memory_t *)mem, cp); |
405 | | |
406 | 339k | end_phase(state.heap,"post-clear marks"); |
407 | | |
408 | 339k | for_clumps(ispace, min_collect - 1, mem, cp, &sw) |
409 | 2.27M | gc_clear_reloc(cp); |
410 | | |
411 | 339k | end_phase(state.heap,"clear reloc"); |
412 | | |
413 | | /* Set the relocation of roots outside any clump to o_untraced, */ |
414 | | /* so we won't try to relocate pointers to them. */ |
415 | | /* (Currently, there aren't any.) */ |
416 | | |
417 | | /* Disable freeing in the allocators of the spaces we are */ |
418 | | /* collecting, so finalization procedures won't cause problems. */ |
419 | 339k | { |
420 | 339k | int i; |
421 | | |
422 | 339k | for_collected_spaces(i) |
423 | 1.65M | gs_enable_free((gs_memory_t *)space_memories[i], false); |
424 | 339k | } |
425 | | |
426 | | /* Compute relocation based on marks, in the spaces */ |
427 | | /* we are going to compact. Also finalize freed objects. */ |
428 | 339k | state.cur_mem = (gs_memory_t *)mem; |
429 | | |
430 | 68.5M | for_collected_clumps(ispace, mem, cp, &sw) { |
431 | 68.5M | gc_objects_set_reloc(&state, cp); |
432 | 68.5M | gc_strings_set_reloc(cp); |
433 | 68.5M | } |
434 | | |
435 | | /* Re-enable freeing. */ |
436 | 339k | { |
437 | 339k | int i; |
438 | | |
439 | 339k | for_collected_spaces(i) |
440 | 1.65M | gs_enable_free((gs_memory_t *)space_memories[i], true); |
441 | 339k | } |
442 | | |
443 | 339k | end_phase(state.heap,"set reloc"); |
444 | | |
445 | | /* Relocate pointers. */ |
446 | | |
447 | 339k | state.relocating_untraced = true; |
448 | 339k | for_clumps(ispace, min_collect - 1, mem, cp, &sw) |
449 | 2.27M | gc_do_reloc(cp, mem, &state); |
450 | 339k | state.relocating_untraced = false; |
451 | 7.83M | for_collected_clumps(ispace, mem, cp, &sw) |
452 | 68.5M | gc_do_reloc(cp, mem, &state); |
453 | | |
454 | 339k | end_phase(state.heap,"relocate clumps"); |
455 | | |
456 | 3.57M | for_roots(ispace, max_trace, mem, rp) { |
457 | 3.57M | if_debug3m('6', (const gs_memory_t *)mem, |
458 | 3.57M | "[6]relocating root "PRI_INTPTR": "PRI_INTPTR" -> "PRI_INTPTR"\n", |
459 | 3.57M | (intptr_t)rp, (intptr_t)rp->p, (intptr_t)*rp->p); |
460 | 3.57M | if (rp->ptype == ptr_ref_type) { |
461 | 177k | ref *pref = (ref *) * rp->p; |
462 | | |
463 | 177k | igc_reloc_refs((ref_packed *) pref, |
464 | 177k | (ref_packed *) (pref + 1), |
465 | 177k | &state); |
466 | 177k | } else |
467 | 3.39M | *rp->p = (*rp->ptype->reloc) (*rp->p, &state); |
468 | 3.57M | if_debug3m('6', (const gs_memory_t *)mem, |
469 | 3.57M | "[6]relocated root "PRI_INTPTR": "PRI_INTPTR" -> "PRI_INTPTR"\n", |
470 | 3.57M | (intptr_t)rp, (intptr_t)rp->p, (intptr_t)*rp->p); |
471 | 3.57M | } |
472 | | |
473 | 339k | end_phase(state.heap,"relocate roots"); |
474 | | |
475 | | /* Compact data. We only do this for spaces we are collecting. */ |
476 | | |
477 | 1.65M | for_collected_spaces(ispace) { |
478 | 6.18M | for_space_mems(ispace, mem) { |
479 | 68.5M | for_mem_clumps(mem, cp, &sw) { |
480 | 68.5M | if_debug_clump('6', (const gs_memory_t *)mem, "[6]compacting clump", cp); |
481 | 68.5M | gc_objects_compact(cp, &state); |
482 | 68.5M | gc_strings_compact(cp, cmem); |
483 | 68.5M | if_debug_clump('6', (const gs_memory_t *)mem, "[6]after compaction:", cp); |
484 | 68.5M | } |
485 | 6.18M | mem->saved = mem->reloc_saved; |
486 | 6.18M | ialloc_reset_free(mem); |
487 | 6.18M | } |
488 | 1.65M | } |
489 | | |
490 | 339k | end_phase(state.heap,"compact"); |
491 | | |
492 | | /* Free empty clumps. */ |
493 | | |
494 | 1.65M | for_collected_spaces(ispace) { |
495 | 6.18M | for_space_mems(ispace, mem) { |
496 | 6.18M | gc_free_empty_clumps(mem); |
497 | 6.18M | } |
498 | 1.65M | } |
499 | | |
500 | 339k | end_phase(state.heap,"free empty clumps"); |
501 | | |
502 | | /* |
503 | | * Update previous_status to reflect any freed clumps, |
504 | | * and set inherited to the negative of allocated, |
505 | | * so it has no effect. We must update previous_status by |
506 | | * working back-to-front along the save chain, using pointer reversal. |
507 | | * (We could update inherited in any order, since it only uses |
508 | | * information local to the individual save level.) |
509 | | */ |
510 | | |
511 | 1.65M | for_collected_spaces(ispace) { /* Reverse the pointers. */ |
512 | 1.65M | alloc_save_t *curr; |
513 | 1.65M | alloc_save_t *prev = 0; |
514 | 1.65M | alloc_save_t *next; |
515 | 1.65M | gs_memory_status_t total; |
516 | | |
517 | 6.18M | for (curr = space_memories[ispace]->saved; curr != 0; |
518 | 4.52M | prev = curr, curr = next |
519 | 4.52M | ) { |
520 | 4.52M | next = curr->state.saved; |
521 | 4.52M | curr->state.saved = prev; |
522 | 4.52M | } |
523 | | /* Now work the other way, accumulating the values. */ |
524 | 1.65M | total.allocated = 0, total.used = 0; |
525 | 6.18M | for (curr = prev, prev = 0; curr != 0; |
526 | 4.52M | prev = curr, curr = next |
527 | 4.52M | ) { |
528 | 4.52M | mem = &curr->state; |
529 | 4.52M | next = mem->saved; |
530 | 4.52M | mem->saved = prev; |
531 | 4.52M | mem->previous_status = total; |
532 | 4.52M | if_debug3m('6', (const gs_memory_t *)mem, |
533 | 4.52M | "[6]"PRI_INTPTR" previous allocated=%lu, used=%lu\n", |
534 | 4.52M | (intptr_t)mem, total.allocated, total.used); |
535 | 4.52M | gs_memory_status((gs_memory_t *) mem, &total); |
536 | 4.52M | mem->gc_allocated = mem->allocated + total.allocated; |
537 | 4.52M | } |
538 | 1.65M | mem = space_memories[ispace]; |
539 | 1.65M | mem->previous_status = total; |
540 | 1.65M | mem->gc_allocated = mem->allocated + total.allocated; |
541 | 1.65M | if_debug3m('6', (const gs_memory_t *)mem, |
542 | 1.65M | "[6]"PRI_INTPTR" previous allocated=%lu, used=%lu\n", |
543 | 1.65M | (intptr_t)mem, total.allocated, total.used); |
544 | 1.65M | } |
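 | | /* Example of the accumulation above (editor's illustration): with an |
 | | outer save level A and an inner level B, the allocator chains |
 | | mem->saved -> B -> A (newest first). Pass 1 reverses the links so |
 | | pass 2 can visit A before B, giving B a previous_status that already |
 | | includes A's allocated/used totals; pass 2 restores the links as it |
 | | goes, so no extra storage is needed. */ |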
545 | | |
546 | 339k | end_phase(state.heap,"update stats"); |
547 | | |
548 | 339k | no_collect: |
549 | | |
550 | | /* Unregister the allocator roots. */ |
551 | | |
552 | 339k | for_spaces(ispace, max_trace) |
553 | 1.69M | gs_unregister_root((gs_memory_t *)space_memories[ispace], |
554 | 339k | &space_roots[ispace], "gc_top_level"); |
555 | | |
556 | 339k | end_phase(state.heap,"unregister space roots"); |
557 | | |
558 | | #ifdef DEBUG |
559 | | |
560 | | /* Validate the state. This shouldn't be necessary.... */ |
561 | | |
562 | | gc_validate_spaces(space_memories, max_trace, &state); |
563 | | |
564 | | end_phase(state.heap,"validate pointers"); |
565 | | |
566 | | #endif |
567 | 339k | } |
568 | | |
569 | | /* ------ Debugging utilities ------ */ |
570 | | |
571 | | /* Validate a pointer to an object header. */ |
572 | | #ifdef DEBUG |
573 | | # define debug_check_object(pre, cp, gcst)\ |
574 | | ialloc_validate_object((pre) + 1, cp, gcst) |
575 | | #else |
576 | 10.0G | # define debug_check_object(pre, cp, gcst) DO_NOTHING |
577 | | #endif |
578 | | |
579 | | /* ------ Unmarking phase ------ */ |
580 | | |
581 | | /* Unmark a single struct. */ |
582 | | static void |
583 | | ptr_struct_unmark(enum_ptr_t *pep, gc_state_t * ignored) |
584 | 3.39M | { |
585 | 3.39M | void *const vptr = (void *)pep->ptr; /* break const */ |
586 | | |
587 | 3.39M | if (vptr != 0) |
588 | 3.39M | o_set_unmarked(((obj_header_t *) vptr - 1)); |
589 | 3.39M | } |
590 | | |
591 | | /* Unmark a single string. */ |
592 | | static void |
593 | | ptr_string_unmark(enum_ptr_t *pep, gc_state_t * gcst) |
594 | 0 | { |
595 | 0 | discard(gc_string_mark(pep->ptr, pep->size, false, gcst)); |
596 | 0 | } |
597 | | |
598 | | /* Unmark a single name. */ |
599 | | static void |
600 | | ptr_name_index_unmark(enum_ptr_t *pep, gc_state_t * gcst) |
601 | 0 | { |
602 | | /* Do nothing */ |
603 | 0 | } |
604 | | |
605 | | /* Unmark the objects in a clump. */ |
606 | | static void |
607 | | gc_objects_clear_marks(const gs_memory_t *mem, clump_t * cp) |
608 | 70.8M | { |
609 | 70.8M | if_debug_clump('6', mem, "[6]unmarking clump", cp); |
610 | 975M | SCAN_CLUMP_OBJECTS(cp) |
611 | 975M | DO_ALL |
612 | 975M | struct_proc_clear_marks((*proc)) = |
613 | 975M | pre->o_type->clear_marks; |
614 | | #ifdef DEBUG |
615 | | if (pre->o_type != &st_free) |
616 | | debug_check_object(pre, cp, NULL); |
617 | | #endif |
618 | 975M | if_debug3m('7', (const gs_memory_t *)mem, " [7](un)marking %s(%lu) "PRI_INTPTR"\n", |
619 | 975M | struct_type_name_string(pre->o_type), |
620 | 975M | (ulong) size, (intptr_t)pre); |
621 | 975M | o_set_unmarked(pre); |
622 | 975M | if (proc != 0) |
623 | 424M | (*proc) (mem, pre + 1, size, pre->o_type); |
624 | 975M | END_OBJECTS_SCAN |
625 | 70.8M | } |
626 | | |
627 | | /* Mark 0- and 1-character names, and those referenced from the */ |
628 | | /* op_array_nx_table, and unmark all the rest. */ |
629 | | static void |
630 | | gc_unmark_names(name_table * nt, op_array_table *op_array_table_global, |
631 | | op_array_table *op_array_table_local) |
632 | 325k | { |
633 | 325k | uint i; |
634 | | |
635 | 325k | names_unmark_all(nt); |
636 | 60.1M | for (i = 0; i < op_array_table_global->count; i++) { |
637 | 59.8M | name_index_t nidx = op_array_table_global->nx_table[i]; |
638 | | |
639 | 59.8M | names_mark_index(nt, nidx); |
640 | 59.8M | } |
641 | 325k | for (i = 0; i < op_array_table_local->count; i++) { |
642 | 0 | name_index_t nidx = op_array_table_local->nx_table[i]; |
643 | |
644 | 0 | names_mark_index(nt, nidx); |
645 | 0 | } |
646 | 325k | } |
647 | | |
648 | | /* ------ Marking phase ------ */ |
649 | | |
650 | | /* Initialize (a segment of) the mark stack. */ |
651 | | static void |
652 | | gc_init_mark_stack(gc_mark_stack * pms, uint count) |
653 | 9.34M | { |
654 | 9.34M | pms->next = 0; |
655 | 9.34M | pms->count = count; |
656 | 9.34M | pms->entries[0].ptr = 0; |
657 | 9.34M | pms->entries[0].index = 0; |
658 | 9.34M | pms->entries[0].is_refs = false; |
659 | 9.34M | } |
660 | | |
661 | | /* Mark starting from all marked objects in the interval of a clump */ |
662 | | /* needing rescanning. */ |
663 | | static int |
664 | | gc_rescan_clump(clump_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack) |
665 | 1.31M | { |
666 | 1.31M | byte *sbot = cp->rescan_bot; |
667 | 1.31M | byte *stop = cp->rescan_top; |
668 | 1.31M | gs_gc_root_t root; |
669 | 1.31M | void *comp; |
670 | 1.31M | int more = 0; |
671 | 1.31M | const gs_memory_t *mem = gcst_get_memory_ptr( pstate ); |
672 | | |
673 | 1.31M | if (sbot > stop) |
674 | 1.31M | return 0; |
675 | 637 | root.p = ∁ |
676 | 637 | if_debug_clump('6', mem, "[6]rescanning clump", cp); |
677 | 637 | cp->rescan_bot = cp->cend; |
678 | 637 | cp->rescan_top = cp->cbase; |
679 | 9.75k | SCAN_CLUMP_OBJECTS(cp) |
680 | 9.75k | DO_ALL |
681 | 9.75k | if ((byte *) (pre + 1) + size < sbot); |
682 | 3.15k | else if ((byte *) (pre + 1) > stop) |
683 | 493 | return more; /* 'break' won't work here */ |
684 | 2.66k | else { |
685 | 2.66k | if_debug2m('7', mem, " [7]scanning/marking "PRI_INTPTR"(%lu)\n", |
686 | 2.66k | (intptr_t)pre, (ulong)size); |
687 | 2.66k | if (pre->o_type == &st_refs) { |
688 | 0 | ref_packed *rp = (ref_packed *) (pre + 1); |
689 | 0 | char *end = (char *)rp + size; |
690 | |
691 | 0 | root.ptype = ptr_ref_type; |
692 | 0 | while ((char *)rp < end) { |
693 | 0 | comp = rp; |
694 | 0 | if (r_is_packed(rp)) { |
695 | 0 | if (r_has_pmark(rp)) { |
696 | 0 | r_clear_pmark(rp); |
697 | 0 | more |= gc_trace(&root, pstate, |
698 | 0 | pmstack); |
699 | 0 | } |
700 | 0 | rp++; |
701 | 0 | } else { |
702 | 0 | ref *const pref = (ref *)rp; |
703 | |
704 | 0 | if (r_has_attr(pref, l_mark)) { |
705 | 0 | r_clear_attrs(pref, l_mark); |
706 | 0 | more |= gc_trace(&root, pstate, pmstack); |
707 | 0 | } |
708 | 0 | rp += packed_per_ref; |
709 | 0 | } |
710 | 0 | } |
711 | 2.66k | } else if (!o_is_unmarked(pre)) { |
712 | 1.94k | struct_proc_clear_marks((*proc)) = |
713 | 1.94k | pre->o_type->clear_marks; |
714 | 1.94k | root.ptype = ptr_struct_type; |
715 | 1.94k | comp = pre + 1; |
716 | 1.94k | if (!o_is_untraced(pre)) |
717 | 1.94k | o_set_unmarked(pre); |
718 | 1.94k | if (proc != 0) |
719 | 510 | (*proc) (mem, comp, size, pre->o_type); |
720 | 1.94k | more |= gc_trace(&root, pstate, pmstack); |
721 | 1.94k | } |
722 | 2.66k | } |
723 | 9.75k | END_OBJECTS_SCAN |
724 | 144 | return more; |
725 | 637 | } |
726 | | |
727 | | /* Mark starting from all the objects in a clump. */ |
728 | | /* We assume that pstate->min_collect > avm_system, */ |
729 | | /* so we don't have to trace names. */ |
730 | | static int |
731 | | gc_trace_clump(const gs_memory_t *mem, clump_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack) |
732 | 2.27M | { |
733 | 2.27M | gs_gc_root_t root; |
734 | 2.27M | void *comp; |
735 | 2.27M | int more = 0; |
736 | 2.27M | int min_trace = pstate->min_collect; |
737 | | |
738 | 2.27M | root.p = ∁ |
739 | 2.27M | if_debug_clump('6', mem, "[6]marking from clump", cp); |
740 | 27.6M | SCAN_CLUMP_OBJECTS(cp) |
741 | 27.6M | DO_ALL |
742 | 27.6M | { |
743 | 27.6M | if_debug2m('7', mem, " [7]scanning/marking "PRI_INTPTR"(%lu)\n", |
744 | 27.6M | (intptr_t)pre, (ulong)size); |
745 | 27.6M | if (pre->o_type == &st_refs) { |
746 | 13.7M | ref_packed *rp = (ref_packed *) (pre + 1); |
747 | 13.7M | char *end = (char *)rp + size; |
748 | | |
749 | 13.7M | root.ptype = ptr_ref_type; |
750 | 1.50G | while ((char *)rp < end) { |
751 | 1.49G | comp = rp; |
752 | 1.49G | if (r_is_packed(rp)) { /* No packed refs need tracing. */ |
753 | 513M | rp++; |
754 | 979M | } else { |
755 | 979M | ref *const pref = (ref *)rp; |
756 | | |
757 | 979M | if (r_space(pref) >= min_trace) { |
758 | 161k | r_clear_attrs(pref, l_mark); |
759 | 161k | more |= gc_trace(&root, pstate, pmstack); |
760 | 161k | } |
761 | 979M | rp += packed_per_ref; |
762 | 979M | } |
763 | 1.49G | } |
764 | 13.9M | } else if (!o_is_unmarked(pre)) { |
765 | 13.6M | if (!o_is_untraced(pre)) |
766 | 13.6M | o_set_unmarked(pre); |
767 | 13.6M | if (pre->o_type != &st_free) { |
768 | 13.4M | struct_proc_clear_marks((*proc)) = |
769 | 13.4M | pre->o_type->clear_marks; |
770 | | |
771 | 13.4M | root.ptype = ptr_struct_type; |
772 | 13.4M | comp = pre + 1; |
773 | 13.4M | if (proc != 0) |
774 | 572k | (*proc) (mem, comp, size, pre->o_type); |
775 | 13.4M | more |= gc_trace(&root, pstate, pmstack); |
776 | 13.4M | } |
777 | 13.6M | } |
778 | 27.6M | } |
779 | 27.6M | END_OBJECTS_SCAN |
780 | 2.27M | return more; |
781 | 2.27M | } |
782 | | |
783 | | /* Recursively mark from a (root) pointer. */ |
784 | | /* Return -1 if we overflowed the mark stack, */ |
785 | | /* 0 if we completed successfully without marking any new objects, */ |
786 | | /* 1 if we completed and marked some new objects. */ |
787 | | static int gc_extend_stack(gc_mark_stack *, gc_state_t *); |
788 | | static int |
789 | | gc_trace(gs_gc_root_t * rp, /* lgtm [cpp/use-of-goto] */ |
790 | | gc_state_t * pstate, gc_mark_stack * pmstack) |
791 | 17.2M | { |
792 | 17.2M | int min_trace = pstate->min_collect; |
793 | 17.2M | gc_mark_stack *pms = pmstack; |
794 | 17.2M | ms_entry *sp = pms->entries + 1; |
795 | | |
796 | | /* We stop the mark stack 1 entry early, because we store into */ |
797 | | /* the entry beyond the top. */ |
798 | 17.2M | ms_entry *stop = sp + pms->count - 2; |
799 | 17.2M | int new = 0; |
800 | 17.2M | enum_ptr_t nep; |
801 | 17.2M | void *nptr; |
802 | 17.2M | name_table *nt = pstate->ntable; |
803 | | |
804 | 17.2M | #define mark_name(nidx)\ |
805 | 7.19G | BEGIN\ |
806 | 7.19G | if (names_mark_index(nt, nidx)) {\ |
807 | 2.18G | new |= 1;\ |
808 | 2.18G | if_debug2m('8', gcst_get_memory_ptr(pstate), " [8]marked name "PRI_INTPTR"(%u)\n",\ |
809 | 2.18G | (intptr_t)names_index_ptr(nt, nidx), nidx);\ |
810 | 2.18G | }\ |
811 | 7.19G | END |
812 | | |
813 | 17.2M | nptr = *rp->p; |
814 | 17.2M | if (nptr == 0) |
815 | 0 | return 0; |
816 | | |
817 | | /* Initialize the stack */ |
818 | 17.2M | sp->ptr = nptr; |
819 | 17.2M | if (rp->ptype == ptr_ref_type) |
820 | 338k | sp->index = 1, sp->is_refs = true; |
821 | 16.8M | else { |
822 | 16.8M | sp->index = 0, sp->is_refs = false; |
823 | 16.8M | nep.ptr = nptr; |
824 | 16.8M | if ((*rp->ptype->mark) (&nep, pstate)) |
825 | 16.2M | new |= 1; |
826 | 16.8M | } |
827 | 4.51G | for (;;) { |
828 | 4.51G | gs_ptr_type_t ptp; |
829 | | |
830 | | /* |
831 | | * The following should really be an if..else, but that |
832 | | * would force unnecessary is_refs tests. |
833 | | */ |
834 | 4.51G | if (sp->is_refs) |
835 | 3.68G | goto do_refs; |
836 | | |
837 | | /* ---------------- Structure ---------------- */ |
838 | | |
839 | 1.12G | do_struct: |
840 | 1.12G | { |
841 | 1.12G | obj_header_t *ptr = sp->ptr; |
842 | | |
843 | 1.12G | struct_proc_enum_ptrs((*mproc)); |
844 | | |
845 | 1.12G | if (ptr == 0) { /* We've reached the bottom of a stack segment. */ |
846 | 18.2M | pms = pms->prev; |
847 | 18.2M | if (pms == 0) |
848 | 17.2M | break; /* all done */ |
849 | 1.05M | stop = pms->entries + pms->count - 1; |
850 | 1.05M | sp = stop; |
851 | 1.05M | continue; |
852 | 18.2M | } |
853 | 1.12G | debug_check_object(ptr - 1, NULL, NULL); |
854 | 2.72G | ts:if_debug4m('7', pstate->heap, " [7]%smarking %s "PRI_INTPTR"[%u]", |
855 | 2.72G | depth_dots(sp, pms), |
856 | 2.72G | struct_type_name_string(ptr[-1].o_type), |
857 | 2.72G | (intptr_t)ptr, sp->index); |
858 | 2.72G | mproc = ptr[-1].o_type->enum_ptrs; |
859 | 2.72G | if (mproc == gs_no_struct_enum_ptrs || |
860 | 2.72G | (ptp = (*mproc) |
861 | 2.69G | (gcst_get_memory_ptr(pstate), ptr, pre_obj_contents_size(ptr - 1), |
862 | 2.69G | sp->index, &nep, ptr[-1].o_type, pstate)) == 0 |
863 | 2.72G | ) { |
864 | 302M | if_debug0m('7', pstate->heap, " - done\n"); |
865 | 302M | sp--; |
866 | 302M | continue; |
867 | 302M | } |
868 | | /* The cast in the following statement is the one */ |
869 | | /* place we need to break 'const' to make the */ |
870 | | /* template for pointer enumeration work. */ |
871 | 2.42G | nptr = (void *)nep.ptr; |
872 | 2.42G | sp->index++; |
873 | 2.42G | if_debug1m('7', pstate->heap, " = "PRI_INTPTR"\n", (intptr_t)nptr); |
874 | | /* Descend into nep.ptr, whose pointer type is ptp. */ |
875 | 2.42G | if (ptp == ptr_struct_type) { |
876 | 1.68G | sp[1].index = 0; |
877 | 1.68G | sp[1].is_refs = false; |
878 | 1.68G | if (sp == stop) |
879 | 636k | goto push; |
880 | 1.68G | if (!ptr_struct_mark(&nep, pstate)) |
881 | 1.40G | goto ts; |
882 | 276M | new |= 1; |
883 | 276M | (++sp)->ptr = nptr; |
884 | 276M | goto do_struct; |
885 | 1.68G | } else if (ptp == ptr_ref_type) { |
886 | 525M | sp[1].index = 1; |
887 | 525M | sp[1].is_refs = true; |
888 | 525M | if (sp == stop) |
889 | 369k | goto push; |
890 | 525M | new |= 1; |
891 | 525M | (++sp)->ptr = nptr; |
892 | 525M | goto do_refs; |
893 | 525M | } else { /* We assume this is some non-pointer- */ |
894 | | /* containing type. */ |
895 | | /* This was the only formulation of this call/condition/assignment that valgrind did not complain about */ |
896 | 217M | new |= ((*ptp->mark) (&nep, pstate)) ? 1 : 0; |
897 | 217M | goto ts; |
898 | 217M | } |
899 | 2.42G | } |
900 | | |
901 | | /* ---------------- Refs ---------------- */ |
902 | | |
903 | 7.87G | do_refs: |
904 | 7.87G | { |
905 | 7.87G | ref_packed *pptr = sp->ptr; |
906 | 7.87G | ref *rptr; |
907 | | |
908 | 45.0G | tr:if (!sp->index) { |
909 | 4.19G | --sp; |
910 | 4.19G | continue; |
911 | 4.19G | } |
912 | 40.8G | --(sp->index); |
913 | 40.8G | if_debug3m('8', pstate->heap, " [8]%smarking refs "PRI_INTPTR"[%u]\n", |
914 | 40.8G | depth_dots(sp, pms), (intptr_t)pptr, sp->index); |
915 | 40.8G | if (r_is_packed(pptr)) { |
916 | 11.9G | if (!r_has_pmark(pptr)) { |
917 | 7.97G | r_set_pmark(pptr); |
918 | 7.97G | new |= 1; |
919 | 7.97G | if (r_packed_is_name(pptr)) { |
920 | 1.87G | name_index_t nidx = packed_name_index(pptr); |
921 | | |
922 | 1.87G | mark_name(nidx); |
923 | 1.87G | } |
924 | 7.97G | } |
925 | 11.9G | ++pptr; |
926 | 11.9G | goto tr; |
927 | 11.9G | } |
928 | 28.9G | rptr = (ref *) pptr; /* * const beyond here */ |
929 | 28.9G | if (r_has_attr(rptr, l_mark)) { |
930 | 4.34G | pptr = (ref_packed *)(rptr + 1); |
931 | 4.34G | goto tr; |
932 | 4.34G | } |
933 | 24.5G | r_set_attrs(rptr, l_mark); |
934 | 24.5G | new |= 1; |
935 | 24.5G | if (r_space(rptr) < min_trace) { /* Note that this always picks up all scalars. */ |
936 | 15.1G | pptr = (ref_packed *) (rptr + 1); |
937 | 15.1G | goto tr; |
938 | 15.1G | } |
939 | 9.38G | sp->ptr = rptr + 1; |
940 | 9.38G | switch (r_type(rptr)) { |
941 | | /* Struct cases */ |
942 | 1.90M | case t_file: |
943 | 1.90M | nptr = rptr->value.pfile; |
944 | 13.3M | rs:sp[1].is_refs = false; |
945 | 13.3M | sp[1].index = 0; |
946 | 13.3M | if (sp == stop) { |
947 | 3.72k | ptp = ptr_struct_type; |
948 | 3.72k | break; |
949 | 3.72k | } |
950 | 13.3M | nep.ptr = nptr; |
951 | 13.3M | if (!ptr_struct_mark(&nep, pstate)) |
952 | 4.24M | goto nr; |
953 | 9.09M | new |= 1; |
954 | 9.09M | (++sp)->ptr = nptr; |
955 | 9.09M | goto do_struct; |
956 | 7.13M | case t_device: |
957 | 7.13M | nptr = rptr->value.pdevice; |
958 | 7.13M | goto rs; |
959 | 467k | case t_fontID: |
960 | 3.97M | case t_struct: |
961 | 4.30M | case t_astruct: |
962 | 4.30M | case t_pdfctx: |
963 | 4.30M | nptr = rptr->value.pstruct; |
964 | 4.30M | goto rs; |
965 | | /* Non-trivial non-struct cases */ |
966 | 193M | case t_dictionary: |
967 | 193M | nptr = rptr->value.pdict; |
968 | 193M | sp[1].index = sizeof(dict) / sizeof(ref); |
969 | 193M | goto rrp; |
970 | 1.66G | case t_array: |
971 | 1.66G | nptr = rptr->value.refs; |
972 | 3.39G | rr:if ((sp[1].index = r_size(rptr)) == 0) { /* Set the base pointer to 0, */ |
973 | | /* so we never try to relocate it. */ |
974 | 54.3M | rptr->value.refs = 0; |
975 | 54.3M | goto nr; |
976 | 54.3M | } |
977 | 3.53G | rrp: |
978 | 3.67G | rrc:sp[1].is_refs = true; |
979 | 3.67G | if (sp == stop) { |
980 | | /* |
981 | | * The following initialization is unnecessary: |
982 | | * ptp will not be used if sp[1].is_refs = true. |
983 | | * We put this here solely to get rid of bogus |
984 | | * "possibly uninitialized variable" warnings |
985 | | * from certain compilers. |
986 | | */ |
987 | 64.3k | ptp = ptr_ref_type; |
988 | 64.3k | break; |
989 | 64.3k | } |
990 | 3.67G | new |= 1; |
991 | 3.67G | (++sp)->ptr = nptr; |
992 | 3.67G | goto do_refs; |
993 | 1.19G | case t_mixedarray: |
994 | 1.73G | case t_shortarray: |
995 | 1.73G | nptr = rptr->value.writable_packed; |
996 | 1.73G | goto rr; |
997 | 5.32G | case t_name: |
998 | 5.32G | mark_name(names_index(nt, rptr)); |
999 | 5.70G | nr:pptr = (ref_packed *) (rptr + 1); |
1000 | 5.70G | goto tr; |
1001 | 322M | case t_string: |
1002 | 322M | if (gc_string_mark(rptr->value.bytes, r_size(rptr), true, pstate)) |
1003 | 266M | new |= 1; |
1004 | 322M | goto nr; |
1005 | 133M | case t_oparray: |
1006 | 133M | nptr = rptr->value.refs; /* discard const */ |
1007 | 133M | sp[1].index = 1; |
1008 | 133M | goto rrc; |
1009 | 0 | default: |
1010 | 0 | goto nr; |
1011 | 9.38G | } |
1012 | 9.38G | } |
1013 | | |
1014 | | /* ---------------- Recursion ---------------- */ |
1015 | | |
1016 | 1.07M | push: |
1017 | 1.07M | if (sp == stop) { /* The current segment is full. */ |
1018 | 1.07M | int new_added = gc_extend_stack(pms, pstate); |
1019 | | |
1020 | 1.07M | if (new_added) { |
1021 | 20.3k | new |= new_added; |
1022 | 20.3k | continue; |
1023 | 20.3k | } |
1024 | 1.05M | pms = pms->next; |
1025 | 1.05M | stop = pms->entries + pms->count - 1; |
1026 | 1.05M | pms->entries[1] = sp[1]; |
1027 | 1.05M | sp = pms->entries; |
1028 | 1.05M | } |
1029 | | /* index and is_refs are already set */ |
1030 | 1.05M | if (!sp[1].is_refs) { |
1031 | 628k | nep.ptr = nptr; |
1032 | 628k | if (!(*ptp->mark) (&nep, pstate)) |
1033 | 479k | continue; |
1034 | 149k | new |= 1; |
1035 | 149k | } |
1036 | 574k | (++sp)->ptr = nptr; |
1037 | 574k | } |
1038 | 17.2M | return new; |
1039 | 17.2M | } |
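 | | /* A minimal usage sketch of the return-value protocol documented |
 | | above, essentially as it appears in gs_gc_reclaim: a negative result |
 | | means a clump's rescan interval was widened, so rescanning repeats |
 | | until no segment overflows. */ |
 | | #if 0 |
 | |     more = 0; |
 | |     for_roots(ispace, max_trace, mem, rp) |
 | |         more |= gc_trace(rp, &state, mark_stack); |
 | |     while (more < 0) {      /* the mark stack overflowed */ |
 | |         more = 0; |
 | |         for_clumps(ispace, max_trace, mem, cp, &sw) |
 | |             more |= gc_rescan_clump(cp, &state, mark_stack); |
 | |     } |
 | | #endif |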
1040 | | /* Link to, attempting to allocate if necessary, */ |
1041 | | /* another clump of mark stack. */ |
1042 | | static int |
1043 | | gc_extend_stack(gc_mark_stack * pms, gc_state_t * pstate) |
1044 | 1.07M | { |
1045 | 1.07M | if (pms->next == 0) { /* Try to allocate another segment. */ |
1046 | 27.9k | uint count; |
1047 | | |
1048 | 170k | for (count = ms_size_desired; count >= ms_size_min; count >>= 1) { |
1049 | 150k | pms->next = (gc_mark_stack *) |
1050 | 150k | gs_alloc_bytes_immovable(pstate->heap, |
1051 | 150k | sizeof(gc_mark_stack) + |
1052 | 150k | sizeof(ms_entry) * count, |
1053 | 150k | "gc mark stack"); |
1054 | 150k | if (pms->next != 0) |
1055 | 7.53k | break; |
1056 | 150k | } |
1057 | 27.9k | if (pms->next == 0) { /* The mark stack overflowed. */ |
1058 | 20.3k | ms_entry *sp = pms->entries + pms->count - 1; |
1059 | 20.3k | byte *cptr = sp->ptr; /* container */ |
1060 | 20.3k | clump_t *cp = gc_locate(cptr, pstate); |
1061 | 20.3k | int new = 1; |
1062 | | |
1063 | 20.3k | if (cp == 0) { /* We were tracing outside collectible */ |
1064 | | /* storage. This can't happen. */ |
1065 | 0 | if_debug1('6', "mark stack overflowed while outside collectible space at "PRI_INTPTR"!\n", |
1066 | 0 | (intptr_t)cptr); |
1067 | 0 | gs_abort(pstate->heap); |
1068 | 0 | } |
1069 | 20.3k | if (cptr < cp->rescan_bot) |
1070 | 958 | cp->rescan_bot = cptr, new = -1; |
1071 | 20.3k | if (cptr > cp->rescan_top) |
1072 | 4.01k | cp->rescan_top = cptr, new = -1; |
1073 | 20.3k | return new; |
1074 | 20.3k | } |
1075 | 7.53k | gc_init_mark_stack(pms->next, count); |
1076 | 7.53k | pms->next->prev = pms; |
1077 | 7.53k | pms->next->on_heap = true; |
1078 | 7.53k | } |
1079 | 1.05M | return 0; |
1080 | 1.07M | } |
1081 | | |
1082 | | /* Mark a struct. Return true if new mark. */ |
1083 | | static bool |
1084 | | ptr_struct_mark(enum_ptr_t *pep, gc_state_t * ignored) |
1085 | 6.28G | { |
1086 | 6.28G | obj_header_t *ptr = (obj_header_t *)pep->ptr; |
1087 | | |
1088 | 6.28G | if (ptr == 0) |
1089 | 1.01G | return false; |
1090 | 5.26G | ptr--; /* point to header */ |
1091 | 5.26G | if (!o_is_unmarked(ptr)) |
1092 | 4.96G | return false; |
1093 | 301M | o_mark(ptr); |
1094 | 301M | return true; |
1095 | 5.26G | } |
1096 | | |
1097 | | /* Mark a string. Return true if new mark. */ |
1098 | | static bool |
1099 | | ptr_string_mark(enum_ptr_t *pep, gc_state_t * gcst) |
1100 | 217M | { |
1101 | 217M | return gc_string_mark(pep->ptr, pep->size, true, gcst); |
1102 | 217M | } |
1103 | | |
1104 | | /* Mark a name. Return true if new mark. */ |
1105 | | static bool |
1106 | | ptr_name_index_mark(enum_ptr_t *pep, gc_state_t * gcst) |
1107 | 0 | { |
1108 | 0 | return names_mark_index(gcst->heap->gs_lib_ctx->gs_name_table, pep->size); |
1109 | 0 | } |
1110 | | |
1111 | | /* Finish tracing by marking names. */ |
1112 | | static bool |
1113 | | gc_trace_finish(gc_state_t * pstate) |
1114 | 325k | { |
1115 | 325k | name_table *nt = pstate->ntable; |
1116 | 325k | name_index_t nidx = 0; |
1117 | 325k | bool marked = false; |
1118 | | |
1119 | 2.37G | while ((nidx = names_next_valid_index(nt, nidx)) != 0) { |
1120 | 2.37G | name_string_t *pnstr = names_index_string_inline(nt, nidx); |
1121 | | |
1122 | 2.37G | if (pnstr->mark) { |
1123 | 2.28G | enum_ptr_t enst, ensst; |
1124 | | |
1125 | 2.28G | if (!pnstr->foreign_string && |
1126 | 2.28G | gc_string_mark(pnstr->string_bytes, pnstr->string_size, |
1127 | 2.06G | true, pstate) |
1128 | 2.28G | ) |
1129 | 2.06G | marked = true; |
1130 | 2.28G | enst.ptr = names_index_sub_table(nt, nidx); |
1131 | 2.28G | ensst.ptr = names_index_string_sub_table(nt, nidx); |
1132 | 2.28G | marked |= |
1133 | 2.28G | ptr_struct_mark(&enst, pstate) | |
1134 | 2.28G | ptr_struct_mark(&ensst, pstate); |
1135 | 2.28G | } |
1136 | 2.37G | } |
1137 | 325k | return marked; |
1138 | 325k | } |
1139 | | |
1140 | | /* ------ Relocation planning phase ------ */ |
1141 | | |
1142 | | /* Initialize the relocation information in the clump header. */ |
1143 | | static void |
1144 | | gc_init_reloc(clump_t * cp) |
1145 | 70.8M | { |
1146 | 70.8M | clump_head_t *chead = cp->chead; |
1147 | | |
1148 | 70.8M | chead->dest = cp->cbase; |
1149 | 70.8M | chead->free.o_back = |
1150 | 70.8M | offset_of(clump_head_t, free) >> obj_back_shift; |
1151 | 70.8M | chead->free.o_size = sizeof(obj_header_t); |
1152 | 70.8M | chead->free.o_nreloc = 0; |
1153 | 70.8M | } |
1154 | | |
1155 | | /* Set marks and clear relocation for clumps that won't be compacted. */ |
1156 | | static void |
1157 | | gc_clear_reloc(clump_t * cp) |
1158 | 2.27M | { |
1159 | 2.27M | byte *pfree = (byte *) & cp->chead->free; |
1160 | | |
1161 | 2.27M | gc_init_reloc(cp); |
1162 | 27.6M | SCAN_CLUMP_OBJECTS(cp) |
1163 | 27.6M | DO_ALL |
1164 | 27.6M | const struct_shared_procs_t *procs = |
1165 | 27.6M | pre->o_type->shared; |
1166 | | |
1167 | 27.6M | if (procs != 0) |
1168 | 13.7M | (*procs->clear_reloc) (pre, size); |
1169 | 27.6M | o_set_untraced(pre); |
1170 | 27.6M | pre->o_back = ((byte *) pre - pfree) >> obj_back_shift; |
1171 | 27.6M | END_OBJECTS_SCAN |
1172 | 2.27M | gc_strings_set_marks(cp, true); |
1173 | 2.27M | gc_strings_clear_reloc(cp); |
1174 | 2.27M | } |
1175 | | |
1176 | | /* Set the relocation for the objects in a clump. */ |
1177 | | /* This will never be called for a clump with any o_untraced objects. */ |
1178 | | static void |
1179 | | gc_objects_set_reloc(gc_state_t * gcst, clump_t * cp) |
1180 | 68.5M | { |
1181 | 68.5M | size_t reloc = 0; |
1182 | 68.5M | clump_head_t *chead = cp->chead; |
1183 | 68.5M | byte *pfree = (byte *) & chead->free; /* most recent free object */ |
1184 | | |
1185 | 68.5M | if_debug_clump('6', gcst->heap, "[6]setting reloc for clump", cp); |
1186 | 68.5M | gc_init_reloc(cp); |
1187 | 947M | SCAN_CLUMP_OBJECTS(cp) |
1188 | 947M | DO_ALL |
1189 | 947M | struct_proc_finalize((*finalize)); |
1190 | 947M | const struct_shared_procs_t *procs = |
1191 | 947M | pre->o_type->shared; |
1192 | | |
1193 | 947M | if ((procs == 0 ? o_is_unmarked(pre) : |
1194 | 947M | !(*procs->set_reloc) (pre, reloc, size)) |
1195 | 947M | ) { /* Free object */ |
1196 | 370M | reloc += sizeof(obj_header_t) + obj_align_round(size); |
1197 | 370M | if ((finalize = pre->o_type->finalize) != 0) { |
1198 | 12.9M | if_debug2m('u', gcst->heap, "[u]GC finalizing %s "PRI_INTPTR"\n", |
1199 | 12.9M | struct_type_name_string(pre->o_type), |
1200 | 12.9M | (intptr_t)(pre + 1)); |
1201 | 12.9M | (*finalize) (gcst->cur_mem, pre + 1); |
1202 | 12.9M | } |
1203 | 370M | pfree = (byte *) pre; |
1204 | 370M | pre->o_back = (pfree - (byte *) chead) >> obj_back_shift; |
1205 | 370M | pre->o_nreloc = reloc; |
1206 | 370M | if_debug3m('7', gcst->heap, " [7]at "PRI_INTPTR", unmarked %lu, new reloc = %u\n", |
1207 | 370M | (intptr_t)pre, (ulong) size, (unsigned int)reloc); |
1208 | 576M | } else { /* Useful object */ |
1209 | 576M | debug_check_object(pre, cp, gcst); |
1210 | 576M | pre->o_back = ((byte *) pre - pfree) >> obj_back_shift; |
1211 | 576M | } |
1212 | 947M | END_OBJECTS_SCAN |
1213 | | #ifdef DEBUG |
1214 | | if (reloc != 0) { |
1215 | | if_debug1m('6', gcst->heap, "[6]freed %u", (unsigned int)reloc); |
1216 | | if_debug_clump('6', gcst->heap, " in", cp); |
1217 | | } |
1218 | | #endif |
1219 | 68.5M | } |
1220 | | |
1221 | | /* ------ Relocation phase ------ */ |
1222 | | |
1223 | | /* Relocate the pointers in all the objects in a clump. */ |
1224 | | static void |
1225 | | gc_do_reloc(clump_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate) |
1226 | 70.8M | { |
1227 | 70.8M | clump_head_t *chead = cp->chead; |
1228 | | |
1229 | 70.8M | if_debug_clump('6', (const gs_memory_t *)mem, "[6]relocating in clump", cp); |
1230 | 975M | SCAN_CLUMP_OBJECTS(cp) |
1231 | 975M | DO_ALL |
1232 | | #ifdef DEBUG |
1233 | | pstate->container = cp; |
1234 | | #endif |
1235 | | /* We need to relocate the pointers in an object iff */ |
1236 | | /* it is o_untraced, or it is a useful object. */ |
1237 | | /* An object is free iff its back pointer points to */ |
1238 | | /* the clump_head structure. */ |
1239 | 975M | if (o_is_untraced(pre) || |
1240 | 975M | pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead |
1241 | 975M | ) { |
1242 | 604M | struct_proc_reloc_ptrs((*proc)) = |
1243 | 604M | pre->o_type->reloc_ptrs; |
1244 | | |
1245 | 604M | if_debug3m('7', (const gs_memory_t *)mem, |
1246 | 604M | " [7]relocating ptrs in %s(%lu) "PRI_INTPTR"\n", |
1247 | 604M | struct_type_name_string(pre->o_type), |
1248 | 604M | (ulong)size, (intptr_t)pre); |
1249 | 604M | if (proc != 0) |
1250 | 604M | (*proc) (pre + 1, size, pre->o_type, pstate); |
1251 | 604M | } |
1252 | | #ifdef DEBUG |
1253 | | pstate->container = 0; |
1254 | | #endif |
1255 | 975M | END_OBJECTS_SCAN |
1256 | 70.8M | } |
1257 | | |
1258 | | /* Print pointer relocation if debugging. */ |
1259 | | /* We have to provide this procedure even if DEBUG is not defined, */ |
1260 | | /* in case one of the other GC modules was compiled with DEBUG. */ |
1261 | | const void * |
1262 | | print_reloc_proc(const void *obj, const char *cname, const void *robj) |
1263 | 0 | { |
1264 | 0 | if_debug3('9', " [9]relocate %s * "PRI_INTPTR" to "PRI_INTPTR"\n", |
1265 | 0 | cname, (intptr_t)obj, (intptr_t)robj); |
1266 | 0 | return robj; |
1267 | 0 | } |
1268 | | |
1269 | | /* Relocate a pointer to an (aligned) object. */ |
1270 | | /* See gsmemory.h for why the argument is const and the result is not. */ |
1271 | | static void /*obj_header_t */ * |
1272 | | igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst) |
1273 | 7.78G | { |
1274 | 7.78G | const obj_header_t *const optr = (const obj_header_t *)obj; |
1275 | 7.78G | const void *robj; |
1276 | | |
1277 | 7.78G | if (obj == 0) { |
1278 | 979M | discard(print_reloc(obj, "NULL", 0)); |
1279 | 979M | return 0; |
1280 | 979M | } |
1281 | 7.78G | debug_check_object(optr - 1, NULL, gcst); |
1282 | 6.80G | { |
1283 | 6.80G | uint back = optr[-1].o_back; |
1284 | | |
1285 | 6.80G | if (back == o_untraced) |
1286 | 0 | robj = obj; |
1287 | 6.80G | else { |
1288 | | #ifdef DEBUG |
1289 | | /* Do some sanity checking. */ |
1290 | | clump_t *cp = gcst->container; |
1291 | | |
1292 | | if (cp != 0 && cp->cbase <= (byte *)obj && (byte *)obj < cp->ctop) { |
1293 | | if (back > (cp->ctop - cp->cbase) >> obj_back_shift) { |
1294 | | if_debug2('6', "Invalid back pointer %u at "PRI_INTPTR"!\n", |
1295 | | back, (intptr_t)obj); |
1296 | | gs_abort(NULL); |
1297 | | } |
1298 | | } else { |
1299 | | /* Pointed to unknown clump. Can't check it, sorry. */ |
1300 | | } |
1301 | | #endif |
1302 | 6.80G | { |
1303 | 6.80G | const obj_header_t *pfree = (const obj_header_t *) |
1304 | 6.80G | ((const char *)(optr - 1) - |
1305 | 6.80G | (back << obj_back_shift)); |
1306 | 6.80G | const clump_head_t *chead = (const clump_head_t *) |
1307 | 6.80G | ((const char *)pfree - |
1308 | 6.80G | (pfree->o_back << obj_back_shift)); |
1309 | | |
1310 | 6.80G | robj = chead->dest + |
1311 | 6.80G | ((const char *)obj - (const char *)(chead + 1) - |
1312 | 6.80G | pfree->o_nreloc); |
1313 | 6.80G | } |
1314 | 6.80G | } |
1315 | 6.80G | } |
1316 | | /* Use a severely deprecated pun to remove the const property. */ |
1317 | 6.80G | { |
1318 | 6.80G | union { const void *r; void *w; } u; |
1319 | | |
1320 | 6.80G | u.r = print_reloc(obj, struct_type_name_string(optr[-1].o_type), robj); |
1321 | 6.80G | return u.w; |
1322 | 7.78G | } |
1323 | 7.78G | } |
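 | | /* Worked example of the back-pointer arithmetic above (illustrative |
 | | numbers, editor's assumption): if the nearest preceding free-object |
 | | header lies 64 bytes before optr[-1], then optr[-1].o_back == |
 | | 64 >> obj_back_shift; following it yields pfree, and pfree->o_back in |
 | | turn leads back to the clump head. With pfree->o_nreloc == 1024, the |
 | | object slides down by 1024 bytes: |
 | | robj = chead->dest + ((char *)obj - (char *)(chead + 1)) - 1024. */ |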
1324 | | |
1325 | | /* ------ Compaction phase ------ */ |
1326 | | |
1327 | | /* Compact the objects in a clump. */ |
1328 | | /* This will never be called for a clump with any o_untraced objects. */ |
1329 | | static void |
1330 | | gc_objects_compact(clump_t * cp, gc_state_t * gcst) |
1331 | 68.5M | { |
1332 | 68.5M | clump_head_t *chead = cp->chead; |
1333 | 68.5M | obj_header_t *dpre = (obj_header_t *) chead->dest; |
1334 | 68.5M | const gs_memory_t *cmem = gcst->spaces.memories.named.system->stable_memory; |
1335 | | |
1336 | 947M | SCAN_CLUMP_OBJECTS(cp) |
1337 | 947M | DO_ALL |
1338 | | /* An object is free iff its back pointer points to */ |
1339 | | /* the clump_head structure. */ |
1340 | 947M | if (pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead) { |
1341 | 576M | const struct_shared_procs_t *procs = pre->o_type->shared; |
1342 | | |
1343 | 576M | debug_check_object(pre, cp, gcst); |
1344 | 576M | if_debug4m('7', cmem, |
1345 | 576M | " [7]compacting %s "PRI_INTPTR"(%lu) to "PRI_INTPTR"\n", |
1346 | 576M | struct_type_name_string(pre->o_type), |
1347 | 576M | (intptr_t)pre, (ulong)size, (intptr_t)dpre); |
1348 | 576M | if (procs == 0) { |
1349 | 268M | if (dpre != pre) |
1350 | 19.2M | memmove(dpre, pre, |
1351 | 19.2M | sizeof(obj_header_t) + size); |
1352 | 268M | } else |
1353 | 308M | (*procs->compact) (cmem, pre, dpre, size); |
1354 | 576M | dpre = (obj_header_t *) |
1355 | 576M | ((byte *) dpre + obj_size_round(size)); |
1356 | 576M | } |
1357 | 947M | END_OBJECTS_SCAN |
1358 | 68.5M | if (cp->outer == 0 && chead->dest != cp->cbase) |
1359 | 0 | dpre = (obj_header_t *) cp->cbase; /* compacted this clump into another */ |
1360 | 68.5M | gs_alloc_fill(dpre, gs_alloc_fill_collected, cp->cbot - (byte *) dpre); |
1361 | 68.5M | cp->cbot = (byte *) dpre; |
1362 | 68.5M | cp->rcur = 0; |
1363 | 68.5M | cp->rtop = 0; /* just to be sure */ |
1364 | 68.5M | } |
1365 | | |
1366 | | /* ------ Cleanup ------ */ |
1367 | | |
1368 | | static splay_app_result_t |
1369 | | free_if_empty(clump_t *cp, void *arg) |
1370 | 68.5M | { |
1371 | 68.5M | gs_ref_memory_t * mem = (gs_ref_memory_t *)arg; |
1372 | | |
1373 | 68.5M | if (cp->cbot == cp->cbase && cp->ctop == cp->climit && |
1374 | 68.5M | cp->outer == 0 && cp->inner_count == 0) |
1375 | 3.78M | { |
1376 | 3.78M | alloc_free_clump(cp, mem); |
1377 | 3.78M | if (mem->cc == cp) |
1378 | 0 | mem->cc = NULL; |
1379 | 3.78M | } |
1380 | 68.5M | return SPLAY_APP_CONTINUE; |
1381 | 68.5M | } |
1382 | | |
1383 | | /* Free empty clumps. */ |
1384 | | static void |
1385 | | gc_free_empty_clumps(gs_ref_memory_t * mem) |
1386 | 6.18M | { |
1387 | | /* NOTE: Not in reverse order any more, so potentially |
1388 | | * not quite as good for crap allocators. */ |
1389 | 6.18M | clump_splay_app(mem->root, mem, free_if_empty, mem); |
1390 | 6.18M | } |
1391 | | |
1392 | | const gs_memory_t * gcst_get_memory_ptr(gc_state_t *gcst) |
1393 | 2.69G | { |
1394 | 2.69G | vm_spaces spaces = gcst->spaces; |
1395 | 2.69G | const gs_memory_t *cmem = space_system->stable_memory; |
1396 | 2.69G | return cmem; |
1397 | 2.69G | } |