/src/ghostpdl/psi/isave.c
Line | Count | Source |
1 | | /* Copyright (C) 2001-2024 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Save/restore manager for Ghostscript interpreter */ |
18 | | #include "ghost.h" |
19 | | #include "memory_.h" |
20 | | #include "ierrors.h" |
21 | | #include "gsexit.h" |
22 | | #include "gsstruct.h" |
23 | | #include "stream.h" /* for linking for forgetsave */ |
24 | | #include "iastate.h" |
25 | | #include "inamedef.h" |
26 | | #include "iname.h" |
27 | | #include "ipacked.h" |
28 | | #include "isave.h" |
29 | | #include "isstate.h" |
30 | | #include "gsstate.h" |
31 | | #include "store.h" /* for ref_assign */ |
32 | | #include "ivmspace.h" |
33 | | #include "igc.h" |
34 | | #include "gsutil.h" /* gs_next_ids prototype */ |
35 | | #include "icstate.h" |
36 | | |
37 | | /* Structure descriptor */ |
38 | | private_st_alloc_save(); |
39 | | |
40 | | /* Define the maximum amount of data we are willing to scan repeatedly -- */ |
41 | | /* see below for details. */ |
42 | | static const long max_repeated_scan = 100000; |
43 | | |
44 | | /* Define the minimum space for creating an inner clump. */ |
45 | | /* Must be at least sizeof(clump_head_t). */ |
46 | | static const long min_inner_clump_space = sizeof(clump_head_t) + 500; |
47 | | |
48 | | /* |
49 | | * The logic for saving and restoring the state is complex. |
50 | | * Both the changes to individual objects, and the overall state |
51 | | * of the memory manager, must be saved and restored. |
52 | | */ |
53 | | |
54 | | /* |
55 | | * To save the state of the memory manager: |
56 | | * Save the state of the current clump in which we are allocating. |
57 | | * Shrink all clumps to their inner unallocated region. |
58 | | * Save and reset the free block chains. |
59 | | * By doing this, we guarantee that no object older than the save |
60 | | * can be freed. |
61 | | * |
62 | | * To restore the state of the memory manager: |
63 | | * Free all clumps newer than the save, and the descriptors for |
64 | | * the inner clumps created by the save. |
65 | | * Make current the clump that was current at the time of the save. |
66 | | * Restore the state of the current clump. |
67 | | * |
68 | | * In addition to save ("start transaction") and restore ("abort transaction"), |
69 | | * we support forgetting a save ("commit transaction"). To forget a save: |
70 | | * Reassign to the next outer save all clumps newer than the save. |
71 | | * Free the descriptors for the inner clumps, updating their outer |
72 | | * clumps to reflect additional allocations in the inner clumps. |
73 | | * Concatenate the free block chains with those of the outer save. |
74 | | */ |
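
/* A minimal, self-contained sketch of the "transaction" analogy above,
 * using hypothetical stand-in types (SketchClump, SketchSave) rather
 * than the real clump_t / alloc_save_t.  It shows only the ownership
 * hand-off between save levels, and is compiled out.
 */
#if 0 /* illustration only */
#include <stdlib.h>

typedef struct SketchClump { struct SketchClump *next; int level; } SketchClump;
typedef struct SketchSave { SketchClump *clumps_at_save; int level; } SketchSave;

/* "Start transaction": remember which clumps existed at the save. */
static void
sketch_save(SketchSave *ss, SketchClump *clumps, int level)
{
    ss->clumps_at_save = clumps;
    ss->level = level;
}

/* "Abort transaction": free every clump newer than the save. */
static void
sketch_restore(const SketchSave *ss, SketchClump **clumps)
{
    while (*clumps != ss->clumps_at_save) {
        SketchClump *newer = *clumps;

        *clumps = newer->next;
        free(newer);
    }
}

/* "Commit transaction": reassign newer clumps to the next outer save. */
static void
sketch_forget(const SketchSave *ss, SketchClump *clumps)
{
    SketchClump *cp;

    for (cp = clumps; cp != ss->clumps_at_save; cp = cp->next)
        cp->level = ss->level - 1;      /* now owned by the outer level */
}
#endif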
75 | | |
76 | | /* |
77 | | * For saving changes to individual objects, we add an "attribute" bit |
78 | | * (l_new) that logically belongs to the slot where the ref is stored, |
79 | | * not to the ref itself. The bit means "the contents of this slot |
80 | | * have been changed, or the slot was allocated, since the last save." |
81 | | * To keep track of changes since the save, we associate a chain of |
82 | | * <slot, old_contents> pairs that remembers the old contents of slots. |
83 | | * |
84 | | * When creating an object, if the save level is non-zero: |
85 | | * Set l_new in all slots. |
86 | | * |
87 | | * When storing into a slot, if the save level is non-zero: |
88 | | * If l_new isn't set, save the address and contents of the slot |
89 | | * on the current contents chain. |
90 | | * Set l_new after storing the new value. |
91 | | * |
92 | | * To do a save: |
93 | | * If the save level is non-zero: |
94 | | * Reset l_new in all slots on the contents chain, and in all |
95 | | * objects created since the previous save. |
96 | | * Push the head of the contents chain, and reset the chain to empty. |
97 | | * |
98 | | * To do a restore: |
99 | | * Check all the stacks to make sure they don't contain references |
100 | | * to objects created since the save. |
101 | | * Restore all the slots on the contents chain. |
102 | | * Pop the contents chain head. |
103 | | * If the save level is now non-zero: |
104 | | * Scan the newly restored contents chain, and set l_new in all |
105 | | * the slots it references. |
106 | | * Scan all objects created since the previous save, and set |
107 | | * l_new in all the slots of each object. |
108 | | * |
109 | | * To forget a save: |
110 | | * If the save level is greater than 1: |
111 | | * Set l_new as for a restore, per the next outer save. |
112 | | * Concatenate the next outer contents chain to the end of |
113 | | * the current one. |
114 | | * If the save level is 1: |
115 | | * Reset l_new as for a save. |
116 | | * Free the contents chain. |
117 | | */ |
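
/* A minimal sketch of the slot rules above, with hypothetical stand-in
 * types; the real machinery is alloc_save_change_in() below together
 * with the store macros in store.h (included above for ref_assign).
 * SKETCH_L_NEW stands in for the real l_new attribute bit.
 */
#if 0 /* illustration only */
#include <stdlib.h>

enum { SKETCH_L_NEW = 1 };
typedef struct SketchRef { int attrs; long value; } SketchRef;
typedef struct SketchChange {
    struct SketchChange *next;
    SketchRef *where;           /* the slot that was overwritten */
    SketchRef old_contents;     /* its contents before the store */
} SketchChange;

/* Store new_value into *slot; if we are inside a save and the slot has
   not already been saved at this level (l_new clear), log its old
   contents on the chain first. */
static int
sketch_store(SketchChange **chain, int save_level,
             SketchRef *slot, long new_value)
{
    if (save_level != 0 && !(slot->attrs & SKETCH_L_NEW)) {
        SketchChange *cp = (SketchChange *)malloc(sizeof(*cp));

        if (cp == NULL)
            return -1;          /* gs_error_VMerror in the real code */
        cp->where = slot;
        cp->old_contents = *slot;
        cp->next = *chain;
        *chain = cp;
    }
    slot->value = new_value;
    slot->attrs |= SKETCH_L_NEW;        /* saved at most once per level */
    return 0;
}

/* Restore: walk the chain, newest change first, restoring old contents. */
static void
sketch_undo(SketchChange *chain)
{
    for (; chain != NULL; chain = chain->next)
        *chain->where = chain->old_contents;
}
#endif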
118 | | |
119 | | /* |
120 | | * A consequence of the foregoing algorithms is that the cost of a save is |
121 | | * proportional to the total amount of data allocated since the previous |
122 | | * save. If a PostScript program reads in a large amount of setup code and |
123 | | * then uses save/restore heavily, each save/restore will be expensive. To |
124 | | * mitigate this, we check to see how much data we have scanned at this save |
125 | | * level: if it is large, we do a second, invisible save. This greatly |
126 | | * reduces the cost of inner saves, at the expense of possibly saving some |
127 | | * changes twice that otherwise would only have to be saved once. |
128 | | */ |
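
/* The check described above, in miniature: once the data scanned at a
 * save level crosses max_repeated_scan, a boundary is recorded so that
 * later scans can stop early.  Compare save_set_new_changes() below,
 * which records mem->scan_limit for this purpose; this helper is a
 * simplification, not the in-tree code.
 */
#if 0 /* illustration only */
static int
sketch_scan_budget_exceeded(ulong total_scanned, ulong scanned_now,
                            ulong limit /* max_repeated_scan */)
{
    /* Mirrors: scanned + mem->total_scanned >= max_repeated_scan */
    return total_scanned + scanned_now >= limit;
}
#endif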
129 | | |
130 | | /* |
131 | | * The presence of global and local VM complicates the situation further. |
132 | | * There is a separate save chain and contents chain for each VM space. |
133 | | * When multiple contexts are fully implemented, save and restore will have |
134 | | * the following effects, according to the privacy status of the current |
135 | | * context's global and local VM: |
136 | | * Private global, private local: |
137 | | * The outermost save saves both global and local VM; |
138 | | * otherwise, save only saves local VM. |
139 | | * Shared global, private local: |
140 | | * Save only saves local VM. |
141 | | * Shared global, shared local: |
142 | | * Save only saves local VM, and suspends all other contexts |
143 | | * sharing the same local VM until the matching restore. |
144 | | * Since we do not currently implement multiple contexts, only the first |
145 | | * case is relevant. |
146 | | * |
147 | | * Note that when saving the contents of a slot, the choice of chain |
148 | | * is determined by the VM space in which the slot is allocated, |
149 | | * not by the current allocation mode. |
150 | | */ |
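
/* The last point above in code form: the change chain is chosen by the
 * VM space that owns the container, never by the current allocation
 * mode.  The index expression mirrors alloc_save_change() below; the
 * helper itself is illustrative.
 */
#if 0 /* illustration only */
static gs_ref_memory_t *
sketch_space_of_container(gs_dual_memory_t *dmem, const ref *pcont)
{
    if (pcont == NULL)          /* no container: log on local VM */
        return dmem->space_local;
    /* Use the container's own space, not dmem->current. */
    return dmem->spaces_indexed[r_space(pcont) >> r_space_shift];
}
#endif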
151 | | |
152 | | /* Tracing printout */ |
153 | | static void |
154 | | print_save(const char *str, uint spacen, const alloc_save_t *sav) |
155 | 2.69M | { |
156 | 2.69M | if_debug5('u', "[u]%s space %u "PRI_INTPTR": cdata = "PRI_INTPTR", id = %lu\n",\ |
157 | 2.69M | str, spacen, (intptr_t)sav, (intptr_t)sav->client_data, (ulong)sav->id); |
158 | 2.69M | } |
159 | | |
160 | | /* A link to igcref.c. */ |
161 | | ptr_proc_reloc(igc_reloc_ref_ptr_nocheck, ref_packed); |
162 | | |
163 | | static |
164 | | CLEAR_MARKS_PROC(change_clear_marks) |
165 | 29.9M | { |
166 | 29.9M | alloc_change_t *const ptr = (alloc_change_t *)vptr; |
167 | | |
168 | 29.9M | if (r_is_packed(&ptr->contents)) |
169 | 433k | r_clear_pmark((ref_packed *) & ptr->contents); |
170 | 29.5M | else |
171 | 29.5M | r_clear_attrs(&ptr->contents, l_mark); |
172 | 29.9M | } |
173 | | static |
174 | 117M | ENUM_PTRS_WITH(change_enum_ptrs, alloc_change_t *ptr) return 0; |
175 | 29.4M | ENUM_PTR(0, alloc_change_t, next); |
176 | 29.4M | case 1: |
177 | 29.4M | if (ptr->offset >= 0) |
178 | 2 | ENUM_RETURN((byte *) ptr->where - ptr->offset); |
179 | 29.4M | else |
180 | 29.4M | if (ptr->offset != AC_OFFSET_ALLOCATED) |
181 | 6.67M | ENUM_RETURN_REF(ptr->where); |
182 | 22.8M | else { |
183 | | /* Don't enumerate ptr->where, because it |
184 | | needs special processing in |
185 | | alloc_save__filter_changes. */ |
186 | 22.8M | ENUM_RETURN(0); |
187 | 22.8M | } |
188 | 29.4M | case 2: |
189 | 29.4M | ENUM_RETURN_REF(&ptr->contents); |
190 | 117M | ENUM_PTRS_END |
191 | 9.51M | static RELOC_PTRS_WITH(change_reloc_ptrs, alloc_change_t *ptr) |
192 | 9.51M | { |
193 | 9.51M | RELOC_VAR(ptr->next); |
194 | 9.51M | switch (ptr->offset) { |
195 | 0 | case AC_OFFSET_STATIC: |
196 | 0 | break; |
197 | 6.67M | case AC_OFFSET_REF: |
198 | 6.67M | RELOC_REF_PTR_VAR(ptr->where); |
199 | 6.67M | break; |
200 | 2.83M | case AC_OFFSET_ALLOCATED: |
201 | | /* We know that ptr->where may point to an unmarked object |
202 | | because change_enum_ptrs skipped it, |
203 | | and we know it always points to the same space |
204 | | because we took special care when calling alloc_save_change_alloc. |
205 | | Therefore we must skip the mark check, |
206 | | which would happen if we called the regular relocation function |
207 | | igc_reloc_ref_ptr from RELOC_REF_PTR_VAR, |
208 | | so we call igc_reloc_ref_ptr_nocheck instead. */ |
209 | 2.83M | { /* A sanity check. */ |
210 | 2.83M | obj_header_t *pre = (obj_header_t *)ptr->where - 1; |
211 | | |
212 | 2.83M | if (pre->o_type != &st_refs) |
213 | 0 | gs_abort(gcst->heap); |
214 | 2.83M | } |
215 | 2.83M | if (ptr->where != 0 && !gcst->relocating_untraced) |
216 | 2.38M | ptr->where = igc_reloc_ref_ptr_nocheck(ptr->where, gcst); |
217 | 2.83M | break; |
218 | 2 | default: |
219 | 2 | { |
220 | 2 | byte *obj = (byte *) ptr->where - ptr->offset; |
221 | | |
222 | 2 | RELOC_VAR(obj); |
223 | 2 | ptr->where = (ref_packed *) (obj + ptr->offset); |
224 | 2 | } |
225 | 2 | break; |
226 | 9.51M | } |
227 | 9.51M | if (r_is_packed(&ptr->contents)) |
228 | 433k | r_clear_pmark((ref_packed *) & ptr->contents); |
229 | 9.08M | else { |
230 | 9.08M | RELOC_REF_VAR(ptr->contents); |
231 | 9.08M | r_clear_attrs(&ptr->contents, l_mark); |
232 | 9.08M | } |
233 | 9.51M | } |
234 | 9.51M | RELOC_PTRS_END |
235 | | gs_private_st_complex_only(st_alloc_change, alloc_change_t, "alloc_change", |
236 | | change_clear_marks, change_enum_ptrs, change_reloc_ptrs, 0); |
237 | | |
238 | | /* Debugging printout */ |
239 | | #ifdef DEBUG |
240 | | static void |
241 | | alloc_save_print(const gs_memory_t *mem, alloc_change_t * cp, bool print_current) |
242 | | { |
243 | | dmprintf2(mem, " "PRI_INTPTR": "PRI_INTPTR": ", (intptr_t) cp, (intptr_t) cp->where); |
244 | | if (r_is_packed(&cp->contents)) { |
245 | | if (print_current) |
246 | | dmprintf2(mem, "saved=%x cur=%x\n", *(ref_packed *) & cp->contents, |
247 | | *cp->where); |
248 | | else |
249 | | dmprintf1(mem, "%x\n", *(ref_packed *) & cp->contents); |
250 | | } else { |
251 | | if (print_current) |
252 | | dmprintf6(mem, "saved=%x %x %lx cur=%x %x %lx\n", |
253 | | r_type_attrs(&cp->contents), r_size(&cp->contents), |
254 | | (ulong) cp->contents.value.intval, |
255 | | r_type_attrs((ref *) cp->where), |
256 | | r_size((ref *) cp->where), |
257 | | (ulong) ((ref *) cp->where)->value.intval); |
258 | | else |
259 | | dmprintf3(mem, "%x %x %lx\n", |
260 | | r_type_attrs(&cp->contents), r_size(&cp->contents), |
261 | | (ulong) cp->contents.value.intval); |
262 | | } |
263 | | } |
264 | | #endif |
265 | | |
266 | | /* Forward references */ |
267 | | static int restore_resources(alloc_save_t *, gs_ref_memory_t *); |
268 | | static void restore_free(gs_ref_memory_t *); |
269 | | static int save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned); |
270 | | static int save_set_new_changes(gs_ref_memory_t *, bool, bool); |
271 | | static bool check_l_mark(void *obj); |
272 | | |
273 | | /* Initialize the save/restore machinery. */ |
274 | | void |
275 | | alloc_save_init(gs_dual_memory_t * dmem) |
276 | 162k | { |
277 | 162k | alloc_set_not_in_save(dmem); |
278 | 162k | } |
279 | | |
280 | | /* Record that we are in a save. */ |
281 | | static void |
282 | | alloc_set_masks(gs_dual_memory_t *dmem, uint new_mask, uint test_mask) |
283 | 2.19M | { |
284 | 2.19M | int i; |
285 | 2.19M | gs_ref_memory_t *mem; |
286 | | |
287 | 2.19M | dmem->new_mask = new_mask; |
288 | 2.19M | dmem->test_mask = test_mask; |
289 | 10.9M | for (i = 0; i < countof(dmem->spaces.memories.indexed); ++i) |
290 | 8.76M | if ((mem = dmem->spaces.memories.indexed[i]) != 0) { |
291 | 6.57M | mem->new_mask = new_mask, mem->test_mask = test_mask; |
292 | 6.57M | if (mem->stable_memory != (gs_memory_t *)mem) { |
293 | 4.38M | mem = (gs_ref_memory_t *)mem->stable_memory; |
294 | 4.38M | mem->new_mask = new_mask, mem->test_mask = test_mask; |
295 | 4.38M | } |
296 | 6.57M | } |
297 | 2.19M | } |
298 | | void |
299 | | alloc_set_in_save(gs_dual_memory_t *dmem) |
300 | 1.36M | { |
301 | 1.36M | alloc_set_masks(dmem, l_new, l_new); |
302 | 1.36M | } |
303 | | |
304 | | /* Record that we are not in a save. */ |
305 | | void |
306 | | alloc_set_not_in_save(gs_dual_memory_t *dmem) |
307 | 827k | { |
308 | 827k | alloc_set_masks(dmem, 0, ~0); |
309 | 827k | } |
310 | | |
311 | | /* Save the state. */ |
312 | | static alloc_save_t *alloc_save_space(gs_ref_memory_t *mem, |
313 | | gs_dual_memory_t *dmem, |
314 | | ulong sid); |
315 | | static void |
316 | | alloc_free_save(gs_ref_memory_t *mem, alloc_save_t *save, const char *scn) |
317 | 0 | { |
318 | 0 | gs_ref_memory_t save_mem; |
319 | 0 | save_mem = mem->saved->state; |
320 | 0 | gs_free_object((gs_memory_t *)mem, save, scn); |
321 | | /* Free any inner clump structures. This is the easiest way to do it. */ |
322 | 0 | restore_free(mem); |
323 | | /* Restore the 'saved' state - this pulls our object off the linked |
324 | | * list of states. Without this we hit a SEGV in the gc later. */ |
325 | 0 | *mem = save_mem; |
326 | 0 | } |
327 | | int |
328 | | alloc_save_state(gs_dual_memory_t * dmem, void *cdata, ulong *psid) |
329 | 1.18M | { |
330 | 1.18M | gs_ref_memory_t *lmem = dmem->space_local; |
331 | 1.18M | gs_ref_memory_t *gmem = dmem->space_global; |
332 | 1.18M | ulong sid = gs_next_ids((const gs_memory_t *)lmem->stable_memory, 2); |
333 | 1.18M | bool global = |
334 | 1.18M | lmem->save_level == 0 && gmem != lmem && |
335 | 1.18M | gmem->num_contexts == 1; |
336 | 1.18M | alloc_save_t *gsave = |
337 | 1.18M | (global ? alloc_save_space(gmem, dmem, sid + 1) : (alloc_save_t *) 0); |
338 | 1.18M | alloc_save_t *lsave = alloc_save_space(lmem, dmem, sid); |
339 | | |
340 | 1.18M | if (lsave == 0 || (global && gsave == 0)) { |
341 | | /* Only one of lsave or gsave will have been allocated, but |
342 | | * nevertheless (in case things change in the future), we free |
343 | | * lsave, then gsave, so they 'pop' correctly when restoring |
344 | | * the mem->saved states. */ |
345 | 0 | if (lsave != 0) |
346 | 0 | alloc_free_save(lmem, lsave, "alloc_save_state(local save)"); |
347 | 0 | if (gsave != 0) |
348 | 0 | alloc_free_save(gmem, gsave, "alloc_save_state(global save)"); |
349 | 0 | return_error(gs_error_VMerror); |
350 | 0 | } |
351 | 1.18M | if (gsave != 0) { |
352 | 162k | gsave->client_data = 0; |
353 | 162k | print_save("save", gmem->space, gsave); |
354 | | /* Restore names when we do the local restore. */ |
355 | 162k | lsave->restore_names = gsave->restore_names; |
356 | 162k | gsave->restore_names = false; |
357 | 162k | } |
358 | 1.18M | lsave->id = sid; |
359 | 1.18M | lsave->client_data = cdata; |
360 | 1.18M | print_save("save", lmem->space, lsave); |
361 | | /* Reset the l_new attribute in all slots. The only slots that */ |
362 | | /* can have the attribute set are the ones on the changes chain, */ |
363 | | /* and ones in objects allocated since the last save. */ |
364 | 1.18M | if (lmem->save_level > 1) { |
365 | 1.02M | ulong scanned; |
366 | 1.02M | int code = save_set_new(&lsave->state, false, true, &scanned); |
367 | | |
368 | 1.02M | if (code < 0) |
369 | 0 | return code; |
370 | | #if 0 /* Disable invisible save levels. */ |
371 | | if ((lsave->state.total_scanned += scanned) > max_repeated_scan) { |
372 | | /* Do a second, invisible save. */ |
373 | | alloc_save_t *rsave; |
374 | | |
375 | | rsave = alloc_save_space(lmem, dmem, 0L); |
376 | | if (rsave != 0) { |
377 | | rsave->client_data = cdata; |
378 | | #if 0 /* Bug 688153 */ |
379 | | rsave->id = lsave->id; |
380 | | print_save("save", lmem->space, rsave); |
381 | | lsave->id = 0; /* mark as invisible */ |
382 | | rsave->state.save_level--; /* ditto */ |
383 | | lsave->client_data = 0; |
384 | | #else |
385 | | rsave->id = 0; /* mark as invisible */ |
386 | | print_save("save", lmem->space, rsave); |
387 | | rsave->state.save_level--; /* ditto */ |
388 | | rsave->client_data = 0; |
389 | | #endif |
390 | | /* Inherit the allocated space count -- */ |
391 | | /* we need this for triggering a GC. */ |
392 | | print_save("save", lmem->space, lsave); |
393 | | } |
394 | | } |
395 | | #endif |
396 | 1.02M | } |
397 | | |
398 | 1.18M | alloc_set_in_save(dmem); |
399 | 1.18M | *psid = sid; |
400 | 1.18M | return 0; |
401 | 1.18M | } |
402 | | /* Save the state of one space (global or local). */ |
403 | | static alloc_save_t * |
404 | | alloc_save_space(gs_ref_memory_t * mem, gs_dual_memory_t * dmem, ulong sid) |
405 | 1.34M | { |
406 | 1.34M | gs_ref_memory_t save_mem; |
407 | 1.34M | alloc_save_t *save; |
408 | 1.34M | clump_t *cp; |
409 | 1.34M | clump_t *new_cc = NULL; |
410 | 1.34M | clump_splay_walker sw; |
411 | | |
412 | 1.34M | save_mem = *mem; |
413 | 1.34M | alloc_close_clump(mem); |
414 | 1.34M | mem->cc = NULL; |
415 | 1.34M | gs_memory_status((gs_memory_t *) mem, &mem->previous_status); |
416 | 1.34M | ialloc_reset(mem); |
417 | | |
418 | | /* Create inner clumps wherever it's worthwhile. */ |
419 | | |
420 | 19.4M | for (cp = clump_splay_walk_init(&sw, &save_mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) { |
421 | 18.0M | if (cp->ctop - cp->cbot > min_inner_clump_space) { |
422 | | /* Create an inner clump to cover only the unallocated part. */ |
423 | 8.50M | clump_t *inner = |
424 | 8.50M | gs_raw_alloc_struct_immovable(mem->non_gc_memory, &st_clump, |
425 | 8.50M | "alloc_save_space(inner)"); |
426 | | |
427 | 8.50M | if (inner == 0) |
428 | 0 | break; /* maybe should fail */ |
429 | 8.50M | alloc_init_clump(inner, cp->cbot, cp->ctop, cp->sreloc != 0, cp); |
430 | 8.50M | alloc_link_clump(inner, mem); |
431 | 8.50M | if_debug2m('u', (gs_memory_t *)mem, "[u]inner clump: cbot="PRI_INTPTR" ctop="PRI_INTPTR"\n", |
432 | 8.50M | (intptr_t) inner->cbot, (intptr_t) inner->ctop); |
433 | 8.50M | if (cp == save_mem.cc) |
434 | 1.22M | new_cc = inner; |
435 | 8.50M | } |
436 | 18.0M | } |
437 | 1.34M | mem->cc = new_cc; |
438 | 1.34M | alloc_open_clump(mem); |
439 | | |
440 | 1.34M | save = gs_alloc_struct((gs_memory_t *) mem, alloc_save_t, |
441 | 1.34M | &st_alloc_save, "alloc_save_space(save)"); |
442 | 1.34M | if_debug2m('u', (gs_memory_t *)mem, "[u]save space %u at "PRI_INTPTR"\n", |
443 | 1.34M | mem->space, (intptr_t) save); |
444 | 1.34M | if (save == 0) { |
445 | | /* Free the inner clump structures. This is the easiest way. */ |
446 | 0 | restore_free(mem); |
447 | 0 | *mem = save_mem; |
448 | 0 | return 0; |
449 | 0 | } |
450 | 1.34M | save->client_data = NULL; |
451 | 1.34M | save->state = save_mem; |
452 | 1.34M | save->spaces = dmem->spaces; |
453 | 1.34M | save->restore_names = (name_memory(mem) == (gs_memory_t *) mem); |
454 | 1.34M | save->is_current = (dmem->current == mem); |
455 | 1.34M | save->id = sid; |
456 | 1.34M | mem->saved = save; |
457 | 1.34M | if_debug2m('u', (gs_memory_t *)mem, "[u%u]file_save "PRI_INTPTR"\n", |
458 | 1.34M | mem->space, (intptr_t) mem->streams); |
459 | 1.34M | mem->streams = 0; |
460 | 1.34M | mem->total_scanned = 0; |
461 | 1.34M | mem->total_scanned_after_compacting = 0; |
462 | 1.34M | if (sid) |
463 | 1.34M | mem->save_level++; |
464 | 1.34M | return save; |
465 | 1.34M | } |
466 | | |
467 | | /* Record a state change that must be undone for restore, */ |
468 | | /* and mark it as having been saved. */ |
469 | | int |
470 | | alloc_save_change_in(gs_ref_memory_t *mem, const ref * pcont, |
471 | | ref_packed * where, client_name_t cname) |
472 | 2.13G | { |
473 | 2.13G | register alloc_change_t *cp; |
474 | | |
475 | 2.13G | if (mem->new_mask == 0) |
476 | 2.13G | return 0; /* no saving */ |
477 | 5.77M | cp = gs_alloc_struct((gs_memory_t *)mem, alloc_change_t, |
478 | 5.77M | &st_alloc_change, "alloc_save_change"); |
479 | 5.77M | if (cp == 0) |
480 | 0 | return -1; |
481 | 5.77M | cp->next = mem->changes; |
482 | 5.77M | cp->where = where; |
483 | 5.77M | if (pcont == NULL) |
484 | 0 | cp->offset = AC_OFFSET_STATIC; |
485 | 5.77M | else if (r_is_array(pcont) || r_has_type(pcont, t_dictionary)) |
486 | 5.77M | cp->offset = AC_OFFSET_REF; |
487 | 2 | else if (r_is_struct(pcont)) |
488 | 2 | cp->offset = (byte *) where - (byte *) pcont->value.pstruct; |
489 | 0 | else { |
490 | 0 | if_debug3('u', "Bad type %u for save! pcont = "PRI_INTPTR", where = "PRI_INTPTR"\n", |
491 | 0 | r_type(pcont), (intptr_t) pcont, (intptr_t) where); |
492 | 0 | gs_abort((const gs_memory_t *)mem); |
493 | 0 | } |
494 | 5.77M | if (r_is_packed(where)) |
495 | 583k | *(ref_packed *)&cp->contents = *where; |
496 | 5.18M | else { |
497 | 5.18M | ref_assign_inline(&cp->contents, (ref *) where); |
498 | 5.18M | r_set_attrs((ref *) where, l_new); |
499 | 5.18M | } |
500 | 5.77M | mem->changes = cp; |
501 | | #ifdef DEBUG |
502 | | if (gs_debug_c('U')) { |
503 | | dmlprintf1((const gs_memory_t *)mem, "[U]save(%s)", client_name_string(cname)); |
504 | | alloc_save_print((const gs_memory_t *)mem, cp, false); |
505 | | } |
506 | | #endif |
507 | 5.77M | return 0; |
508 | 5.77M | } |
509 | | int |
510 | | alloc_save_change(gs_dual_memory_t * dmem, const ref * pcont, |
511 | | ref_packed * where, client_name_t cname) |
512 | 2.13G | { |
513 | 2.13G | gs_ref_memory_t *mem = |
514 | 2.13G | (pcont == NULL ? dmem->space_local : |
515 | 2.13G | dmem->spaces_indexed[r_space(pcont) >> r_space_shift]); |
516 | | |
517 | 2.13G | return alloc_save_change_in(mem, pcont, where, cname); |
518 | 2.13G | } |
519 | | |
520 | | /* Allocate a structure for recording an allocation event. */ |
521 | | int |
522 | | alloc_save_change_alloc(gs_ref_memory_t *mem, client_name_t cname, alloc_change_t **pcp) |
523 | 222M | { |
524 | 222M | register alloc_change_t *cp; |
525 | | |
526 | 222M | if (mem->new_mask == 0) |
527 | 191M | return 0; /* no saving */ |
528 | 31.0M | cp = gs_alloc_struct((gs_memory_t *)mem, alloc_change_t, |
529 | 31.0M | &st_alloc_change, "alloc_save_change"); |
530 | 31.0M | if (cp == 0) |
531 | 0 | return_error(gs_error_VMerror); |
532 | 31.0M | cp->next = mem->changes; |
533 | 31.0M | cp->where = 0; |
534 | 31.0M | cp->offset = AC_OFFSET_ALLOCATED; |
535 | 31.0M | make_null(&cp->contents); |
536 | 31.0M | *pcp = cp; |
537 | 31.0M | return 1; |
538 | 31.0M | } |
539 | | |
540 | | /* Remove an AC_OFFSET_ALLOCATED element. */ |
541 | | void |
542 | | alloc_save_remove(gs_ref_memory_t *mem, ref_packed *obj, client_name_t cname) |
543 | 72.4k | { |
544 | 72.4k | alloc_change_t **cpp = &mem->changes; |
545 | | |
546 | 6.06M | for (; *cpp != NULL;) { |
547 | 5.99M | alloc_change_t *cp = *cpp; |
548 | | |
549 | 5.99M | if (cp->offset == AC_OFFSET_ALLOCATED && cp->where == obj) { |
550 | 54.1k | if (mem->scan_limit == cp) |
551 | 0 | mem->scan_limit = cp->next; |
552 | 54.1k | *cpp = cp->next; |
553 | 54.1k | gs_free_object((gs_memory_t *)mem, cp, "alloc_save_remove"); |
554 | 54.1k | } else |
555 | 5.93M | cpp = &(*cpp)->next; |
556 | 5.99M | } |
557 | 72.4k | } |
558 | | |
559 | | /* Filter save change lists. */ |
560 | | static inline void |
561 | | alloc_save__filter_changes_in_space(gs_ref_memory_t *mem) |
562 | 6.18M | { |
563 | | /* This is a special function, which is called |
564 | | from the garbager after setting marks and before collecting |
565 | | unused space. Therefore it just resets marks on |
566 | | elements being released instead of actually releasing them. */ |
567 | 6.18M | alloc_change_t **cpp = &mem->changes; |
568 | | |
569 | 35.1M | for (; *cpp != NULL; ) { |
570 | 28.9M | alloc_change_t *cp = *cpp; |
571 | | |
572 | 28.9M | if (cp->offset == AC_OFFSET_ALLOCATED && !check_l_mark(cp->where)) { |
573 | 19.9M | obj_header_t *pre = (obj_header_t *)cp - 1; |
574 | | |
575 | 19.9M | *cpp = cp->next; |
576 | 19.9M | cp->where = 0; |
577 | 19.9M | if (mem->scan_limit == cp) |
578 | 110k | mem->scan_limit = cp->next; |
579 | 19.9M | o_set_unmarked(pre); |
580 | 19.9M | } else |
581 | 9.01M | cpp = &(*cpp)->next; |
582 | 28.9M | } |
583 | 6.18M | } |
584 | | |
585 | | /* Filter save change lists. */ |
586 | | void |
587 | | alloc_save__filter_changes(gs_ref_memory_t *memory) |
588 | 1.65M | { |
589 | 1.65M | gs_ref_memory_t *mem = memory; |
590 | | |
591 | 7.83M | for (; mem; mem = &mem->saved->state) |
592 | 6.18M | alloc_save__filter_changes_in_space(mem); |
593 | 1.65M | } |
594 | | |
595 | | /* Return (the id of) the innermost externally visible save object, */ |
596 | | /* i.e., the innermost save with a non-zero ID. */ |
597 | | ulong |
598 | | alloc_save_current_id(const gs_dual_memory_t * dmem) |
599 | 1.18M | { |
600 | 1.18M | const alloc_save_t *save = dmem->space_local->saved; |
601 | | |
602 | 1.18M | while (save != 0 && save->id == 0) |
603 | 0 | save = save->state.saved; |
604 | 1.18M | if (save) |
605 | 1.18M | return save->id; |
606 | | |
607 | | /* This should never happen; if it does, return a totally |
608 | | * impossible value. |
609 | | */ |
610 | 0 | return (ulong)-1; |
611 | 1.18M | } |
612 | | alloc_save_t * |
613 | | alloc_save_current(const gs_dual_memory_t * dmem) |
614 | 1.18M | { |
615 | 1.18M | return alloc_find_save(dmem, alloc_save_current_id(dmem)); |
616 | 1.18M | } |
617 | | |
618 | | /* Test whether a reference would be invalidated by a restore. */ |
619 | | bool |
620 | | alloc_is_since_save(const void *vptr, const alloc_save_t * save) |
621 | 7.62M | { |
622 | | /* A reference postdates a save iff it is in a clump allocated */ |
623 | | /* since the save (including any carried-over inner clumps). */ |
624 | | |
625 | 7.62M | const char *const ptr = (const char *)vptr; |
626 | 7.62M | register gs_ref_memory_t *mem = save->space_local; |
627 | | |
628 | 7.62M | if_debug2m('U', (gs_memory_t *)mem, "[U]is_since_save "PRI_INTPTR", "PRI_INTPTR":\n", |
629 | 7.62M | (intptr_t) ptr, (intptr_t) save); |
630 | 7.62M | if (mem->saved == 0) { /* This is a special case, the final 'restore' from */ |
631 | | /* alloc_restore_all. */ |
632 | 162k | return true; |
633 | 162k | } |
634 | | /* Check against clumps allocated since the save. */ |
635 | | /* (There may have been intermediate saves as well.) */ |
636 | 7.47M | for (;; mem = &mem->saved->state) { |
637 | 7.47M | if_debug1m('U', (gs_memory_t *)mem, "[U]checking mem="PRI_INTPTR"\n", (intptr_t) mem); |
638 | 7.47M | if (ptr_is_within_mem_clumps(ptr, mem)) { |
639 | 27 | if_debug0m('U', (gs_memory_t *)mem, "[U+]found\n"); |
640 | 27 | return true; |
641 | 27 | } |
642 | 7.47M | if_debug1m('U', (gs_memory_t *)mem, "[U-]not in any clumps belonging to "PRI_INTPTR"\n", (intptr_t) mem); |
643 | 7.47M | if (mem->saved == save) { /* We've checked all the more recent saves, */ |
644 | | /* must be OK. */ |
645 | 7.46M | break; |
646 | 7.46M | } |
647 | 7.47M | } |
648 | | |
649 | | /* |
650 | | * If we're about to do a global restore (a restore to save level 0), |
651 | | * and there is only one context using this global VM |
652 | | * (the normal case, in which global VM is saved by the |
653 | | * outermost save), we also have to check the global save. |
654 | | * Global saves can't be nested, which makes things easy. |
655 | | */ |
656 | 7.46M | if (save->state.save_level == 0 /* Restoring to save level 0 - see bug 688157, 688161 */ && |
657 | 7.46M | (mem = save->space_global) != save->space_local && |
658 | 7.46M | save->space_global->num_contexts == 1 |
659 | 7.46M | ) { |
660 | 176k | if_debug1m('U', (gs_memory_t *)mem, "[U]checking global mem="PRI_INTPTR"\n", (intptr_t) mem); |
661 | 176k | if (ptr_is_within_mem_clumps(ptr, mem)) { |
662 | 0 | if_debug0m('U', (gs_memory_t *)mem, "[U+] found\n"); |
663 | 0 | return true; |
664 | 0 | } |
665 | 176k | } |
666 | 7.46M | return false; |
667 | | |
668 | 7.46M | #undef ptr |
669 | 7.46M | } |
670 | | |
671 | | /* Test whether a name would be invalidated by a restore. */ |
672 | | bool |
673 | | alloc_name_is_since_save(const gs_memory_t *mem, |
674 | | const ref * pnref, const alloc_save_t * save) |
675 | 296k | { |
676 | 296k | const name_string_t *pnstr; |
677 | | |
678 | 296k | if (!save->restore_names) |
679 | 296k | return false; |
680 | 0 | pnstr = names_string_inline(mem->gs_lib_ctx->gs_name_table, pnref); |
681 | 0 | if (pnstr->foreign_string) |
682 | 0 | return false; |
683 | 0 | return alloc_is_since_save(pnstr->string_bytes, save); |
684 | 0 | } |
685 | | bool |
686 | | alloc_name_index_is_since_save(const gs_memory_t *mem, |
687 | | uint nidx, const alloc_save_t *save) |
688 | 0 | { |
689 | 0 | const name_string_t *pnstr; |
690 | |
691 | 0 | if (!save->restore_names) |
692 | 0 | return false; |
693 | 0 | pnstr = names_index_string_inline(mem->gs_lib_ctx->gs_name_table, nidx); |
694 | 0 | if (pnstr->foreign_string) |
695 | 0 | return false; |
696 | 0 | return alloc_is_since_save(pnstr->string_bytes, save); |
697 | 0 | } |
698 | | |
699 | | /* Check whether any names have been created since a given save */ |
700 | | /* that might be released by the restore. */ |
701 | | bool |
702 | | alloc_any_names_since_save(const alloc_save_t * save) |
703 | 1.34M | { |
704 | 1.34M | return save->restore_names; |
705 | 1.34M | } |
706 | | |
707 | | /* Get the saved state with a given ID. */ |
708 | | alloc_save_t * |
709 | | alloc_find_save(const gs_dual_memory_t * dmem, ulong sid) |
710 | 3.06M | { |
711 | 3.06M | alloc_save_t *sprev = dmem->space_local->saved; |
712 | | |
713 | 3.06M | if (sid == 0) |
714 | 0 | return 0; /* invalid id */ |
715 | 105M | while (sprev != 0) { |
716 | 105M | if (sprev->id == sid) |
717 | 3.06M | return sprev; |
718 | 102M | sprev = sprev->state.saved; |
719 | 102M | } |
720 | 0 | return 0; |
721 | 3.06M | } |
722 | | |
723 | | /* Get the client data from a saved state. */ |
724 | | void * |
725 | | alloc_save_client_data(const alloc_save_t * save) |
726 | 1.18M | { |
727 | 1.18M | return save->client_data; |
728 | 1.18M | } |
729 | | |
730 | | /* |
731 | | * Do one step of restoring the state. The client is responsible for |
732 | | * calling alloc_find_save to get the save object, and for ensuring that |
733 | | * there are no surviving pointers for which alloc_is_since_save is true. |
734 | | * Return true if the argument was the innermost save, in which case |
735 | | * this is the last (or only) step. |
736 | | * Note that "one step" may involve multiple internal steps, |
737 | | * if this is the outermost restore (which requires restoring both local |
738 | | * and global VM) or if we created extra save levels to reduce scanning. |
739 | | */ |
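
/* A sketch of the calling pattern implied above: look up the save, then
 * step until alloc_restore_step_in() reports that the target save was
 * the innermost one.  Error recovery is elided; alloc_restore_all()
 * below is the real in-tree example of this loop.
 */
#if 0 /* illustration only */
static int
sketch_restore_to(gs_dual_memory_t *dmem, ulong sid)
{
    alloc_save_t *save = alloc_find_save(dmem, sid);
    int code;

    if (save == 0)
        return_error(gs_error_invalidrestore);
    do {
        code = alloc_restore_step_in(dmem, save);
        if (code < 0)
            return code;        /* state may be partially restored */
    } while (code == 0);        /* 1 => 'save' was the innermost level */
    return 0;
}
#endif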
740 | | static void restore_finalize(gs_ref_memory_t *); |
741 | | static void restore_space(gs_ref_memory_t *, gs_dual_memory_t *); |
742 | | |
743 | | int |
744 | | alloc_restore_step_in(gs_dual_memory_t *dmem, alloc_save_t * save) |
745 | 1.18M | { |
746 | | /* Get save->space_* now, because the save object will be freed. */ |
747 | 1.18M | gs_ref_memory_t *lmem = save->space_local; |
748 | 1.18M | gs_ref_memory_t *gmem = save->space_global; |
749 | 1.18M | gs_ref_memory_t *mem = lmem; |
750 | 1.18M | alloc_save_t *sprev; |
751 | 1.18M | int code; |
752 | | |
753 | | /* Finalize all objects before releasing resources or undoing changes. */ |
754 | 1.18M | do { |
755 | 1.18M | ulong sid; |
756 | | |
757 | 1.18M | sprev = mem->saved; |
758 | 1.18M | sid = sprev->id; |
759 | 1.18M | restore_finalize(mem); /* finalize objects */ |
760 | 1.18M | mem = &sprev->state; |
761 | 1.18M | if (sid != 0) |
762 | 1.18M | break; |
763 | 1.18M | } |
764 | 1.18M | while (sprev != save); |
765 | 1.18M | if (mem->save_level == 0) { |
766 | | /* This is the outermost save, which might also */ |
767 | | /* need to restore global VM. */ |
768 | 162k | mem = gmem; |
769 | 162k | if (mem != lmem && mem->saved != 0) { |
770 | 162k | restore_finalize(mem); |
771 | 162k | } |
772 | 162k | } |
773 | | |
774 | | /* Do one (externally visible) step of restoring the state. */ |
775 | 1.18M | mem = lmem; |
776 | 1.18M | do { |
777 | 1.18M | ulong sid; |
778 | | |
779 | 1.18M | sprev = mem->saved; |
780 | 1.18M | sid = sprev->id; |
781 | 1.18M | code = restore_resources(sprev, mem); /* release other resources */ |
782 | 1.18M | if (code < 0) |
783 | 0 | return code; |
784 | 1.18M | restore_space(mem, dmem); /* release memory */ |
785 | 1.18M | if (sid != 0) |
786 | 1.18M | break; |
787 | 1.18M | } |
788 | 1.18M | while (sprev != save); |
789 | | |
790 | 1.18M | if (mem->save_level == 0) { |
791 | | /* This is the outermost save, which might also */ |
792 | | /* need to restore global VM. */ |
793 | 162k | mem = gmem; |
794 | 162k | if (mem != lmem && mem->saved != 0) { |
795 | 162k | code = restore_resources(mem->saved, mem); |
796 | 162k | if (code < 0) |
797 | 0 | return code; |
798 | 162k | restore_space(mem, dmem); |
799 | 162k | } |
800 | 162k | alloc_set_not_in_save(dmem); |
801 | 1.02M | } else { /* Set the l_new attribute in all slots that are now new. */ |
802 | 1.02M | ulong scanned; |
803 | | |
804 | 1.02M | code = save_set_new(mem, true, false, &scanned); |
805 | 1.02M | if (code < 0) |
806 | 0 | return code; |
807 | 1.02M | } |
808 | | |
809 | 1.18M | return sprev == save; |
810 | 1.18M | } |
811 | | /* Restore the memory of one space, by undoing changes and freeing */ |
812 | | /* memory allocated since the save. */ |
813 | | static void |
814 | | restore_space(gs_ref_memory_t * mem, gs_dual_memory_t *dmem) |
815 | 1.34M | { |
816 | 1.34M | alloc_save_t *save = mem->saved; |
817 | 1.34M | alloc_save_t saved; |
818 | | |
819 | 1.34M | print_save("restore", mem->space, save); |
820 | | |
821 | | /* Undo changes since the save. */ |
822 | 1.34M | { |
823 | 1.34M | register alloc_change_t *cp = mem->changes; |
824 | | |
825 | 18.0M | while (cp) { |
826 | | #ifdef DEBUG |
827 | | if (gs_debug_c('U')) { |
828 | | dmlputs((const gs_memory_t *)mem, "[U]restore"); |
829 | | alloc_save_print((const gs_memory_t *)mem, cp, true); |
830 | | } |
831 | | #endif |
832 | 16.7M | if (cp->offset == AC_OFFSET_ALLOCATED) |
833 | 16.7M | DO_NOTHING; |
834 | 5.77M | else |
835 | 5.77M | if (r_is_packed(&cp->contents)) |
836 | 583k | *cp->where = *(ref_packed *) & cp->contents; |
837 | 5.18M | else |
838 | 5.18M | ref_assign_inline((ref *) cp->where, &cp->contents); |
839 | 16.7M | cp = cp->next; |
840 | 16.7M | } |
841 | 1.34M | } |
842 | | |
843 | | /* Free memory allocated since the save. */ |
844 | | /* Note that this frees all clumps except the inner ones */ |
845 | | /* belonging to this level. */ |
846 | 1.34M | saved = *save; |
847 | 1.34M | restore_free(mem); |
848 | | |
849 | | /* Restore the allocator state. */ |
850 | 1.34M | { |
851 | 1.34M | int num_contexts = mem->num_contexts; /* don't restore */ |
852 | | |
853 | 1.34M | *mem = saved.state; |
854 | 1.34M | mem->num_contexts = num_contexts; |
855 | 1.34M | } |
856 | 1.34M | alloc_open_clump(mem); |
857 | | |
858 | | /* Make the allocator current if it was current before the save. */ |
859 | 1.34M | if (saved.is_current) { |
860 | 1.18M | dmem->current = mem; |
861 | 1.18M | dmem->current_space = mem->space; |
862 | 1.18M | } |
863 | 1.34M | } |
864 | | |
865 | | /* Restore to the initial state, releasing all resources. */ |
866 | | /* The allocator is no longer usable after calling this routine! */ |
867 | | int |
868 | | alloc_restore_all(i_ctx_t *i_ctx_p) |
869 | 162k | { |
870 | | /* |
871 | | * Save the memory pointers, since freeing space_local will also |
872 | | * free dmem itself. |
873 | | */ |
874 | 162k | gs_ref_memory_t *lmem = idmemory->space_local; |
875 | 162k | gs_ref_memory_t *gmem = idmemory->space_global; |
876 | 162k | gs_ref_memory_t *smem = idmemory->space_system; |
877 | | |
878 | 162k | gs_ref_memory_t *mem; |
879 | 162k | int code; |
880 | | |
881 | | /* Restore to a state outside any saves. */ |
882 | 1.23M | while (lmem->save_level != 0) { |
883 | 1.07M | vm_save_t *vmsave = alloc_save_client_data(alloc_save_current(idmemory)); |
884 | 1.07M | if (vmsave->gsave) { |
885 | 1.07M | gs_grestoreall_for_restore(i_ctx_p->pgs, vmsave->gsave); |
886 | 1.07M | } |
887 | 1.07M | vmsave->gsave = 0; |
888 | 1.07M | code = alloc_restore_step_in(idmemory, lmem->saved); |
889 | | |
890 | 1.07M | if (code < 0) |
891 | 0 | return code; |
892 | 1.07M | } |
893 | | |
894 | | /* Finalize memory. */ |
895 | 162k | restore_finalize(lmem); |
896 | 162k | if ((mem = (gs_ref_memory_t *)lmem->stable_memory) != lmem) |
897 | 162k | restore_finalize(mem); |
898 | 162k | if (gmem != lmem && gmem->num_contexts == 1) { |
899 | 162k | restore_finalize(gmem); |
900 | 162k | if ((mem = (gs_ref_memory_t *)gmem->stable_memory) != gmem) |
901 | 162k | restore_finalize(mem); |
902 | 162k | } |
903 | 162k | restore_finalize(smem); |
904 | | |
905 | | /* Release resources other than memory, using fake */ |
906 | | /* save and memory objects. */ |
907 | 162k | { |
908 | 162k | alloc_save_t empty_save; |
909 | | |
910 | 162k | empty_save.spaces = idmemory->spaces; |
911 | 162k | empty_save.restore_names = false; /* don't bother to release */ |
912 | 162k | code = restore_resources(&empty_save, NULL); |
913 | 162k | if (code < 0) |
914 | 0 | return code; |
915 | 162k | } |
916 | | |
917 | | /* Finally, release memory. */ |
918 | 162k | restore_free(lmem); |
919 | 162k | if ((mem = (gs_ref_memory_t *)lmem->stable_memory) != lmem) |
920 | 162k | restore_free(mem); |
921 | 162k | if (gmem != lmem) { |
922 | 162k | if (!--(gmem->num_contexts)) { |
923 | 162k | restore_free(gmem); |
924 | 162k | if ((mem = (gs_ref_memory_t *)gmem->stable_memory) != gmem) |
925 | 162k | restore_free(mem); |
926 | 162k | } |
927 | 162k | } |
928 | 162k | restore_free(smem); |
929 | 162k | return 0; |
930 | 162k | } |
931 | | |
932 | | /* |
933 | | * Finalize objects that will be freed by a restore. |
934 | | * Note that we must temporarily disable the freeing operations |
935 | | * of the allocator while doing this. |
936 | | */ |
937 | | static void |
938 | | restore_finalize(gs_ref_memory_t * mem) |
939 | 2.16M | { |
940 | 2.16M | clump_t *cp; |
941 | 2.16M | clump_splay_walker sw; |
942 | | |
943 | 2.16M | alloc_close_clump(mem); |
944 | 2.16M | gs_enable_free((gs_memory_t *) mem, false); |
945 | 35.7M | for (cp = clump_splay_walk_bwd_init(&sw, mem); cp != 0; cp = clump_splay_walk_bwd(&sw)) { |
946 | 286M | SCAN_CLUMP_OBJECTS(cp) |
947 | 286M | DO_ALL |
948 | 286M | struct_proc_finalize((*finalize)) = |
949 | 286M | pre->o_type->finalize; |
950 | 286M | if (finalize != 0) { |
951 | 9.70M | if_debug2m('u', (gs_memory_t *)mem, "[u]restore finalizing %s "PRI_INTPTR"\n", |
952 | 9.70M | struct_type_name_string(pre->o_type), |
953 | 9.70M | (intptr_t) (pre + 1)); |
954 | 9.70M | (*finalize) ((gs_memory_t *) mem, pre + 1); |
955 | 9.70M | } |
956 | 286M | END_OBJECTS_SCAN |
957 | 33.5M | } |
958 | 2.16M | gs_enable_free((gs_memory_t *) mem, true); |
959 | 2.16M | } |
960 | | |
961 | | /* Release resources for a restore */ |
962 | | static int |
963 | | restore_resources(alloc_save_t * sprev, gs_ref_memory_t * mem) |
964 | 1.51M | { |
965 | 1.51M | int code; |
966 | | #ifdef DEBUG |
967 | | if (mem) { |
968 | | /* Note restoring of the file list. */ |
969 | | if_debug4m('u', (gs_memory_t *)mem, "[u%u]file_restore "PRI_INTPTR" => "PRI_INTPTR" for "PRI_INTPTR"\n", |
970 | | mem->space, (intptr_t)mem->streams, |
971 | | (intptr_t)sprev->state.streams, (intptr_t)sprev); |
972 | | } |
973 | | #endif |
974 | | |
975 | | /* Remove entries from font and character caches. */ |
976 | 1.51M | code = font_restore(sprev); |
977 | 1.51M | if (code < 0) |
978 | 0 | return code; |
979 | | |
980 | | /* Adjust the name table. */ |
981 | 1.51M | if (sprev->restore_names) |
982 | 0 | names_restore(mem->gs_lib_ctx->gs_name_table, sprev); |
983 | 1.51M | return 0; |
984 | 1.51M | } |
985 | | |
986 | | /* Release memory for a restore. */ |
987 | | static void |
988 | | restore_free(gs_ref_memory_t * mem) |
989 | 2.16M | { |
990 | | /* Free clumps allocated since the save. */ |
991 | 2.16M | gs_free_all((gs_memory_t *) mem); |
992 | 2.16M | } |
993 | | |
994 | | /* Forget a save, by merging this level with the next outer one. */ |
995 | | static void file_forget_save(gs_ref_memory_t *); |
996 | | static void combine_space(gs_ref_memory_t *); |
997 | | static void forget_changes(gs_ref_memory_t *); |
998 | | int |
999 | | alloc_forget_save_in(gs_dual_memory_t *dmem, alloc_save_t * save) |
1000 | 0 | { |
1001 | 0 | gs_ref_memory_t *mem = save->space_local; |
1002 | 0 | alloc_save_t *sprev; |
1003 | 0 | ulong scanned; |
1004 | 0 | int code; |
1005 | |
1006 | 0 | print_save("forget_save", mem->space, save); |
1007 | | |
1008 | | /* Iteratively combine the current level with the previous one. */ |
1009 | 0 | do { |
1010 | 0 | sprev = mem->saved; |
1011 | 0 | if (sprev->id != 0) |
1012 | 0 | mem->save_level--; |
1013 | 0 | if (mem->save_level != 0) { |
1014 | 0 | alloc_change_t *chp = mem->changes; |
1015 | |
1016 | 0 | code = save_set_new(&sprev->state, true, false, &scanned); |
1017 | 0 | if (code < 0) |
1018 | 0 | return code; |
1019 | | /* Concatenate the changes chains. */ |
1020 | 0 | if (chp == 0) |
1021 | 0 | mem->changes = sprev->state.changes; |
1022 | 0 | else { |
1023 | 0 | while (chp->next != 0) |
1024 | 0 | chp = chp->next; |
1025 | 0 | chp->next = sprev->state.changes; |
1026 | 0 | } |
1027 | 0 | file_forget_save(mem); |
1028 | 0 | combine_space(mem); /* combine memory */ |
1029 | 0 | } else { |
1030 | 0 | forget_changes(mem); |
1031 | 0 | code = save_set_new(mem, false, false, &scanned); |
1032 | 0 | if (code < 0) |
1033 | 0 | return code; |
1034 | 0 | file_forget_save(mem); |
1035 | 0 | combine_space(mem); /* combine memory */ |
1036 | | /* This is the outermost save, which might also */ |
1037 | | /* need to combine global VM. */ |
1038 | 0 | mem = save->space_global; |
1039 | 0 | if (mem != save->space_local && mem->saved != 0) { |
1040 | 0 | forget_changes(mem); |
1041 | 0 | code = save_set_new(mem, false, false, &scanned); |
1042 | 0 | if (code < 0) |
1043 | 0 | return code; |
1044 | 0 | file_forget_save(mem); |
1045 | 0 | combine_space(mem); |
1046 | 0 | } |
1047 | 0 | alloc_set_not_in_save(dmem); |
1048 | 0 | break; /* must be outermost */ |
1049 | 0 | } |
1050 | 0 | } |
1051 | 0 | while (sprev != save); |
1052 | 0 | return 0; |
1053 | 0 | } |
1054 | | /* Combine the clumps of the next outer level with those of the current one, */ |
1055 | | /* and free the bookkeeping structures. */ |
1056 | | static void |
1057 | | combine_space(gs_ref_memory_t * mem) |
1058 | 0 | { |
1059 | 0 | alloc_save_t *saved = mem->saved; |
1060 | 0 | gs_ref_memory_t *omem = &saved->state; |
1061 | 0 | clump_t *cp; |
1062 | 0 | clump_splay_walker sw; |
1063 | |
1064 | 0 | alloc_close_clump(mem); |
1065 | 0 | for (cp = clump_splay_walk_init(&sw, mem); cp != 0; cp = clump_splay_walk_fwd(&sw)) { |
1066 | 0 | if (cp->outer == 0) |
1067 | 0 | alloc_link_clump(cp, omem); |
1068 | 0 | else { |
1069 | 0 | clump_t *outer = cp->outer; |
1070 | |
1071 | 0 | outer->inner_count--; |
1072 | 0 | if (mem->cc == cp) |
1073 | 0 | mem->cc = outer; |
1074 | 0 | if (mem->cfreed.cp == cp) |
1075 | 0 | mem->cfreed.cp = outer; |
1076 | | /* "Free" the header of the inner clump, */ |
1077 | | /* and any immediately preceding gap left by */ |
1078 | | /* the GC having compacted the outer clump. */ |
1079 | 0 | { |
1080 | 0 | obj_header_t *hp = (obj_header_t *) outer->cbot; |
1081 | |
1082 | 0 | hp->o_pad = 0; |
1083 | 0 | hp->o_alone = 0; |
1084 | 0 | hp->o_size = (char *)(cp->chead + 1) |
1085 | 0 | - (char *)(hp + 1); |
1086 | 0 | hp->o_type = &st_bytes; |
1087 | | /* The following call is probably not safe. */ |
1088 | | #if 0 /* **************** */ |
1089 | | gs_free_object((gs_memory_t *) mem, |
1090 | | hp + 1, "combine_space(header)"); |
1091 | | #endif /* **************** */ |
1092 | 0 | } |
1093 | | /* Update the outer clump's allocation pointers. */ |
1094 | 0 | outer->cbot = cp->cbot; |
1095 | 0 | outer->rcur = cp->rcur; |
1096 | 0 | outer->rtop = cp->rtop; |
1097 | 0 | outer->ctop = cp->ctop; |
1098 | 0 | outer->has_refs |= cp->has_refs; |
1099 | 0 | gs_free_object(mem->non_gc_memory, cp, |
1100 | 0 | "combine_space(inner)"); |
1101 | 0 | } |
1102 | 0 | } |
1103 | | /* Update relevant parts of allocator state. */ |
1104 | 0 | mem->root = omem->root; |
1105 | 0 | mem->allocated += omem->allocated; |
1106 | 0 | mem->gc_allocated += omem->allocated; |
1107 | 0 | mem->lost.objects += omem->lost.objects; |
1108 | 0 | mem->lost.refs += omem->lost.refs; |
1109 | 0 | mem->lost.strings += omem->lost.strings; |
1110 | 0 | mem->saved = omem->saved; |
1111 | 0 | mem->previous_status = omem->previous_status; |
1112 | 0 | { /* Concatenate free lists. */ |
1113 | 0 | int i; |
1114 | |
1115 | 0 | for (i = 0; i < num_freelists; i++) { |
1116 | 0 | obj_header_t *olist = omem->freelists[i]; |
1117 | 0 | obj_header_t *list = mem->freelists[i]; |
1118 | |
1119 | 0 | if (olist == 0); |
1120 | 0 | else if (list == 0) |
1121 | 0 | mem->freelists[i] = olist; |
1122 | 0 | else { |
1123 | 0 | while (*(obj_header_t **) list != 0) |
1124 | 0 | list = *(obj_header_t **) list; |
1125 | 0 | *(obj_header_t **) list = olist; |
1126 | 0 | } |
1127 | 0 | } |
1128 | 0 | if (omem->largest_free_size > mem->largest_free_size) |
1129 | 0 | mem->largest_free_size = omem->largest_free_size; |
1130 | 0 | } |
1131 | 0 | gs_free_object((gs_memory_t *) mem, saved, "combine_space(saved)"); |
1132 | 0 | alloc_open_clump(mem); |
1133 | 0 | } |
1134 | | /* Free the changes chain for a level 0 .forgetsave, */ |
1135 | | /* resetting the l_new flag in the changed refs. */ |
1136 | | static void |
1137 | | forget_changes(gs_ref_memory_t * mem) |
1138 | 0 | { |
1139 | 0 | register alloc_change_t *chp = mem->changes; |
1140 | 0 | alloc_change_t *next; |
1141 | |
1142 | 0 | for (; chp; chp = next) { |
1143 | 0 | ref_packed *prp = chp->where; |
1144 | |
1145 | 0 | if_debug1m('U', (gs_memory_t *)mem, "[U]forgetting change "PRI_INTPTR"\n", (intptr_t) chp); |
1146 | 0 | if (chp->offset == AC_OFFSET_ALLOCATED) |
1147 | 0 | DO_NOTHING; |
1148 | 0 | else |
1149 | 0 | if (!r_is_packed(prp)) |
1150 | 0 | r_clear_attrs((ref *) prp, l_new); |
1151 | 0 | next = chp->next; |
1152 | 0 | gs_free_object((gs_memory_t *) mem, chp, "forget_changes"); |
1153 | 0 | } |
1154 | 0 | mem->changes = 0; |
1155 | 0 | } |
1156 | | /* Update the streams list when forgetting a save. */ |
1157 | | static void |
1158 | | file_forget_save(gs_ref_memory_t * mem) |
1159 | 0 | { |
1160 | 0 | const alloc_save_t *save = mem->saved; |
1161 | 0 | stream *streams = mem->streams; |
1162 | 0 | stream *saved_streams = save->state.streams; |
1163 | |
1164 | 0 | if_debug4m('u', (gs_memory_t *)mem, "[u%d]file_forget_save "PRI_INTPTR" + "PRI_INTPTR" for "PRI_INTPTR"\n", |
1165 | 0 | mem->space, (intptr_t) streams, (intptr_t) saved_streams, |
1166 | 0 | (intptr_t) save); |
1167 | 0 | if (streams == 0) |
1168 | 0 | mem->streams = saved_streams; |
1169 | 0 | else if (saved_streams != 0) { |
1170 | 0 | while (streams->next != 0) |
1171 | 0 | streams = streams->next; |
1172 | 0 | streams->next = saved_streams; |
1173 | 0 | saved_streams->prev = streams; |
1174 | 0 | } |
1175 | 0 | } |
1176 | | |
1177 | | static inline int |
1178 | | mark_allocated(void *obj, bool to_new, uint *psize) |
1179 | 12.8M | { |
1180 | 12.8M | obj_header_t *pre = (obj_header_t *)obj - 1; |
1181 | 12.8M | uint size = pre_obj_contents_size(pre); |
1182 | 12.8M | ref_packed *prp = (ref_packed *) (pre + 1); |
1183 | 12.8M | ref_packed *next = (ref_packed *) ((char *)prp + size); |
1184 | | #ifdef ALIGNMENT_ALIASING_BUG |
1185 | | ref *rpref; |
1186 | | # define RP_REF(rp) (rpref = (ref *)rp, rpref) |
1187 | | #else |
1188 | 2.48G | # define RP_REF(rp) ((ref *)rp) |
1189 | 12.8M | #endif |
1190 | | |
1191 | 12.8M | if (pre->o_type != &st_refs) { |
1192 | | /* Must not happen. */ |
1193 | 0 | if_debug0('u', "Wrong object type when expected a ref.\n"); |
1194 | 0 | return_error(gs_error_Fatal); |
1195 | 0 | } |
1196 | | /* We know that every block of refs ends with */ |
1197 | | /* a full-size ref, so we only need the end check */ |
1198 | | /* when we encounter one of those. */ |
1199 | 12.8M | if (to_new) |
1200 | 467M | while (1) { |
1201 | 467M | if (r_is_packed(prp)) |
1202 | 31.4M | prp++; |
1203 | 435M | else { |
1204 | 435M | RP_REF(prp)->tas.type_attrs |= l_new; |
1205 | 435M | prp += packed_per_ref; |
1206 | 435M | if (prp >= next) |
1207 | 4.22M | break; |
1208 | 435M | } |
1209 | 467M | } else |
1210 | 2.23G | while (1) { |
1211 | 2.23G | if (r_is_packed(prp)) |
1212 | 178M | prp++; |
1213 | 2.05G | else { |
1214 | 2.05G | RP_REF(prp)->tas.type_attrs &= ~l_new; |
1215 | 2.05G | prp += packed_per_ref; |
1216 | 2.05G | if (prp >= next) |
1217 | 8.63M | break; |
1218 | 2.05G | } |
1219 | 2.23G | } |
1220 | 12.8M | #undef RP_REF |
1221 | 12.8M | *psize = size; |
1222 | 12.8M | return 0; |
1223 | 12.8M | } |
1224 | | |
1225 | | /* Check if a block contains refs marked by garbager. */ |
1226 | | static bool |
1227 | | check_l_mark(void *obj) |
1228 | 22.3M | { |
1229 | 22.3M | obj_header_t *pre = (obj_header_t *)obj - 1; |
1230 | 22.3M | uint size = pre_obj_contents_size(pre); |
1231 | 22.3M | ref_packed *prp = (ref_packed *) (pre + 1); |
1232 | 22.3M | ref_packed *next = (ref_packed *) ((char *)prp + size); |
1233 | | #ifdef ALIGNMENT_ALIASING_BUG |
1234 | | ref *rpref; |
1235 | | # define RP_REF(rp) (rpref = (ref *)rp, rpref) |
1236 | | #else |
1237 | 22.3M | # define RP_REF(rp) ((ref *)rp) |
1238 | 22.3M | #endif |
1239 | | |
1240 | | /* We know that every block of refs ends with */ |
1241 | | /* a full-size ref, so we only need the end check */ |
1242 | | /* when we encounter one of those. */ |
1243 | 38.5G | while (1) { |
1244 | 38.5G | if (r_is_packed(prp)) { |
1245 | 501M | if (r_has_pmark(prp)) |
1246 | 14.3k | return true; |
1247 | 501M | prp++; |
1248 | 38.0G | } else { |
1249 | 38.0G | if (r_has_attr(RP_REF(prp), l_mark)) |
1250 | 2.37M | return true; |
1251 | 38.0G | prp += packed_per_ref; |
1252 | 38.0G | if (prp >= next) |
1253 | 19.9M | return false; |
1254 | 38.0G | } |
1255 | 38.5G | } |
1256 | 22.3M | #undef RP_REF |
1257 | 22.3M | } |
1258 | | |
1259 | | /* Set or reset the l_new attribute in every relevant slot. */ |
1260 | | /* This includes every slot on the current change chain, */ |
1261 | | /* and every (ref) slot allocated at this save level. */ |
1262 | | /* Store the number of bytes of data scanned in *pscanned. */ |
1263 | | static int |
1264 | | save_set_new(gs_ref_memory_t * mem, bool to_new, bool set_limit, ulong *pscanned) |
1265 | 2.04M | { |
1266 | 2.04M | ulong scanned = 0; |
1267 | 2.04M | int code; |
1268 | | |
1269 | | /* Handle the change chain. */ |
1270 | 2.04M | code = save_set_new_changes(mem, to_new, set_limit); |
1271 | 2.04M | if (code < 0) |
1272 | 0 | return code; |
1273 | | |
1274 | | /* Handle newly allocated ref objects. */ |
1275 | 6.85M | SCAN_MEM_CLUMPS(mem, cp) { |
1276 | 6.85M | if (cp->has_refs) { |
1277 | 1.60M | bool has_refs = false; |
1278 | 1.60M | bool no_outer_clump = !(cp->outer != NULL && cp->ctop - cp->cbot > min_inner_clump_space); |
1279 | 21.6M | SCAN_CLUMP_OBJECTS(cp) |
1280 | 21.6M | DO_ALL |
1281 | 21.6M | if_debug3m('U', (gs_memory_t *)mem, "[U]set_new scan("PRI_INTPTR"(%u), %d)\n", |
1282 | 21.6M | (intptr_t) pre, size, to_new); |
1283 | 21.6M | if (pre->o_type == &st_refs) { |
1284 | | /* These are refs, scan them. */ |
1285 | 6.62M | ref_packed *prp = (ref_packed *) (pre + 1); |
1286 | 6.62M | uint size; |
1287 | | /* In order to avoid the garbager unnecessarily scanning for refs that may |
1288 | | not exist, we reset the "has_refs" flag if we're doing a save (and leave |
1289 | | it alone during a restore). This generally works because when we get here |
1290 | | during a save, we've already created the inner clump, and during a restore, |
1291 | | we've already restored to the outer clump. |
1292 | | Where it goes wrong is when there isn't sufficient space left in the clump |
1293 | | for any new allocations, so we won't have created the inner clump, and then |
1294 | | the flag isn't retained. We spot that case above, and only meddle with the |
1295 | | flag here if an inner clump has been created. |
1296 | | */ |
1297 | 6.62M | has_refs = (to_new || no_outer_clump); |
1298 | 6.62M | code = mark_allocated(prp, to_new, &size); |
1299 | 6.62M | if (code < 0) |
1300 | 0 | return code; |
1301 | 6.62M | scanned += size; |
1302 | 6.62M | } else |
1303 | 14.9M | scanned += sizeof(obj_header_t); |
1304 | 21.6M | END_OBJECTS_SCAN |
1305 | 1.60M | cp->has_refs = has_refs; |
1306 | 1.60M | } |
1307 | 6.85M | } |
1308 | 6.85M | END_CLUMPS_SCAN |
1309 | 2.04M | if_debug2m('u', (gs_memory_t *)mem, "[u]set_new (%s) scanned %ld\n", |
1310 | 2.04M | (to_new ? "restore" : "save"), scanned); |
1311 | 2.04M | *pscanned = scanned; |
1312 | 2.04M | return 0; |
1313 | 2.04M | } |
1314 | | |
1315 | | /* Drop redundant elements from the changes list and set l_new. */ |
1316 | | static void |
1317 | | drop_redundant_changes(gs_ref_memory_t * mem) |
1318 | 14 | { |
1319 | 14 | register alloc_change_t *chp = mem->changes, *chp_back = NULL, *chp_forth; |
1320 | | |
1321 | | /* As we are trying to throw away redundant changes in an allocator instance |
1322 | | that has already been "saved", the active clump has already been "closed" |
1323 | | by alloc_save_space(). Using such an allocator (for example, by calling |
1324 | | gs_free_object() with it) can leave it in an unstable state, causing |
1325 | | problems for the garbage collector (specifically, the clump validator code). |
1326 | | So, before we might use it, open the current clump, and then close it again |
1327 | | when we're done. |
1328 | | */ |
1329 | 14 | alloc_open_clump(mem); |
1330 | | |
1331 | | /* First reverse the list and set l_new everywhere. */ |
1332 | 1.66k | for (; chp; chp = chp_forth) { |
1333 | 1.64k | chp_forth = chp->next; |
1334 | 1.64k | if (chp->offset != AC_OFFSET_ALLOCATED) { |
1335 | 45 | ref_packed *prp = chp->where; |
1336 | | |
1337 | 45 | if (!r_is_packed(prp)) { |
1338 | 41 | ref *const rp = (ref *)prp; |
1339 | | |
1340 | 41 | rp->tas.type_attrs |= l_new; |
1341 | 41 | } |
1342 | 45 | } |
1343 | 1.64k | chp->next = chp_back; |
1344 | 1.64k | chp_back = chp; |
1345 | 1.64k | } |
1346 | 14 | mem->changes = chp_back; |
1347 | 14 | chp_back = NULL; |
1348 | | /* Then filter, reset and reverse again. */ |
1349 | 1.66k | for (chp = mem->changes; chp; chp = chp_forth) { |
1350 | 1.64k | chp_forth = chp->next; |
1351 | 1.64k | if (chp->offset != AC_OFFSET_ALLOCATED) { |
1352 | 45 | ref_packed *prp = chp->where; |
1353 | | |
1354 | 45 | if (!r_is_packed(prp)) { |
1355 | 41 | ref *const rp = (ref *)prp; |
1356 | | |
1357 | 41 | if ((rp->tas.type_attrs & l_new) == 0) { |
1358 | 3 | if (mem->scan_limit == chp) |
1359 | 0 | mem->scan_limit = chp_back; |
1360 | 3 | if (mem->changes == chp) |
1361 | 0 | mem->changes = chp_back; |
1362 | 3 | gs_free_object((gs_memory_t *)mem, chp, "alloc_save_remove"); |
1363 | 3 | continue; |
1364 | 3 | } else |
1365 | 38 | rp->tas.type_attrs &= ~l_new; |
1366 | 41 | } |
1367 | 45 | } |
1368 | 1.64k | chp->next = chp_back; |
1369 | 1.64k | chp_back = chp; |
1370 | 1.64k | } |
1371 | 14 | mem->changes = chp_back; |
1372 | | |
1373 | 14 | alloc_close_clump(mem); |
1374 | 14 | } |
1375 | | |
1376 | | /* Set or reset the l_new attribute on the changes chain. */ |
1377 | | static int |
1378 | | save_set_new_changes(gs_ref_memory_t * mem, bool to_new, bool set_limit) |
1379 | 2.04M | { |
1380 | 2.04M | register alloc_change_t *chp; |
1381 | 2.04M | register uint new = (to_new ? l_new : 0); |
1382 | 2.04M | ulong scanned = 0; |
1383 | | |
1384 | 2.04M | if (!to_new && mem->total_scanned_after_compacting > max_repeated_scan * 16) { |
1385 | 14 | mem->total_scanned_after_compacting = 0; |
1386 | 14 | drop_redundant_changes(mem); |
1387 | 14 | } |
1388 | 10.9M | for (chp = mem->changes; chp; chp = chp->next) { |
1389 | 8.94M | if (chp->offset == AC_OFFSET_ALLOCATED) { |
1390 | 6.23M | if (chp->where != 0) { |
1391 | 6.23M | uint size; |
1392 | 6.23M | int code = mark_allocated((void *)chp->where, to_new, &size); |
1393 | | |
1394 | 6.23M | if (code < 0) |
1395 | 0 | return code; |
1396 | 6.23M | scanned += size; |
1397 | 6.23M | } |
1398 | 6.23M | } else { |
1399 | 2.70M | ref_packed *prp = chp->where; |
1400 | | |
1401 | 2.70M | if_debug3m('U', (gs_memory_t *)mem, "[U]set_new "PRI_INTPTR": ("PRI_INTPTR", %d)\n", |
1402 | 2.70M | (intptr_t)chp, (intptr_t)prp, new); |
1403 | 2.70M | if (!r_is_packed(prp)) { |
1404 | 2.69M | ref *const rp = (ref *) prp; |
1405 | | |
1406 | 2.69M | rp->tas.type_attrs = |
1407 | 2.69M | (rp->tas.type_attrs & ~l_new) + new; |
1408 | 2.69M | } |
1409 | 2.70M | } |
1410 | 8.94M | if (mem->scan_limit == chp) |
1411 | 12.7k | break; |
1412 | 8.94M | } |
1413 | 2.04M | if (set_limit) { |
1414 | 1.02M | mem->total_scanned_after_compacting += scanned; |
1415 | 1.02M | if (scanned + mem->total_scanned >= max_repeated_scan) { |
1416 | 7.63k | mem->scan_limit = mem->changes; |
1417 | 7.63k | mem->total_scanned = 0; |
1418 | 7.63k | } else |
1419 | 1.01M | mem->total_scanned += scanned; |
1420 | 1.02M | } |
1421 | 2.04M | return 0; |
1422 | 2.04M | } |
1423 | | |
1424 | | gs_memory_t * |
1425 | | gs_save_any_memory(const alloc_save_t *save) |
1426 | 1.51M | { |
1427 | 1.51M | return((gs_memory_t *)save->space_local); |
1428 | 1.51M | } |