/src/ghostpdl/base/gsmchunk.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2023 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* chunk consolidating wrapper on a base memory allocator */ |
18 | | |
19 | | /* This uses dual binary trees to handle the free list. One tree |
20 | | * holds the blocks in size order, one in location order. We use |
21 | | * a top-down semi-splaying access scheme on lookups and |
22 | | * insertions. */ |
23 | | |
24 | | #include "memory_.h" |
25 | | #include "gx.h" |
26 | | #include "gsstruct.h" |
27 | | #include "gxobj.h" |
28 | | #include "gsstype.h" |
29 | | #include "gserrors.h" |
30 | | #include "gsmchunk.h" |
31 | | #include "gxsync.h" |
32 | | #include "malloc_.h" /* For MEMENTO */ |
33 | | #include "assert_.h" |
34 | | #include "gsmdebug.h" |
35 | | |
36 | | /* Enable DEBUG_CHUNK to check the validity of the heap at every turn */ |
37 | | #undef DEBUG_CHUNK |
38 | | |
39 | | /* Enable DEBUG_SEQ to keep sequence numbers in every block */ |
40 | | #undef DEBUG_SEQ |
41 | | |
42 | | /* Enable DEBUG_CHUNK_PRINT to print the heap at every turn */ |
43 | | #undef DEBUG_CHUNK_PRINT |
44 | | |
45 | | /* Enable DEBUG_CHUNK_PRINT_SLABS to list the slabs in the heap */ |
46 | | #undef DEBUG_CHUNK_PRINT_SLABS |
47 | | |
48 | | #if defined(DEBUG_CHUNK_PRINT_SLABS) && !defined(DEBUG_CHUNK_PRINT) |
49 | | #define DEBUG_CHUNK_PRINT |
50 | | #endif |
51 | | |
52 | | #if defined(DEBUG_CHUNK_PRINT) && !defined(DEBUG_CHUNK) |
53 | | #define DEBUG_CHUNK |
54 | | #endif |
55 | | |
56 | | #if defined(DEBUG_CHUNK) && !defined(DEBUG) |
57 | | #define DEBUG |
58 | | #define CHUNK_FAKE_ASSERT |
59 | | #endif |
60 | | |
61 | | #ifdef DEBUG |
62 | | #ifdef CHUNK_FAKE_ASSERT |
63 | | #define CHUNK_ASSERT(M,A) gs_chunk_assert(M, A, #A) |
64 | | |
65 | | static void gs_chunk_assert(gs_memory_t *m, int v, const char *s) |
66 | | { |
67 | | void (*crash)(void); |
68 | | if (v) |
69 | | return; |
70 | | dmlprintf1(m, "Assert failed: %s\n", s); |
71 | | crash = NULL; |
72 | | crash(); |
73 | | } |
74 | | #else |
75 | | #define CHUNK_ASSERT(M,A) assert(A) |
76 | | #endif |
77 | | #endif |
78 | | |
79 | | /* Raw memory procedures */ |
80 | | static gs_memory_proc_alloc_bytes(chunk_alloc_bytes_immovable); |
81 | | static gs_memory_proc_resize_object(chunk_resize_object); |
82 | | static gs_memory_proc_free_object(chunk_free_object); |
83 | | static gs_memory_proc_stable(chunk_stable); |
84 | | static gs_memory_proc_status(chunk_status); |
85 | | static gs_memory_proc_free_all(chunk_free_all); |
86 | | static gs_memory_proc_consolidate_free(chunk_consolidate_free); |
87 | | |
88 | | /* Object memory procedures */ |
89 | | static gs_memory_proc_alloc_bytes(chunk_alloc_bytes); |
90 | | static gs_memory_proc_alloc_struct(chunk_alloc_struct); |
91 | | static gs_memory_proc_alloc_struct(chunk_alloc_struct_immovable); |
92 | | static gs_memory_proc_alloc_byte_array(chunk_alloc_byte_array); |
93 | | static gs_memory_proc_alloc_byte_array(chunk_alloc_byte_array_immovable); |
94 | | static gs_memory_proc_alloc_struct_array(chunk_alloc_struct_array); |
95 | | static gs_memory_proc_alloc_struct_array(chunk_alloc_struct_array_immovable); |
96 | | static gs_memory_proc_object_size(chunk_object_size); |
97 | | static gs_memory_proc_object_type(chunk_object_type); |
98 | | static gs_memory_proc_alloc_string(chunk_alloc_string); |
99 | | static gs_memory_proc_alloc_string(chunk_alloc_string_immovable); |
100 | | static gs_memory_proc_resize_string(chunk_resize_string); |
101 | | static gs_memory_proc_free_string(chunk_free_string); |
102 | | static gs_memory_proc_register_root(chunk_register_root); |
103 | | static gs_memory_proc_unregister_root(chunk_unregister_root); |
104 | | static gs_memory_proc_enable_free(chunk_enable_free); |
105 | | static gs_memory_proc_set_object_type(chunk_set_object_type); |
106 | | static gs_memory_proc_defer_frees(chunk_defer_frees); |
/* Procedure table for the chunk allocator.  Entry order must match
 * the member order of gs_memory_procs_t exactly; each entry is the
 * chunk_* implementation of the corresponding allocator operation. */
static const gs_memory_procs_t chunk_procs =
{
    /* Raw memory procedures */
    chunk_alloc_bytes_immovable,
    chunk_resize_object,
    chunk_free_object,
    chunk_stable,
    chunk_status,
    chunk_free_all,
    chunk_consolidate_free,
    /* Object memory procedures */
    chunk_alloc_bytes,
    chunk_alloc_struct,
    chunk_alloc_struct_immovable,
    chunk_alloc_byte_array,
    chunk_alloc_byte_array_immovable,
    chunk_alloc_struct_array,
    chunk_alloc_struct_array_immovable,
    chunk_object_size,
    chunk_object_type,
    chunk_alloc_string,
    chunk_alloc_string_immovable,
    chunk_resize_string,
    chunk_free_string,
    chunk_register_root,
    chunk_unregister_root,
    chunk_enable_free,
    chunk_set_object_type,
    chunk_defer_frees
};
137 | | |
/* Header placed at the start of every allocated object; the caller's
 * pointer points just past this header (see chunk_obj_alloc). */
typedef struct chunk_obj_node_s {
    gs_memory_type_ptr_t type;           /* object's structure type descriptor */
#ifdef DEBUG_SEQ
    unsigned int sequence;               /* allocation event number (debug only) */
#endif
    struct chunk_obj_node_s *defer_next; /* link for the deferred finalize/free lists */
    size_t size;                         /* Actual size of block */
    size_t padding;                      /* Actual size - requested size */
} chunk_obj_node_t;
147 | | |
/* Node describing one contiguous run of free space.  The node is
 * stored inside the free space itself, and is linked into BOTH trees:
 * one keyed by block size, one keyed by address ("loc"). */
typedef struct chunk_free_node_s {
    struct chunk_free_node_s *left_loc;  /* address-ordered tree: lower addresses */
    struct chunk_free_node_s *right_loc; /* address-ordered tree: higher addresses */
    struct chunk_free_node_s *left_size; /* size-ordered tree: smaller blocks */
    struct chunk_free_node_s *right_size;/* size-ordered tree: larger blocks */
    size_t size;                         /* size of entire freelist block */
} chunk_free_node_t;
155 | | |
156 | | /* |
157 | | * Note: All objects within a chunk are 'aligned' since we round_up_to_align |
158 | | * the free list pointer when removing part of a free area. |
159 | | */ |
/* Header of a slab obtained from the target allocator; the only state
 * kept is the link used to free all slabs at release time. */
typedef struct chunk_slab_s {
    struct chunk_slab_s *next;  /* next slab in gs_memory_chunk_t.slabs */
} chunk_slab_t;
163 | | |
/* Instance state for one chunk allocator.  Objects are carved out of
 * slabs obtained from 'target'; free space is tracked in two splay
 * trees (by size for best-fit allocation, by address for coalescing). */
typedef struct gs_memory_chunk_s {
    gs_memory_common;            /* interface outside world sees */
    gs_memory_t *target;         /* base allocator */
    chunk_slab_t *slabs;         /* list of slabs for freeing */
    chunk_free_node_t *free_size;/* free tree, ordered by block size */
    chunk_free_node_t *free_loc; /* free tree, ordered by address */
    chunk_obj_node_t *defer_finalize_list; /* objects queued while deferring frees */
    chunk_obj_node_t *defer_free_list;     /* objects queued while deferring frees */
    size_t used;                 /* allocation accounting (bytes in use) */
    size_t max_used;             /* high-water mark of 'used' */
    size_t total_free;           /* total bytes held in the free trees */
#ifdef DEBUG_SEQ
    unsigned int sequence;       /* event counter (debug builds only) */
#endif
    int deferring;               /* nonzero while frees are being deferred */
} gs_memory_chunk_t;
180 | | |
181 | 11.5G | #define SIZEOF_ROUND_ALIGN(a) ROUND_UP(sizeof(a), obj_align_mod) |
182 | | |
183 | | /* ---------- Public constructors/destructors ---------- */ |
184 | | |
185 | | /* Initialize a gs_memory_chunk_t */ |
186 | | int |
187 | | gs_memory_chunk_wrap(gs_memory_t **wrapped, /* chunk allocator init */ |
188 | | gs_memory_t *target) /* base allocator */ |
189 | 276k | { |
190 | | /* Use the non-GC allocator of the target. */ |
191 | 276k | gs_memory_t *non_gc_target = target->non_gc_memory; |
192 | 276k | gs_memory_chunk_t *cmem = NULL; |
193 | | |
194 | 276k | if (non_gc_target) |
195 | 276k | cmem = (gs_memory_chunk_t *)gs_alloc_bytes_immovable(non_gc_target, |
196 | 276k | sizeof(gs_memory_chunk_t), |
197 | 276k | "gs_memory_chunk_wrap"); |
198 | 276k | if (cmem == NULL) { |
199 | 0 | *wrapped = NULL; |
200 | 0 | return_error(gs_error_VMerror); |
201 | 0 | } |
202 | 276k | cmem->stable_memory = (gs_memory_t *)cmem; /* we are stable */ |
203 | 276k | cmem->procs = chunk_procs; |
204 | 276k | cmem->gs_lib_ctx = non_gc_target->gs_lib_ctx; |
205 | 276k | cmem->non_gc_memory = (gs_memory_t *)cmem; /* and are not subject to GC */ |
206 | 276k | cmem->thread_safe_memory = non_gc_target->thread_safe_memory; |
207 | 276k | cmem->target = non_gc_target; |
208 | 276k | cmem->slabs = NULL; |
209 | 276k | cmem->free_size = NULL; |
210 | 276k | cmem->free_loc = NULL; |
211 | 276k | cmem->used = 0; |
212 | 276k | cmem->max_used = 0; |
213 | 276k | cmem->total_free = 0; |
214 | | #ifdef DEBUG_SEQ |
215 | | cmem->sequence = 0; |
216 | | #endif |
217 | 276k | cmem->deferring = 0; |
218 | 276k | cmem->defer_finalize_list = NULL; |
219 | 276k | cmem->defer_free_list = NULL; |
220 | | |
221 | | #ifdef DEBUG_CHUNK_PRINT |
222 | | dmlprintf1(non_gc_target, "New chunk "PRI_INTPTR"\n", (intptr_t)cmem); |
223 | | #endif |
224 | | |
225 | | /* Init the chunk management values */ |
226 | 276k | *wrapped = (gs_memory_t *)cmem; |
227 | 276k | return 0; |
228 | 276k | } |
229 | | |
230 | | /* Release a chunk memory manager. */ |
231 | | /* Note that this has no effect on the target. */ |
232 | | void |
233 | | gs_memory_chunk_release(gs_memory_t *mem) |
234 | 276k | { |
235 | 276k | gs_memory_free_all((gs_memory_t *)mem, FREE_ALL_EVERYTHING, |
236 | 276k | "gs_memory_chunk_release"); |
237 | 276k | } |
238 | | |
239 | | /* Release chunk memory manager, and return the target */ |
240 | | gs_memory_t * /* Always succeeds */ |
241 | | gs_memory_chunk_unwrap(gs_memory_t *mem) |
242 | 114k | { |
243 | 114k | gs_memory_t *tmem; |
244 | | |
245 | | /* If this isn't a chunk, nothing to unwrap */ |
246 | 114k | if (mem->procs.status != chunk_status) |
247 | 0 | return mem; |
248 | | |
249 | 114k | tmem = ((gs_memory_chunk_t *)mem)->target; |
250 | 114k | gs_memory_chunk_release(mem); |
251 | | |
252 | 114k | return tmem; |
253 | 114k | } |
254 | | |
255 | | /* ---------- Accessors ------------- */ |
256 | | |
257 | | /* Retrieve this allocator's target */ |
258 | | gs_memory_t * |
259 | | gs_memory_chunk_target(const gs_memory_t *mem) |
260 | 0 | { |
261 | 0 | gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem; |
262 | 0 | return cmem->target; |
263 | 0 | } |
264 | | |
265 | | /* -------- Private members --------- */ |
266 | | |
267 | | /* Note that all of the data is 'immovable' and is opaque to the base allocator */ |
268 | | /* thus even if it is a GC type of allocator, no GC functions will be applied */ |
269 | | /* All allocations are done in the target */ |
270 | | |
271 | | /* Procedures */ |
272 | | |
273 | | static void |
274 | | chunk_mem_node_free_all_slabs(gs_memory_chunk_t *cmem) |
275 | 276k | { |
276 | 276k | chunk_slab_t *slab, *next; |
277 | 276k | gs_memory_t *const target = cmem->target; |
278 | | |
279 | 1.25M | for (slab = cmem->slabs; slab != NULL; slab = next) { |
280 | 975k | next = slab->next; |
281 | 975k | gs_free_object(target, slab, "chunk_mem_node_free_all_slabs"); |
282 | 975k | } |
283 | | |
284 | 276k | cmem->slabs = NULL; |
285 | 276k | cmem->free_size = NULL; |
286 | 276k | cmem->free_loc = NULL; |
287 | 276k | cmem->total_free = 0; |
288 | 276k | cmem->used = 0; |
289 | 276k | } |
290 | | |
/* free_all implementation.  free_mask selects what to release:
 * the slab data, the structures, and/or the allocator object itself. */
static void
chunk_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_memory_chunk_t * const cmem = (gs_memory_chunk_t *)mem;
    /* Capture the target now: it is still needed to free 'cmem' at the
     * bottom, after cmem->target has been cleared. */
    gs_memory_t * const target = cmem->target;

    if (free_mask & FREE_ALL_DATA)
        chunk_mem_node_free_all_slabs(cmem);
    /* Only free the structures and the allocator itself. */
    if (mem->stable_memory) {
        /* stable_memory normally points back at ourselves (set in
         * gs_memory_chunk_wrap); only recurse if it is distinct. */
        if (mem->stable_memory != mem)
            gs_memory_free_all(mem->stable_memory, free_mask, cname);
        if (free_mask & FREE_ALL_ALLOCATOR)
            mem->stable_memory = 0;
    }
    if (free_mask & FREE_ALL_STRUCTURES) {
        cmem->target = 0;
    }
    if (free_mask & FREE_ALL_ALLOCATOR)
        gs_free_object(target, cmem, cname);
}
312 | | |
313 | | extern const gs_memory_struct_type_t st_bytes; |
314 | | |
315 | | #ifdef DEBUG |
/* Debug: recursively walk the address-ordered free tree in-order,
 * accumulating the total free byte count in *total and verifying via
 * *limit that successive blocks are strictly ascending and disjoint.
 * Returns the number of nodes visited.  'depth' only shapes the
 * indented tree printout under DEBUG_CHUNK_PRINT. */
static int dump_free_loc(gs_memory_t *mem, chunk_free_node_t *node, int depth, void **limit, uint *total)
{
#ifdef DEBUG_CHUNK_PRINT
    int i;
#endif
    int count;

    if (node == NULL)
        return 0;

    count = dump_free_loc(mem, node->left_loc, depth + 1 + (depth&1), limit, total);
    *total += node->size;
#ifdef DEBUG_CHUNK_PRINT
    if (depth != 0) {
        for (i = (depth-1)>>1; i != 0; i--)
            dmlprintf(mem, " ");
        if (depth & 1)
            dmlprintf(mem, "/");
        else
            dmlprintf(mem, "\\");
    }
    /* NOTE(review): node->size is size_t but "%x" expects unsigned int;
     * on LP64 this may print a truncated value — confirm intended. */
    dmlprintf3(mem, PRI_INTPTR"+%x->"PRI_INTPTR"\n", (intptr_t)node, node->size, (intptr_t)((byte *)node)+node->size);
#endif
    /* Each node must start strictly after the previous block ended. */
    CHUNK_ASSERT(mem, *limit < (void *)node);
    *limit = ((byte *)node)+node->size;
    return 1 + count + dump_free_loc(mem, node->right_loc, depth + 2 + (depth&1), limit, total);
}
343 | | |
/* Debug: recursively walk the size-ordered free tree in-order,
 * verifying the ordering invariant (ascending size, ties broken by
 * ascending address — the same tiebreak CMP_SIZE uses).  *size/*addr
 * carry the previously visited node's key between calls.  Returns the
 * number of nodes visited. */
static int dump_free_size(gs_memory_t *mem, chunk_free_node_t *node, int depth, uint *size, void **addr)
{
#ifdef DEBUG_CHUNK_PRINT
    int i;
#endif
    int count;

    if (node == NULL)
        return 0;

    count = dump_free_size(mem, node->left_size, depth + 1 + (depth&1), size, addr);
#ifdef DEBUG_CHUNK_PRINT
    if (depth != 0) {
        for (i = (depth-1)>>1; i != 0; i--)
            dmlprintf(mem, " ");
        if (depth & 1)
            dmlprintf(mem, "/");
        else
            dmlprintf(mem, "\\");
    }
    /* NOTE(review): node->size is size_t but "%x" expects unsigned int;
     * on LP64 this may print a truncated value — confirm intended. */
    dmlprintf3(mem, PRI_INTPTR"+%x->"PRI_INTPTR"\n", (intptr_t)node, node->size, (intptr_t)((byte *)node)+node->size);
#endif
    CHUNK_ASSERT(mem, *size < node->size || (*size == node->size && *addr < (void *)node));
    *size = node->size;
    *addr = node;
    return 1 + count + dump_free_size(mem, node->right_size, depth + 2 + (depth&1), size, addr);
}
371 | | |
372 | | #ifdef DEBUG_CHUNK_PRINT |
373 | | static size_t |
374 | | largest_free_block(chunk_free_node_t *size) |
375 | | { |
376 | | if (size == NULL) |
377 | | return 0; |
378 | | while (1) { |
379 | | if (size->right_size == NULL) |
380 | | return size->size; |
381 | | size = size->right_size; |
382 | | } |
383 | | } |
384 | | #endif |
385 | | |
/* Debug: dump and cross-check the heap.  Walks both free trees,
 * verifying each tree's ordering invariants, that both contain the
 * same number of nodes, and that the loc tree's byte total matches
 * cmem->total_free.  On mismatch it deliberately calls through a NULL
 * function pointer so a debugger halts at the failure. */
void
gs_memory_chunk_dump_memory(const gs_memory_t *mem)
{
    const gs_memory_chunk_t *cmem = (const gs_memory_chunk_t *)mem;
    int count1, count2;
    void *limit = NULL;   /* running end-of-previous-block for the loc walk */
    void *addr = NULL;    /* previous node address for the size walk */
    uint size = 1;
    uint total = 0;       /* bytes accumulated by the loc walk */

#ifdef DEBUG_CHUNK_PRINT
    dmlprintf1(cmem->target, "Chunk "PRI_INTPTR":\n", (intptr_t)cmem);
    dmlprintf3(cmem->target, "Used=%"PRIxSIZE", Max Used=%"PRIxSIZE", Total Free=%"PRIxSIZE"\n", cmem->used, cmem->max_used, cmem->total_free);
    /* NOTE(review): largest_free_block returns size_t but is printed
     * with %d — may truncate on LP64; confirm intended. */
    dmlprintf1(cmem->target, "Largest free block=%d bytes\n", largest_free_block(cmem->free_size));
#ifdef DEBUG_CHUNK_PRINT_SLABS
    {
        chunk_slab_t *slab;
        dmlprintf(cmem->target, "Slabs:\n");
        for (slab = cmem->slabs; slab != NULL; slab = slab->next)
            dmlprintf1(cmem->target, " "PRI_INTPTR"\n", (intptr_t)slab);
    }
#endif
    dmlprintf(cmem->target, "Locs:\n");
#endif
    count1 = dump_free_loc(cmem->target, cmem->free_loc, 0, &limit, &total);
#ifdef DEBUG_CHUNK_PRINT
    dmlprintf(cmem->target, "Sizes:\n");
#endif
    count2 = dump_free_size(cmem->target, cmem->free_size, 0, &size, &addr);
    if (count1 != count2) {
        /* Deliberate NULL call: halt in the debugger on corruption. */
        void (*crash)(void) = NULL;
        dmlprintf2(cmem->target, "Tree mismatch! %d vs %d\n", count1, count2);
        crash();
    }
    if (total != cmem->total_free) {
        /* Deliberate NULL call: halt in the debugger on corruption. */
        void (*crash)(void) = NULL;
        dmlprintf2(cmem->target, "Free size mismatch! %u vs %lu\n", total, cmem->total_free);
        crash();
    }
}
426 | | #endif |
427 | | |
428 | | /* round up objects to make sure we have room for a header left */ |
429 | | inline static uint |
430 | | round_up_to_align(uint size) |
431 | 1.65G | { |
432 | 1.65G | uint num_node_headers = (size + SIZEOF_ROUND_ALIGN(chunk_obj_node_t) - 1) / SIZEOF_ROUND_ALIGN(chunk_obj_node_t); |
433 | | |
434 | 1.65G | return num_node_headers * SIZEOF_ROUND_ALIGN(chunk_obj_node_t); |
435 | 1.65G | } |
436 | | |
437 | | static inline int CMP_SIZE(const chunk_free_node_t * a, const chunk_free_node_t * b) |
438 | 8.97G | { |
439 | 8.97G | if (a->size > b->size) |
440 | 4.01G | return 1; |
441 | 4.95G | if (a->size < b->size) |
442 | 4.21G | return 0; |
443 | 740M | return (a > b); |
444 | 4.95G | } |
445 | | |
/* Insert 'node' (whose size field is already set) into the size tree,
 * using a top-down semi-splay descent: at each step we look two levels
 * ahead (a, then b, then possibly c) and rotate so the search path is
 * roughly halved, keeping the tree balanced amortized.  'ap' always
 * points at the parent link where the descent continues; when it
 * reaches a NULL link, the node is attached there. */
static void insert_free_size(gs_memory_chunk_t *cmem, chunk_free_node_t *node)
{
    chunk_free_node_t **ap;
    chunk_free_node_t *a, *b, *c;

    node->left_size = NULL;
    node->right_size = NULL;

    /* Insert into size */
    ap = &cmem->free_size;
    while ((a = *ap) != NULL) {
        if (CMP_SIZE(a, node)) {
            /* node sorts before a: descend left */
            b = a->left_size;
            if (b == NULL) {
                ap = &a->left_size;
                break;          /* Stop searching */
            }
            if (CMP_SIZE(b, node)) {
                /* left-left ("zig-zig") */
                c = b->left_size;
                if (c == NULL) {
                    ap = &b->left_size;
                    break;
                }
                /* Splay:     a            c
                 *          b   Z  =>   W     b
                 *        c   Y             X   a
                 *       W X                   Y Z
                 */
                *ap = c;
                a->left_size = b->right_size;
                b->left_size = c->right_size;
                b->right_size = a;
                c->right_size = b;
                if (CMP_SIZE(c, node))
                    ap = &c->left_size;
                else
                    ap = &b->left_size;
            } else {
                /* left-right ("zig-zag") */
                c = b->right_size;
                if (c == NULL) {
                    ap = &b->right_size;
                    break;
                }
                /* Splay:     a             c
                 *          b   Z  =>     b   a
                 *        W   c          W X Y Z
                 *           X Y
                 */
                *ap = c;
                a->left_size = c->right_size;
                b->right_size = c->left_size;
                c->left_size = b;
                c->right_size = a;
                if (CMP_SIZE(c, node))
                    ap = &b->right_size;
                else
                    ap = &a->left_size;
            }
        } else {
            /* node sorts after a: descend right */
            b = a->right_size;
            if (b == NULL)
            {
                ap = &a->right_size;
                break;
            }
            if (CMP_SIZE(b, node)) {
                /* right-left ("zig-zag") */
                c = b->left_size;
                if (c == NULL) {
                    ap = &b->left_size;
                    break;
                }
                /* Splay:     a              c
                 *          W   b   =>     a   b
                 *            c   Z       W X Y Z
                 *           X Y
                 */
                *ap = c;
                a->right_size = c->left_size;
                b->left_size = c->right_size;
                c->left_size = a;
                c->right_size = b;
                if (CMP_SIZE(c, node))
                    ap = &a->right_size;
                else
                    ap = &b->left_size;
            } else {
                /* right-right ("zig-zig") */
                c = b->right_size;
                if (c == NULL) {
                    ap = &b->right_size;
                    break;
                }
                /* Splay:     a                c
                 *          W   b    =>     b     Z
                 *             X  c       a   Y
                 *               Y Z     W X
                 */
                *ap = c;
                a->right_size = b->left_size;
                b->right_size = c->left_size;
                b->left_size = a;
                c->left_size = b;
                if (CMP_SIZE(c, node))
                    ap = &b->right_size;
                else
                    ap = &c->right_size;
            }
        }
    }
    /* Attach the node at the NULL link the descent ended on. */
    *ap = node;
}
556 | | |
/* Insert 'node' into the address-ordered ("loc") tree.  Identical
 * top-down semi-splay scheme to insert_free_size, but keyed by raw
 * node address (direct pointer comparison) instead of CMP_SIZE. */
static void insert_free_loc(gs_memory_chunk_t *cmem, chunk_free_node_t *node)
{
    chunk_free_node_t **ap;
    chunk_free_node_t *a, *b, *c;

    node->left_loc = NULL;
    node->right_loc = NULL;

    /* Insert into loc */
    ap = &cmem->free_loc;
    while ((a = *ap) != NULL) {
        if (a > node) {
            /* node lies below a: descend left */
            b = a->left_loc;
            if (b == NULL) {
                ap = &a->left_loc;
                break;          /* Stop searching */
            }
            if (b > node) {
                /* left-left ("zig-zig") */
                c = b->left_loc;
                if (c == NULL) {
                    ap = &b->left_loc;
                    break;
                }
                /* Splay:     a            c
                 *          b   Z  =>   W     b
                 *        c   Y             X   a
                 *       W X                   Y Z
                 */
                *ap = c;
                a->left_loc = b->right_loc;
                b->left_loc = c->right_loc;
                b->right_loc = a;
                c->right_loc = b;
                if (c > node)
                    ap = &c->left_loc;
                else
                    ap = &b->left_loc;
            } else {
                /* left-right ("zig-zag") */
                c = b->right_loc;
                if (c == NULL) {
                    ap = &b->right_loc;
                    break;
                }
                /* Splay:     a             c
                 *          b   Z  =>     b   a
                 *        W   c          W X Y Z
                 *           X Y
                 */
                *ap = c;
                a->left_loc = c->right_loc;
                b->right_loc = c->left_loc;
                c->left_loc = b;
                c->right_loc = a;
                if (c > node)
                    ap = &b->right_loc;
                else
                    ap = &a->left_loc;
            }
        } else {
            /* node lies above a: descend right */
            b = a->right_loc;
            if (b == NULL)
            {
                ap = &a->right_loc;
                break;
            }
            if (b > node) {
                /* right-left ("zig-zag") */
                c = b->left_loc;
                if (c == NULL) {
                    ap = &b->left_loc;
                    break;
                }
                /* Splay:     a              c
                 *          W   b   =>     a   b
                 *            c   Z       W X Y Z
                 *           X Y
                 */
                *ap = c;
                a->right_loc = c->left_loc;
                b->left_loc = c->right_loc;
                c->left_loc = a;
                c->right_loc = b;
                if (c > node)
                    ap = &a->right_loc;
                else
                    ap = &b->left_loc;
            } else {
                /* right-right ("zig-zig") */
                c = b->right_loc;
                if (c == NULL) {
                    ap = &b->right_loc;
                    break;
                }
                /* Splay:     a                c
                 *          W   b    =>     b     Z
                 *             X  c       a   Y
                 *               Y Z     W X
                 */
                *ap = c;
                a->right_loc = b->left_loc;
                b->right_loc = c->left_loc;
                b->left_loc = a;
                c->left_loc = b;
                if (c > node)
                    ap = &b->right_loc;
                else
                    ap = &c->right_loc;
            }
        }
    }
    /* Attach the node at the NULL link the descent ended on. */
    *ap = node;
}
667 | | |
668 | | static void insert_free(gs_memory_chunk_t *cmem, chunk_free_node_t *node, uint size) |
669 | 1.06G | { |
670 | 1.06G | node->size = size; |
671 | 1.06G | insert_free_size(cmem, node); |
672 | 1.06G | insert_free_loc(cmem, node); |
673 | 1.06G | } |
674 | | |
/* Unlink 'node' from the address-ordered tree.  'node' is known to be
 * present, so the search loop needs no NULL check.  If the node has
 * two children it is replaced by its in-order predecessor (rightmost
 * node of the left subtree); no rebalancing is done on removal. */
static void remove_free_loc(gs_memory_chunk_t *cmem, chunk_free_node_t *node)
{
    chunk_free_node_t **ap = &cmem->free_loc;

    /* Walk down to the parent link that points at node. */
    while (*ap != node)
    {
        if (*ap > node)
            ap = &(*ap)->left_loc;
        else
            ap = &(*ap)->right_loc;
    }

    if (node->left_loc == NULL)
        *ap = node->right_loc;
    else if (node->right_loc == NULL)
        *ap = node->left_loc;
    else {
        /* Find the in-order predecessor to node and use that to replace node */
        chunk_free_node_t **bp;
        chunk_free_node_t *b;
        bp = &node->left_loc;
        while ((*bp)->right_loc)
            bp = &(*bp)->right_loc;
        b = (*bp);
        *bp = b->left_loc;      /* detach predecessor (it has no right child) */
        b->left_loc = node->left_loc;
        b->right_loc = node->right_loc;
        *ap = b;                /* predecessor takes node's place */
    }
}
705 | | |
/* Unlink 'node' from the size-ordered tree; mirror image of
 * remove_free_loc but navigating with CMP_SIZE.  'node' is known to
 * be present.  A two-child node is replaced by its in-order
 * predecessor; no rebalancing is done on removal. */
static void remove_free_size(gs_memory_chunk_t *cmem, chunk_free_node_t *node)
{
    chunk_free_node_t **ap = &cmem->free_size;

    /* Walk down to the parent link that points at node. */
    while (*ap != node)
    {
        if (CMP_SIZE(*ap, node))
            ap = &(*ap)->left_size;
        else
            ap = &(*ap)->right_size;
    }

    if (node->left_size == NULL)
        *ap = node->right_size;
    else if (node->right_size == NULL)
        *ap = node->left_size;
    else {
        /* Find the in-order predecessor to node and use that to replace node */
        chunk_free_node_t **bp;
        chunk_free_node_t *b;
        bp = &node->left_size;
        while ((*bp)->right_size)
            bp = &(*bp)->right_size;
        b = (*bp);
        *bp = b->left_size;     /* detach predecessor (it has no right child) */
        b->left_size = node->left_size;
        b->right_size = node->right_size;
        *ap = b;                /* predecessor takes node's place */
    }
}
736 | | |
/* Unlink the node that *ap points at from the size-ordered tree,
 * where 'ap' is the parent link itself — the caller has already done
 * the search, so no descent is needed (unlike remove_free_size).
 * Does not touch the loc tree; 'cmem' is unused but kept for symmetry
 * with the other removal helpers. */
static void remove_free_size_fast(gs_memory_chunk_t *cmem, chunk_free_node_t **ap)
{
    chunk_free_node_t *node = *ap;

    if (node->left_size == NULL)
        *ap = node->right_size;
    else if (node->right_size == NULL)
        *ap = node->left_size;
    else {
        /* Find the in-order predecessor to node and use that to replace node */
        chunk_free_node_t **bp;
        chunk_free_node_t *b;
        bp = &node->left_size;
        while ((*bp)->right_size)
            bp = &(*bp)->right_size;
        b = (*bp);
        *bp = b->left_size;     /* detach predecessor (it has no right child) */
        b->left_size = node->left_size;
        b->right_size = node->right_size;
        *ap = b;                /* predecessor takes node's place */
    }
}
759 | | |
/* Unlink 'node' from both free trees (the two removals operate on
 * disjoint link fields and are independent). */
static void remove_free(gs_memory_chunk_t *cmem, chunk_free_node_t *node)
{
    remove_free_loc(cmem, node);
    remove_free_size(cmem, node);
}
765 | | |
766 | | #if defined(MEMENTO) || defined(SINGLE_OBJECT_MEMORY_BLOCKS_ONLY) |
767 | | #define SINGLE_OBJECT_CHUNK(size) (1) |
768 | | #else |
769 | 3.30G | #define SINGLE_OBJECT_CHUNK(size) ((size) > (CHUNK_SIZE>>1)) |
770 | | #endif |
771 | | |
772 | | /* All of the allocation routines reduce to this function */ |
773 | | static byte * |
774 | | chunk_obj_alloc(gs_memory_t *mem, size_t size, gs_memory_type_ptr_t type, client_name_t cname) |
775 | 1.65G | { |
776 | 1.65G | gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem; |
777 | 1.65G | chunk_free_node_t **ap, **okp; |
778 | 1.65G | chunk_free_node_t *a, *b, *c; |
779 | 1.65G | size_t newsize; |
780 | 1.65G | chunk_obj_node_t *obj = NULL; |
781 | | |
782 | 1.65G | newsize = round_up_to_align(size + SIZEOF_ROUND_ALIGN(chunk_obj_node_t)); /* space we will need */ |
783 | | /* When we free this block it might have to go in free - so it had |
784 | | * better be large enough to accommodate a complete free node! */ |
785 | 1.65G | if (newsize < SIZEOF_ROUND_ALIGN(chunk_free_node_t)) |
786 | 967k | newsize = SIZEOF_ROUND_ALIGN(chunk_free_node_t); |
787 | | /* Protect against overflow */ |
788 | 1.65G | if (newsize < size) |
789 | 3.80k | return NULL; |
790 | | |
791 | | #ifdef DEBUG_SEQ |
792 | | cmem->sequence++; |
793 | | #endif |
794 | | |
795 | | #ifdef DEBUG_CHUNK_PRINT |
796 | | #ifdef DEBUG_SEQ |
797 | | dmlprintf4(mem, "Event %x: malloc(chunk="PRI_INTPTR", size=%"PRIxSIZE", cname=%s)\n", |
798 | | cmem->sequence, (intptr_t)cmem, newsize, cname); |
799 | | #else |
800 | | dmlprintf3(mem, "malloc(chunk="PRI_INTPTR", size=%"PRIxSIZE", cname=%s)\n", |
801 | | (intptr_t)cmem, newsize, cname); |
802 | | #endif |
803 | | #endif |
804 | | |
805 | | /* Large blocks are allocated directly */ |
806 | 1.65G | if (SINGLE_OBJECT_CHUNK(size)) { |
807 | 1.55M | obj = (chunk_obj_node_t *)gs_alloc_bytes_immovable(cmem->target, newsize, cname); |
808 | 1.55M | if (obj == NULL) |
809 | 12 | return NULL; |
810 | 1.65G | } else { |
811 | | /* Find the smallest free block that's large enough */ |
812 | | /* okp points to the parent pointer to the block we pick */ |
813 | 1.65G | ap = &cmem->free_size; |
814 | 1.65G | okp = NULL; |
815 | 2.84G | while ((a = *ap) != NULL) { |
816 | 1.85G | if (a->size >= newsize) { |
817 | 682M | b = a->left_size; |
818 | 682M | if (b == NULL) { |
819 | 107M | okp = ap; /* a will do */ |
820 | 107M | break; /* Stop searching */ |
821 | 107M | } |
822 | 575M | if (b->size >= newsize) { |
823 | 186M | c = b->left_size; |
824 | 186M | if (c == NULL) { |
825 | 42.3M | okp = &a->left_size; /* b is as good as we're going to get */ |
826 | 42.3M | break; |
827 | 42.3M | } |
828 | | /* Splay: a c |
829 | | * b Z => W b |
830 | | * c Y X a |
831 | | * W X Y Z |
832 | | */ |
833 | 144M | *ap = c; |
834 | 144M | a->left_size = b->right_size; |
835 | 144M | b->left_size = c->right_size; |
836 | 144M | b->right_size = a; |
837 | 144M | c->right_size = b; |
838 | 144M | if (c->size >= newsize) { |
839 | 77.1M | okp = ap; /* c is the best so far */ |
840 | 77.1M | ap = &c->left_size; |
841 | 77.1M | } else { |
842 | 67.3M | okp = &c->right_size; /* b is the best so far */ |
843 | 67.3M | ap = &b->left_size; |
844 | 67.3M | } |
845 | 388M | } else { |
846 | 388M | c = b->right_size; |
847 | 388M | if (c == NULL) { |
848 | 133M | okp = ap; /* a is as good as we are going to get */ |
849 | 133M | break; |
850 | 133M | } |
851 | | /* Splay: a c |
852 | | * b Z => b a |
853 | | * W c W X Y Z |
854 | | * X Y |
855 | | */ |
856 | 254M | *ap = c; |
857 | 254M | a->left_size = c->right_size; |
858 | 254M | b->right_size = c->left_size; |
859 | 254M | c->left_size = b; |
860 | 254M | c->right_size = a; |
861 | 254M | if (c->size >= newsize) { |
862 | 227M | okp = ap; /* c is the best so far */ |
863 | 227M | ap = &b->right_size; |
864 | 227M | } else { |
865 | 27.0M | okp = &c->right_size; /* a is the best so far */ |
866 | 27.0M | ap = &a->left_size; |
867 | 27.0M | } |
868 | 254M | } |
869 | 1.17G | } else { |
870 | 1.17G | b = a->right_size; |
871 | 1.17G | if (b == NULL) |
872 | 14.4M | break; /* No better match to be found */ |
873 | 1.16G | if (b->size >= newsize) { |
874 | 1.02G | c = b->left_size; |
875 | 1.02G | if (c == NULL) { |
876 | 364M | okp = &a->right_size; /* b is as good as we're going to get */ |
877 | 364M | break; |
878 | 364M | } |
879 | | /* Splay: a c |
880 | | * W b => a b |
881 | | * c Z W X Y Z |
882 | | * X Y |
883 | | */ |
884 | 664M | *ap = c; |
885 | 664M | a->right_size = c->left_size; |
886 | 664M | b->left_size = c->right_size; |
887 | 664M | c->left_size = a; |
888 | 664M | c->right_size = b; |
889 | 664M | if (c->size >= newsize) { |
890 | 587M | okp = ap; /* c is the best so far */ |
891 | 587M | ap = &a->right_size; |
892 | 587M | } else { |
893 | 76.3M | okp = &c->right_size; /* b is the best so far */ |
894 | 76.3M | ap = &b->left_size; |
895 | 76.3M | } |
896 | 664M | } else { |
897 | 132M | c = b->right_size; |
898 | 132M | if (c == NULL) |
899 | 1.48M | break; /* No better match to be found */ |
900 | | /* Splay: a c |
901 | | * W b => b Z |
902 | | * X c a Y |
903 | | * Y Z W X |
904 | | */ |
905 | 131M | *ap = c; |
906 | 131M | a->right_size = b->left_size; |
907 | 131M | b->right_size = c->left_size; |
908 | 131M | b->left_size = a; |
909 | 131M | c->left_size = b; |
910 | 131M | if (c->size >= newsize) { |
911 | 68.1M | okp = ap; /* c is the best so far */ |
912 | 68.1M | ap = &b->right_size; |
913 | 68.1M | } else |
914 | 63.3M | ap = &c->right_size; |
915 | 131M | } |
916 | 1.16G | } |
917 | 1.85G | } |
918 | | |
919 | | /* So *okp points to the most appropriate free tree entry. */ |
920 | | |
921 | 1.65G | if (okp == NULL) { |
922 | | /* No appropriate free space slot. We need to allocate a new slab. */ |
923 | 975k | chunk_slab_t *slab; |
924 | 975k | uint slab_size = newsize + SIZEOF_ROUND_ALIGN(chunk_slab_t); |
925 | | |
926 | 975k | if (slab_size <= (CHUNK_SIZE>>1)) |
927 | 903k | slab_size = CHUNK_SIZE; |
928 | 975k | slab = (chunk_slab_t *)gs_alloc_bytes_immovable(cmem->target, slab_size, cname); |
929 | 975k | if (slab == NULL) |
930 | 0 | return NULL; |
931 | 975k | slab->next = cmem->slabs; |
932 | 975k | cmem->slabs = slab; |
933 | | |
934 | 975k | obj = (chunk_obj_node_t *)(((byte *)slab) + SIZEOF_ROUND_ALIGN(chunk_slab_t)); |
935 | 975k | if (slab_size != newsize + SIZEOF_ROUND_ALIGN(chunk_slab_t)) { |
936 | 903k | insert_free(cmem, (chunk_free_node_t *)(((byte *)obj)+newsize), slab_size - newsize - SIZEOF_ROUND_ALIGN(chunk_slab_t)); |
937 | 903k | cmem->total_free += slab_size - newsize - SIZEOF_ROUND_ALIGN(chunk_slab_t); |
938 | 903k | } |
939 | 1.64G | } else { |
940 | 1.64G | chunk_free_node_t *ok = *okp; |
941 | 1.64G | obj = (chunk_obj_node_t *)(void *)ok; |
942 | 1.64G | if (ok->size >= newsize + SIZEOF_ROUND_ALIGN(chunk_free_node_t)) { |
943 | 1.05G | chunk_free_node_t *tail = (chunk_free_node_t *)(((byte *)ok) + newsize); |
944 | 1.05G | uint tail_size = ok->size - newsize; |
945 | 1.05G | remove_free_size_fast(cmem, okp); |
946 | 1.05G | remove_free_loc(cmem, ok); |
947 | 1.05G | insert_free(cmem, tail, tail_size); |
948 | 1.05G | } else { |
949 | 589M | newsize = ok->size; |
950 | 589M | remove_free_size_fast(cmem, okp); |
951 | 589M | remove_free_loc(cmem, ok); |
952 | 589M | } |
953 | 1.64G | cmem->total_free -= newsize; |
954 | 1.64G | } |
955 | 1.65G | } |
956 | | |
957 | 1.65G | if (gs_alloc_debug) { |
958 | 0 | memset((byte *)(obj) + SIZEOF_ROUND_ALIGN(chunk_obj_node_t), 0xa1, newsize - SIZEOF_ROUND_ALIGN(chunk_obj_node_t)); |
959 | 0 | memset((byte *)(obj) + SIZEOF_ROUND_ALIGN(chunk_obj_node_t), 0xac, size); |
960 | 0 | } |
961 | | |
962 | 1.65G | cmem->used += newsize; |
963 | 1.65G | obj->size = newsize; /* actual size */ |
964 | 1.65G | obj->padding = newsize - size; /* actual size - client requested size */ |
965 | 1.65G | obj->type = type; /* and client desired type */ |
966 | 1.65G | obj->defer_next = NULL; |
967 | | |
968 | | #ifdef DEBUG_SEQ |
969 | | obj->sequence = cmem->sequence; |
970 | | #endif |
971 | 1.65G | if (gs_debug_c('A')) |
972 | 0 | dmlprintf3(mem, "[a+]chunk_obj_alloc (%s)(%"PRIuSIZE") = "PRI_INTPTR": OK.\n", |
973 | 1.65G | client_name_string(cname), size, (intptr_t) obj); |
974 | | #ifdef DEBUG_CHUNK_PRINT |
975 | | #ifdef DEBUG_SEQ |
976 | | dmlprintf5(mem, "Event %x: malloced(chunk="PRI_INTPTR", addr="PRI_INTPTR", size=%"PRIxSIZE", cname=%s)\n", |
977 | | obj->sequence, (intptr_t)cmem, (intptr_t)obj, obj->size, cname); |
978 | | #else |
979 | | dmlprintf4(mem, "malloced(chunk="PRI_INTPTR", addr="PRI_INTPTR", size=%"PRIxSIZE", cname=%s)\n", |
980 | | (intptr_t)cmem, (intptr_t)obj, obj->size, cname); |
981 | | #endif |
982 | | #endif |
983 | | #ifdef DEBUG_CHUNK |
984 | | gs_memory_chunk_dump_memory(cmem); |
985 | | #endif |
986 | | |
987 | 1.65G | return (byte *)Memento_label((byte *)(obj) + SIZEOF_ROUND_ALIGN(chunk_obj_node_t), cname); |
988 | 1.65G | } |
989 | | |
/* Allocate 'size' raw bytes (immovable variant).  This allocator never
 * relocates blocks, so the implementation is identical to
 * chunk_alloc_bytes: both just tag the block with st_bytes. */
static byte *
chunk_alloc_bytes_immovable(gs_memory_t * mem, size_t size, client_name_t cname)
{
    return chunk_obj_alloc(mem, size, &st_bytes, cname);
}
995 | | |
/* Allocate 'size' raw bytes, typed as st_bytes (no finalizer, no
 * struct semantics).  Returns NULL on failure. */
static byte *
chunk_alloc_bytes(gs_memory_t * mem, size_t size, client_name_t cname)
{
    return chunk_obj_alloc(mem, size, &st_bytes, cname);
}
1001 | | |
/* Allocate a single structure of type 'pstype' (immovable variant;
 * identical to chunk_alloc_struct since blocks never move here). */
static void *
chunk_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
                             client_name_t cname)
{
    return chunk_obj_alloc(mem, pstype->ssize, pstype, cname);
}
1008 | | |
/* Allocate a single structure of type 'pstype'; the type is recorded
 * in the object header so its finalizer can run on free. */
static void *
chunk_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
                   client_name_t cname)
{
    return chunk_obj_alloc(mem, pstype->ssize, pstype, cname);
}
1015 | | |
1016 | | static byte * |
1017 | | chunk_alloc_byte_array_immovable(gs_memory_t * mem, size_t num_elements, |
1018 | | size_t elt_size, client_name_t cname) |
1019 | 3.62M | { |
1020 | 3.62M | return chunk_alloc_bytes(mem, num_elements * elt_size, cname); |
1021 | 3.62M | } |
1022 | | |
1023 | | static byte * |
1024 | | chunk_alloc_byte_array(gs_memory_t * mem, size_t num_elements, size_t elt_size, |
1025 | | client_name_t cname) |
1026 | 175M | { |
1027 | 175M | return chunk_alloc_bytes(mem, num_elements * elt_size, cname); |
1028 | 175M | } |
1029 | | |
1030 | | static void * |
1031 | | chunk_alloc_struct_array_immovable(gs_memory_t * mem, size_t num_elements, |
1032 | | gs_memory_type_ptr_t pstype, client_name_t cname) |
1033 | 34.6k | { |
1034 | 34.6k | return chunk_obj_alloc(mem, num_elements * pstype->ssize, pstype, cname); |
1035 | 34.6k | } |
1036 | | |
1037 | | static void * |
1038 | | chunk_alloc_struct_array(gs_memory_t * mem, size_t num_elements, |
1039 | | gs_memory_type_ptr_t pstype, client_name_t cname) |
1040 | 380k | { |
1041 | 380k | return chunk_obj_alloc(mem, num_elements * pstype->ssize, pstype, cname); |
1042 | 380k | } |
1043 | | |
1044 | | static void * |
1045 | | chunk_resize_object(gs_memory_t * mem, void *ptr, size_t new_num_elements, client_name_t cname) |
1046 | 878k | { |
1047 | 878k | void *new_ptr = NULL; |
1048 | | |
1049 | 878k | if (ptr != NULL) { |
1050 | | /* This isn't particularly efficient, but it is rarely used */ |
1051 | 878k | chunk_obj_node_t *obj = (chunk_obj_node_t *)(((byte *)ptr) - SIZEOF_ROUND_ALIGN(chunk_obj_node_t)); |
1052 | 878k | size_t new_size = (obj->type->ssize * new_num_elements); |
1053 | 878k | size_t old_size = obj->size - obj->padding; |
1054 | | /* get the type from the old object */ |
1055 | 878k | gs_memory_type_ptr_t type = obj->type; |
1056 | 878k | gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem; |
1057 | 878k | size_t save_max_used = cmem->max_used; |
1058 | | |
1059 | 878k | if (new_size == old_size) |
1060 | 109k | return ptr; |
1061 | 768k | if ((new_ptr = chunk_obj_alloc(mem, new_size, type, cname)) == 0) |
1062 | 0 | return NULL; |
1063 | 768k | memcpy(new_ptr, ptr, min(old_size, new_size)); |
1064 | 768k | chunk_free_object(mem, ptr, cname); |
1065 | 768k | cmem->max_used = save_max_used; |
1066 | 768k | if (cmem->used > cmem->max_used) |
1067 | 450k | cmem->max_used = cmem->used; |
1068 | 768k | } |
1069 | | |
1070 | 768k | return new_ptr; |
1071 | 878k | } |
1072 | | |
/* Free an object previously returned by chunk_obj_alloc.  NULL is a
 * no-op.  If frees are currently being deferred (cmem->deferring),
 * the object is queued on defer_finalize_list and nothing else
 * happens.  Otherwise the object's finalizer (if any) runs, then:
 *   - a block big enough to have been a single-object chunk is handed
 *     straight back to the target allocator;
 *   - anything else is inserted into the free trees, merging with the
 *     physically adjacent free block on either side when possible.
 * The location tree (free_loc) is searched with a top-down semi-splay
 * so repeated frees in the same region stay cheap. */
static void
chunk_free_object(gs_memory_t *mem, void *ptr, client_name_t cname)
{
    gs_memory_chunk_t * const cmem = (gs_memory_chunk_t *)mem;
    size_t obj_node_size;
    chunk_obj_node_t *obj;
    struct_proc_finalize((*finalize));
    chunk_free_node_t **ap, **gtp, **ltp;
    chunk_free_node_t *a, *b, *c;

    if (ptr == NULL)
        return;

    /* back up to obj header */
    obj_node_size = SIZEOF_ROUND_ALIGN(chunk_obj_node_t);
    obj = (chunk_obj_node_t *)(((byte *)ptr) - obj_node_size);

    if (cmem->deferring) {
        /* defer_next == NULL means "not yet queued"; avoid queuing twice. */
        if (obj->defer_next == NULL) {
            obj->defer_next = cmem->defer_finalize_list;
            cmem->defer_finalize_list = obj;
        }
        return;
    }

#ifdef DEBUG_CHUNK_PRINT
#ifdef DEBUG_SEQ
    cmem->sequence++;
    dmlprintf6(cmem->target, "Event %x: free(chunk="PRI_INTPTR", addr="PRI_INTPTR", size=%x, num=%x, cname=%s)\n",
               cmem->sequence, (intptr_t)cmem, (intptr_t)obj, obj->size, obj->sequence, cname);
#else
    dmlprintf4(cmem->target, "free(chunk="PRI_INTPTR", addr="PRI_INTPTR", size=%x, cname=%s)\n",
               (intptr_t)cmem, (intptr_t)obj, obj->size, cname);
#endif
#endif

    if (obj->type) {
        finalize = obj->type->finalize;
        if (finalize != NULL)
            finalize(mem, ptr);
    }
    /* finalize may change the head_**_chunk doing free of stuff */

    if_debug3m('A', cmem->target, "[a-]chunk_free_object(%s) "PRI_INTPTR"(%"PRIuSIZE")\n",
               client_name_string(cname), (intptr_t)ptr, obj->size);

    cmem->used -= obj->size;

    if (SINGLE_OBJECT_CHUNK(obj->size - obj->padding)) {
        /* Large objects got a slab of their own; return it whole. */
        gs_free_object(cmem->target, obj, "chunk_free_object(single object)");
#ifdef DEBUG_CHUNK
        gs_memory_chunk_dump_memory(cmem);
#endif
        return;
    }

    /* We want to find where to insert this free entry into our free tree. We need to know
     * both the point to the left of it, and the point to the right of it, in order to see
     * if we can merge the free entries. Accordingly, we search from the top of the tree
     * and keep pointers to the nodes that we pass that are greater than it, and less than
     * it. */
    gtp = NULL; /* gtp is set to the address of the pointer to the node where we last stepped left */
    ltp = NULL; /* ltp is set to the address of the pointer to the node where we last stepped right */
    ap = &cmem->free_loc;
    while ((a = *ap) != NULL) {
        if ((void *)a > (void *)obj) {
            b = a->left_loc; /* Try to step left from a */
            if (b == NULL) {
                gtp = ap; /* a is greater than us */
                break;
            }
            if ((void *)b > (void *)obj) {
                c = b->left_loc; /* Try to step left from b */
                if (c == NULL) {
                    gtp = &a->left_loc; /* b is greater than us */
                    break;
                }
                /* Splay:      a            c
                 *           b   Z   =>   W   b
                 *         c   Y            X   a
                 *        W X                  Y Z
                 */
                *ap = c;
                a->left_loc = b->right_loc;
                b->left_loc = c->right_loc;
                b->right_loc = a;
                c->right_loc = b;
                if ((void *)c > (void *)obj) { /* W */
                    gtp = ap; /* c is greater than us */
                    ap = &c->left_loc;
                } else { /* X */
                    gtp = &c->right_loc; /* b is greater than us */
                    ltp = ap; /* c is less than us */
                    ap = &b->left_loc;
                }
            } else {
                c = b->right_loc; /* Try to step right from b */
                if (c == NULL) {
                    gtp = ap; /* a is greater than us */
                    ltp = &a->left_loc; /* b is less than us */
                    break;
                }
                /* Splay:      a             c
                 *           b   Z   =>    b   a
                 *          W  c          W X Y Z
                 *            X Y
                 */
                *ap = c;
                a->left_loc = c->right_loc;
                b->right_loc = c->left_loc;
                c->left_loc = b;
                c->right_loc = a;
                if ((void *)c > (void *)obj) { /* X */
                    gtp = ap; /* c is greater than us */
                    ltp = &c->left_loc; /* b is less than us */
                    ap = &b->right_loc;
                } else { /* Y */
                    gtp = &c->right_loc; /* a is greater than us */
                    ltp = ap; /* c is less than us */
                    ap = &a->left_loc;
                }
            }
        } else {
            b = a->right_loc; /* Try to step right from a */
            if (b == NULL) {
                ltp = ap; /* a is less than us */
                break;
            }
            if ((void *)b > (void *)obj) {
                c = b->left_loc;
                if (c == NULL) {
                    gtp = &a->right_loc; /* b is greater than us */
                    ltp = ap; /* a is less than us */
                    break;
                }
                /* Splay:    a               c
                 *         W   b    =>     a   b
                 *           c   Z        W X Y Z
                 *          X Y
                 */
                *ap = c;
                a->right_loc = c->left_loc;
                b->left_loc = c->right_loc;
                c->left_loc = a;
                c->right_loc = b;
                if ((void *)c > (void *)obj) { /* X */
                    gtp = ap; /* c is greater than us */
                    ltp = &c->left_loc; /* a is less than us */
                    ap = &a->right_loc;
                } else { /* Y */
                    gtp = &c->right_loc; /* b is greater than us */
                    ltp = ap; /* c is less than us */
                    ap = &b->left_loc;
                }
            } else {
                c = b->right_loc;
                if (c == NULL) {
                    ltp = &a->right_loc; /* b is less than us */
                    break;
                }
                /* Splay:    a                 c
                 *         W   b     =>      b   Z
                 *           X   c         a   Y
                 *              Y Z       W X
                 */
                *ap = c;
                a->right_loc = b->left_loc;
                b->right_loc = c->left_loc;
                b->left_loc = a;
                c->left_loc = b;
                if ((void *)c > (void *)obj) { /* Y */
                    gtp = ap; /* c is greater than us */
                    ltp = &c->left_loc; /* b is less than us */
                    ap = &b->right_loc;
                } else { /* Z */
                    ltp = ap; /* c is less than us */
                    ap = &c->right_loc;
                }
            }
        }
    }

    if (ltp) {
        /* There is at least 1 node smaller than us - check for merging */
        chunk_free_node_t *ltfree = (chunk_free_node_t *)(*ltp);
        if ((((byte *)ltfree) + ltfree->size) == (byte *)(void *)obj) {
            /* Merge! The preceding free block ends exactly where we start,
             * so grow it in place; it keeps its slot in the loc tree but
             * must be re-inserted into the size tree at its new size. */
            cmem->total_free += obj->size;
            remove_free_size(cmem, ltfree);
            ltfree->size += obj->size;
            if (gtp) {
                /* There is at least 1 node greater than us - check for merging */
                chunk_free_node_t *gtfree = (chunk_free_node_t *)(*gtp);
                if ((((byte *)obj) + obj->size) == (byte *)(void *)gtfree) {
                    /* Double merge! Swallow the following block too. */
                    ltfree->size += gtfree->size;
                    remove_free(cmem, gtfree);
                }
                gtp = NULL;
            }
            insert_free_size(cmem, ltfree);
            if (gs_alloc_debug)
                memset(((byte *)ltfree) + SIZEOF_ROUND_ALIGN(chunk_free_node_t), 0x69, ltfree->size - SIZEOF_ROUND_ALIGN(chunk_free_node_t));
            obj = NULL;  /* fully absorbed; nothing left to insert */
        }
    }
    if (gtp && obj) {
        /* There is at least 1 node greater than us - check for merging */
        chunk_free_node_t *gtfree = (chunk_free_node_t *)(*gtp);
        if ((((byte *)obj) + obj->size) == (byte *)(void *)gtfree) {
            /* Merge! We end exactly where the following free block starts:
             * take over its tree links, extend, and replace it in both trees. */
            chunk_free_node_t *objfree = (chunk_free_node_t *)(void *)obj;
            uint obj_size = obj->size;
            cmem->total_free += obj_size;
            remove_free_size(cmem, gtfree);
            *objfree = *gtfree;
            objfree->size += obj_size;
            *gtp = objfree;
            insert_free_size(cmem, objfree);
            if (gs_alloc_debug)
                memset(((byte *)objfree) + SIZEOF_ROUND_ALIGN(chunk_free_node_t), 0x96, objfree->size - SIZEOF_ROUND_ALIGN(chunk_free_node_t));
            obj = NULL;  /* fully absorbed; nothing left to insert */
        }
    }

    if (obj) {
        /* Insert new one: no merge happened, so hang a fresh leaf off the
         * in-order predecessor/successor found during the search. */
        chunk_free_node_t *objfree = (chunk_free_node_t *)(void *)obj;
        cmem->total_free += obj->size;
        objfree->size = obj->size;
        objfree->left_loc = NULL;
        objfree->right_loc = NULL;
        if (gtp) {
            ap = &(*gtp)->left_loc;
            while (*ap) {
                ap = &(*ap)->right_loc;
            }
        } else if (ltp) {
            ap = &(*ltp)->right_loc;
            while (*ap) {
                ap = &(*ap)->left_loc;
            }
        } else
            ap = &cmem->free_loc;  /* tree was empty */
        *ap = objfree;
        insert_free_size(cmem, objfree);
        if (gs_alloc_debug)
            memset(((byte *)objfree) + SIZEOF_ROUND_ALIGN(chunk_free_node_t), 0x9b, objfree->size - SIZEOF_ROUND_ALIGN(chunk_free_node_t));
    }

#ifdef DEBUG_CHUNK
    gs_memory_chunk_dump_memory(cmem);
#endif
}
1327 | | |
/* Allocate storage for a string (immovable variant; strings are plain
 * byte blocks in this allocator). */
static byte *
chunk_alloc_string_immovable(gs_memory_t * mem, size_t nbytes, client_name_t cname)
{
    /* we just alloc bytes here */
    return chunk_alloc_bytes(mem, nbytes, cname);
}
1334 | | |
/* Allocate storage for a string; strings are plain byte blocks here. */
static byte *
chunk_alloc_string(gs_memory_t * mem, size_t nbytes, client_name_t cname)
{
    /* we just alloc bytes here */
    return chunk_alloc_bytes(mem, nbytes, cname);
}
1341 | | |
/* Resize a string allocation.  old_num is ignored: the object header
 * already records the current size, so the generic resize suffices. */
static byte *
chunk_resize_string(gs_memory_t * mem, byte * data, size_t old_num, size_t new_num,
                    client_name_t cname)
{
    /* just resize object - ignores old_num */
    return chunk_resize_object(mem, data, new_num, cname);
}
1349 | | |
/* Free a string allocation.  nbytes is unused: the object header
 * carries the real size. */
static void
chunk_free_string(gs_memory_t * mem, byte * data, size_t nbytes,
                  client_name_t cname)
{
    chunk_free_object(mem, data, cname);
}
1356 | | |
1357 | | static void |
1358 | | chunk_status(gs_memory_t * mem, gs_memory_status_t * pstat) |
1359 | 0 | { |
1360 | 0 | gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem; |
1361 | |
|
1362 | 0 | pstat->allocated = cmem->used; |
1363 | 0 | pstat->used = cmem->used - cmem->total_free; |
1364 | 0 | pstat->max_used = cmem->max_used; |
1365 | 0 | pstat->limit = (size_t)~1; /* No limit on allocations */ |
1366 | 0 | pstat->is_thread_safe = false; /* this allocator does not have an internal mutex */ |
1367 | 0 | } |
1368 | | |
/* Return the stable allocator for this memory: this allocator is its
 * own stable allocator (it never relocates or restores objects). */
static gs_memory_t *
chunk_stable(gs_memory_t * mem)
{
    return mem;
}
1374 | | |
/* Enable/disable freeing: intentionally a no-op for this allocator. */
static void
chunk_enable_free(gs_memory_t * mem, bool enable)
{
}
1379 | | |
1380 | | static void chunk_set_object_type(gs_memory_t *mem, void *ptr, gs_memory_type_ptr_t type) |
1381 | 0 | { |
1382 | 0 | chunk_obj_node_t *obj = (chunk_obj_node_t *)(((byte *)ptr) - SIZEOF_ROUND_ALIGN(chunk_obj_node_t)); |
1383 | |
|
1384 | 0 | if (ptr == 0) |
1385 | 0 | return; |
1386 | 0 | obj->type = type; |
1387 | 0 | } |
1388 | | |
/* Turn deferred-free mode on (defer != 0) or off (defer == 0).
 * While deferring, chunk_free_object queues objects instead of freeing
 * them.  On switch-off this drains the queues in two passes:
 * first finalize everything (with 'deferring' still set, so frees
 * triggered by finalizers are themselves queued rather than recursing),
 * then clear the flag and actually free the accumulated objects. */
static void chunk_defer_frees(gs_memory_t *mem, int defer)
{
    gs_memory_chunk_t *cmem = (gs_memory_chunk_t *)mem;
    chunk_obj_node_t *n;

    if (defer == 0) {
        /* Run through and finalise everything on the finalize list. This
         * might cause other stuff to be put onto the finalize list. As we
         * finalize stuff we move it to the free list. */
        while (cmem->defer_finalize_list) {
            n = cmem->defer_finalize_list;
            cmem->defer_finalize_list = n->defer_next;
            if (n->type) {
                if (n->type->finalize)
                    n->type->finalize(mem, ((byte *)n) + SIZEOF_ROUND_ALIGN(chunk_obj_node_t));
                /* Clear the type so the eventual free won't re-finalize. */
                n->type = NULL;
            }
            n->defer_next = cmem->defer_free_list;
            cmem->defer_free_list = n;
        }
    }
    /* Flag must change between the two passes; see function comment. */
    cmem->deferring = defer;
    if (defer == 0) {
        /* Now run through and free everything on the free list */
        while (cmem->defer_free_list) {
            n = cmem->defer_free_list;
            cmem->defer_free_list = n->defer_next;
            chunk_free_object(mem, ((byte *)n) + SIZEOF_ROUND_ALIGN(chunk_obj_node_t), "deferred free");
        }
    }
}
1420 | | |
/* Consolidate free space: a no-op here, since adjacent free blocks are
 * merged eagerly at free time. */
static void
chunk_consolidate_free(gs_memory_t *mem)
{
}
1425 | | |
1426 | | /* accessors to get size and type given the pointer returned to the client */ |
1427 | | static size_t |
1428 | | chunk_object_size(gs_memory_t * mem, const void *ptr) |
1429 | 0 | { |
1430 | 0 | if (ptr != NULL) { |
1431 | 0 | chunk_obj_node_t *obj = (chunk_obj_node_t *)(((byte *)ptr) - SIZEOF_ROUND_ALIGN(chunk_obj_node_t)); |
1432 | |
|
1433 | 0 | return obj->size - obj->padding; |
1434 | 0 | } |
1435 | | |
1436 | 0 | return 0; |
1437 | 0 | } |
1438 | | |
1439 | | static gs_memory_type_ptr_t |
1440 | | chunk_object_type(const gs_memory_t * mem, const void *ptr) |
1441 | 70.6k | { |
1442 | 70.6k | chunk_obj_node_t *obj = (chunk_obj_node_t *)(((byte *)ptr) - SIZEOF_ROUND_ALIGN(chunk_obj_node_t)); |
1443 | 70.6k | return obj->type; |
1444 | 70.6k | } |
1445 | | |
/* Register a GC root: no-op (returns success); this allocator is not
 * garbage collected. */
static int
chunk_register_root(gs_memory_t * mem, gs_gc_root_t ** rp, gs_ptr_type_t ptype,
                    void **up, client_name_t cname)
{
    return 0;
}
1452 | | |
/* Unregister a GC root: no-op; this allocator is not garbage collected. */
static void
chunk_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
{
}