/src/server/mysys/my_alloc.c
/*
   Copyright (c) 2000, 2010, Oracle and/or its affiliates
   Copyright (c) 2010, 2020, MariaDB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */

/* Routines to handle mallocing of results which will be freed at the same time */

#include <my_global.h>
#include <my_sys.h>
#include <m_string.h>
#include <my_bit.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

/*
  Force EXTRA_DEBUG on. Together with HAVE_valgrind this makes every
  allocation below use a separate my_malloc() call, so that valgrind can
  detect accesses outside of the allocated areas.
*/
#undef EXTRA_DEBUG
#define EXTRA_DEBUG

#define ROOT_FLAG_THREAD_SPECIFIC 1
#define ROOT_FLAG_MPROTECT        2
#define ROOT_FLAG_READ_ONLY       4

/* data packed in MEM_ROOT -> min_malloc */

/* Don't allocate too small blocks */
#define ROOT_MIN_BLOCK_SIZE 256

#define MALLOC_FLAG(root) (((root)->flags & ROOT_FLAG_THREAD_SPECIFIC) ? MY_THREAD_SPECIFIC : 0)

/* Mark the still unused part of block X as free memory for memory checkers */
#define TRASH_MEM(X) TRASH_FREE(((char*)(X) + ((X)->size-(X)->left)), (X)->left)


/*
  Alloc memory through either my_malloc or mmap()
*/

static void *root_alloc(MEM_ROOT *root, size_t size, size_t *alloced_size,
                        myf my_flags)
{
  *alloced_size= size;
#if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS)
  if (root->flags & ROOT_FLAG_MPROTECT)
  {
    void *res;
    *alloced_size= MY_ALIGN(size, my_system_page_size);
    res= my_mmap(0, *alloced_size, PROT_READ | PROT_WRITE,
                 MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (res == MAP_FAILED)
      res= 0;
    return res;
  }
#endif /* HAVE_MMAP */

  return my_malloc(root->psi_key, size,
                   my_flags | MALLOC_FLAG(root));
}

static void root_free(MEM_ROOT *root, void *ptr, size_t size)
{
#if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS)
  if (root->flags & ROOT_FLAG_MPROTECT)
    my_munmap(ptr, size);
  else
#endif
    my_free(ptr);
}


/*
  Calculate block sizes to use

  Sizes will be updated to the next power of 2, minus the operating
  system's memory management overhead.

  The idea is to reduce memory fragmentation, as most system memory
  allocators use a power-of-2 block size internally.
*/

static void calculate_block_sizes(MEM_ROOT *mem_root, size_t block_size,
                                  size_t *pre_alloc_size)
{
  size_t pre_alloc= *pre_alloc_size;

  if (mem_root->flags & ROOT_FLAG_MPROTECT)
  {
    mem_root->block_size= MY_ALIGN(block_size, my_system_page_size);
    if (pre_alloc)
      pre_alloc= MY_ALIGN(pre_alloc, my_system_page_size);
  }
  else
  {
    DBUG_ASSERT(block_size <= UINT_MAX32);
    mem_root->block_size= (my_round_up_to_next_power((uint32) block_size -
                                                     MALLOC_OVERHEAD)-
                           MALLOC_OVERHEAD);
    if (pre_alloc)
      pre_alloc= (my_round_up_to_next_power((uint32) pre_alloc -
                                            MALLOC_OVERHEAD)-
                  MALLOC_OVERHEAD);
  }
  *pre_alloc_size= pre_alloc;
}

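/*
  Worked example (illustrative; assumes MALLOC_OVERHEAD is 8 and a 4K page
  size, both of which can differ between builds): for block_size= 1024 on a
  non-mprotect root, my_round_up_to_next_power(1024 - 8) rounds 1016 up to
  1024, and subtracting MALLOC_OVERHEAD again yields a final
  mem_root->block_size of 1016, so the block plus the allocator's own
  bookkeeping fill a 1024-byte bucket exactly. On an mprotect root the same
  request is instead page-aligned: MY_ALIGN(1024, 4096) = 4096.
*/
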

/*
  Initialize memory root

  SYNOPSIS
    init_alloc_root()
      key            - key to register instrumented memory
      mem_root       - memory root to initialize
      block_size     - size of chunks (blocks) used for memory allocation.
                       Will be updated to the next power of 2, minus
                       internal and system memory management size. This
                       reduces memory fragmentation, as most system memory
                       allocators use a power-of-2 block size internally.
                       (This is the external size of the chunk, i.e. it
                       should include the memory required for internal
                       structures and thus be no less than
                       ROOT_MIN_BLOCK_SIZE.)
      pre_alloc_size - if non-0, the size of the block that should be
                       pre-allocated during memory root initialization.
      my_flags         MY_THREAD_SPECIFIC flag for my_malloc
                       MY_ROOT_USE_MPROTECT for read-only protected memory

  DESCRIPTION
    This function prepares a memory root for further use, sets the initial
    size of the chunks used for memory allocation and pre-allocates the
    first block if specified. Although an error can happen during execution
    of this function if pre_alloc_size is non-0, it won't be reported.
    Instead it will be reported as an error in the first alloc_root() call
    on this memory root.
*/

void init_alloc_root(PSI_memory_key key, MEM_ROOT *mem_root, size_t block_size,
                     size_t pre_alloc_size __attribute__((unused)),
                     myf my_flags)
{
  DBUG_ENTER("init_alloc_root");
  DBUG_PRINT("enter",("root: %p prealloc: %zu", mem_root, pre_alloc_size));

  mem_root->free= mem_root->used= mem_root->pre_alloc= 0;
  mem_root->min_malloc= 32 + REDZONE_SIZE;

  /* Ensure block size is not too small (we need space for memory accounting) */
  block_size= MY_MAX(block_size, ROOT_MIN_BLOCK_SIZE);

  mem_root->flags= 0;
  DBUG_ASSERT(!test_all_bits(mem_root->flags,
                             (MY_THREAD_SPECIFIC | MY_ROOT_USE_MPROTECT)));
  if (my_flags & MY_THREAD_SPECIFIC)
    mem_root->flags|= ROOT_FLAG_THREAD_SPECIFIC;
  if (my_flags & MY_ROOT_USE_MPROTECT)
    mem_root->flags|= ROOT_FLAG_MPROTECT;

  calculate_block_sizes(mem_root, block_size, &pre_alloc_size);

  mem_root->error_handler= 0;
  mem_root->block_num= 4;                       /* We shift this with >>2 */
  mem_root->first_block_usage= 0;
  mem_root->psi_key= key;

#if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG))
  if (pre_alloc_size)
  {
    size_t alloced_size;
    if ((mem_root->free= mem_root->pre_alloc=
         (USED_MEM*) root_alloc(mem_root, pre_alloc_size, &alloced_size,
                                MYF(0))))
    {
      mem_root->free->size= alloced_size;
      mem_root->free->left= alloced_size - ALIGN_SIZE(sizeof(USED_MEM));
      mem_root->free->next= 0;
      TRASH_MEM(mem_root->free);
    }
  }
#endif
  DBUG_VOID_RETURN;
}

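/*
  Usage sketch (illustrative, not from the original source): a typical
  MEM_ROOT lifecycle, assuming the caller has no PSI instrumentation key
  and so passes PSI_NOT_INSTRUMENTED.

    MEM_ROOT root;
    init_alloc_root(PSI_NOT_INSTRUMENTED, &root, 1024, 0, MYF(0));
    char *buf= (char*) alloc_root(&root, 100);  // lives until free_root()
    if (buf)
      memset(buf, 0, 100);
    free_root(&root, MYF(0));                   // releases all blocks
*/
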
/*
  SYNOPSIS
    reset_root_defaults()
    mem_root        memory root to change defaults of
    block_size      new value of block size. Must be greater than or equal
                    to ALLOC_ROOT_MIN_BLOCK_SIZE (this value is about
                    68 bytes and depends on platform and compilation flags)
    pre_alloc_size  new size of the preallocated block. If not zero, it
                    must be equal to or greater than the block size;
                    a zero value means 'no prealloc'.
  DESCRIPTION
    The function aligns and assigns the new value to the block size; then
    it tries to reuse one of the existing blocks as the prealloc block, or
    mallocs a new one of the requested size. If no block can be reused,
    all unused blocks are freed before allocation.
*/

void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size,
                         size_t pre_alloc_size __attribute__((unused)))
{
  DBUG_ENTER("reset_root_defaults");
  DBUG_ASSERT(alloc_root_inited(mem_root));

  calculate_block_sizes(mem_root, block_size, &pre_alloc_size);

#if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG))
  if (pre_alloc_size)
  {
    size_t size= mem_root->block_size, alloced_size;
    if (!mem_root->pre_alloc ||
        mem_root->pre_alloc->size != mem_root->block_size)
    {
      USED_MEM *mem, **prev= &mem_root->free;
      /*
        Free unused blocks, so that subsequent calls
        to reset_root_defaults won't eat away memory.
      */
      while (*prev)
      {
        mem= *prev;
        if (mem->size == size)
        {
          /* We found a suitable block, no need to do anything else */
          mem_root->pre_alloc= mem;
          DBUG_VOID_RETURN;
        }
        if (mem->left + ALIGN_SIZE(sizeof(USED_MEM)) == mem->size)
        {
          /* remove block from the list and free it */
          *prev= mem->next;
          root_free(mem_root, mem, mem->size);
        }
        else
          prev= &mem->next;
      }
      /* Allocate new prealloc block and add it to the end of free list */
      if ((mem= (USED_MEM *) root_alloc(mem_root, size, &alloced_size,
                                        MYF(MY_WME))))
      {
        mem->size= alloced_size;
        mem->left= alloced_size - ALIGN_SIZE(sizeof(USED_MEM));
        mem->next= *prev;
        *prev= mem_root->pre_alloc= mem;
        TRASH_MEM(mem);
      }
      else
        mem_root->pre_alloc= 0;
    }
  }
  else
#endif
    mem_root->pre_alloc= 0;

  DBUG_VOID_RETURN;
}

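/*
  Usage sketch (illustrative, not from the original source): retuning a
  long-lived root between units of work, growing the block and prealloc
  sizes when a bigger workload is expected. Per the SYNOPSIS above, the
  prealloc size must be at least the block size.

    reset_root_defaults(&root, 4096, 8192);  // align sizes, reuse or
                                             // allocate a prealloc block
*/
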

void *alloc_root(MEM_ROOT *mem_root, size_t length)
{
  size_t get_size, block_size;
  uchar* point;
  USED_MEM *next= 0;
  USED_MEM **prev;
  size_t original_length __attribute__((unused)) = length;
  DBUG_ENTER("alloc_root");
  DBUG_PRINT("enter",("root: %p length: %zu", mem_root, length));
  DBUG_ASSERT(alloc_root_inited(mem_root));
  DBUG_ASSERT((mem_root->flags & ROOT_FLAG_READ_ONLY) == 0);

  DBUG_EXECUTE_IF("simulate_out_of_memory",
                  {
                    if (mem_root->error_handler)
                      (*mem_root->error_handler)();
                    DBUG_SET("-d,simulate_out_of_memory");
                    DBUG_RETURN((void*) 0); /* purecov: inspected */
                  });

#if defined(HAVE_valgrind) && defined(EXTRA_DEBUG)
  if (!(mem_root->flags & ROOT_FLAG_MPROTECT))
  {
    length+= ALIGN_SIZE(sizeof(USED_MEM));
    if (!(next = (USED_MEM*) my_malloc(mem_root->psi_key, length,
                                       MYF(MY_WME | ME_FATAL |
                                           MALLOC_FLAG(mem_root)))))
    {
      if (mem_root->error_handler)
        (*mem_root->error_handler)();
      DBUG_RETURN((uchar*) 0);                  /* purecov: inspected */
    }
    next->next= mem_root->used;
    next->left= 0;
    next->size= length;
    mem_root->used= next;
    DBUG_PRINT("exit",("ptr: %p", (((char*)next)+ALIGN_SIZE(sizeof(USED_MEM)))));
    DBUG_RETURN((((uchar*) next)+ALIGN_SIZE(sizeof(USED_MEM))));
  }
#endif /* defined(HAVE_valgrind) && defined(EXTRA_DEBUG) */

  length= ALIGN_SIZE(length) + REDZONE_SIZE;
  if ((*(prev= &mem_root->free)) != NULL)
  {
    if ((*prev)->left < length &&
        mem_root->first_block_usage++ >= ALLOC_MAX_BLOCK_USAGE_BEFORE_DROP &&
        (*prev)->left < ALLOC_MAX_BLOCK_TO_DROP)
    {
      next= *prev;
      *prev= next->next;                        /* Remove block from free list */
      next->next= mem_root->used;               /* Add to used list */
      mem_root->used= next;
      mem_root->first_block_usage= 0;
    }
    for (next= *prev ; next && next->left < length ; next= next->next)
      prev= &next->next;
  }
  if (! next)
  {                                             /* Time to alloc new block */
    size_t alloced_length;

    /* Increase block size over time if there is a lot of mallocs */
    /* when changing this logic, update root_size() to match */
    block_size= (MY_ALIGN(mem_root->block_size, ROOT_MIN_BLOCK_SIZE) *
                 (mem_root->block_num >> 2)- MALLOC_OVERHEAD);
    get_size= length + ALIGN_SIZE(sizeof(USED_MEM));
    get_size= MY_MAX(get_size, block_size);

    if (!(next= (USED_MEM*) root_alloc(mem_root, get_size, &alloced_length,
                                       MYF(MY_WME | ME_FATAL))))
    {
      if (mem_root->error_handler)
        (*mem_root->error_handler)();
      DBUG_RETURN((void*) 0);                   /* purecov: inspected */
    }
    mem_root->block_num++;
    DBUG_ASSERT(*prev == 0);
    next->next= 0;
    next->size= alloced_length;
    next->left= alloced_length - ALIGN_SIZE(sizeof(USED_MEM));
    *prev= next;
    TRASH_MEM(next);
  }
  else
  {
    /* Reset first_block_usage if we used the first block */
    if (prev == &mem_root->free)
      mem_root->first_block_usage= 0;
  }

  point= (uchar*) ((char*) next+ (next->size-next->left));
  if ((next->left-= length) < mem_root->min_malloc)
  {
    /* Full block. Move the block from the free list to the used list */
    *prev= next->next;
    next->next= mem_root->used;
    mem_root->used= next;
  }
  point+= REDZONE_SIZE;
  TRASH_ALLOC(point, original_length);
  DBUG_PRINT("exit",("ptr: %p", point));
  DBUG_RETURN((void*) point);
}


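/*
  Usage sketch (illustrative, not from the original source): alloc_root()
  returns NULL on failure, and an optional error handler set on the root
  is invoked first; the handler name below is hypothetical.

    extern void my_oom_handler(void);           /* hypothetical */

    root.error_handler= my_oom_handler;
    void *p= alloc_root(&root, 1024);
    if (!p)
      return;                                   /* handler already ran */
*/
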
/*
  Allocate many pointers at the same time.

  DESCRIPTION
    ptr1, ptr2, etc. all point into one big allocated memory area.

  SYNOPSIS
    multi_alloc_root()
      root           Memory root
      ptr1, length1  Multiple arguments terminated by a NULL pointer
      ptr2, length2  ...
      ...
      NULL

  RETURN VALUE
    A pointer to the beginning of the allocated memory block
    in case of success or NULL if out of memory.
*/

void *multi_alloc_root(MEM_ROOT *root, ...)
{
  va_list args;
  char **ptr, *start, *res;
  size_t tot_length, length;
  DBUG_ENTER("multi_alloc_root");
  /*
    We don't need to do DBUG_PRINT here as it will be done when alloc_root
    is called
  */

  va_start(args, root);
  tot_length= 0;
  while ((ptr= va_arg(args, char **)))
  {
    length= va_arg(args, uint);
    tot_length+= ALIGN_SIZE(length);
#ifndef DBUG_OFF
    tot_length+= ALIGN_SIZE(1);
#endif
  }
  va_end(args);

  if (!(start= (char*) alloc_root(root, tot_length)))
    DBUG_RETURN(0);                             /* purecov: inspected */

  va_start(args, root);
  res= start;
  while ((ptr= va_arg(args, char **)))
  {
    *ptr= res;
    length= va_arg(args, uint);
    res+= ALIGN_SIZE(length);
#ifndef DBUG_OFF
    TRASH_FREE(res, ALIGN_SIZE(1));
    res+= ALIGN_SIZE(1);
#endif
  }
  va_end(args);
  DBUG_RETURN((void*) start);
}

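/*
  Usage sketch (illustrative, not from the original source): carving two
  buffers out of one alloc_root() call. Lengths are read from the va_list
  as uint, so they are cast accordingly, and the list ends with NullS.

    char  *name;
    uchar *data;
    if (!multi_alloc_root(&root,
                          &name, (uint) 64,
                          &data, (uint) 256,
                          NullS))
      return;                                   /* out of memory */
*/
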

#if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG))
/** Mark all data in blocks free for reuse */

static void mark_blocks_free(MEM_ROOT* root)
{
  USED_MEM *next;
  USED_MEM **last;

  /* iterate through (partially) free blocks, mark them free */
  last= &root->free;
  for (next= root->free; next; next= *(last= &next->next))
  {
    next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(next);
  }

  /* Combine the free and the used list */
  *last= next= root->used;

  /* now go through the used blocks and mark them free */
  for (; next; next= next->next)
  {
    next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(next);
  }

  /* Now everything is set; Indicate that nothing is used anymore */
  root->used= 0;
  root->first_block_usage= 0;
}
#endif


/*
  Deallocate everything used by alloc_root, or just move used blocks to
  the free list if called with MY_MARK_BLOCKS_FREE

  SYNOPSIS
    free_root()
      root      Memory root
      MyFlags   Flags for what should be freed:

        MY_MARK_BLOCKS_FREE   Don't free blocks, just mark them free
        MY_KEEP_PREALLOC      If this is not set, then also free the
                              preallocated block

  NOTES
    One can call this function either with a root block initialized with
    init_alloc_root() or with a bzero()-ed block.
    It's also safe to call this multiple times with the same mem_root.
*/

void free_root(MEM_ROOT *root, myf MyFlags)
{
  USED_MEM *next,*old;
  DBUG_ENTER("free_root");
  DBUG_PRINT("enter",("root: %p flags: %lu", root, MyFlags));

#if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG))
  /*
    There is no point in using mark_blocks_free when using valgrind as
    it will not reclaim any memory
  */
  if (MyFlags & MY_MARK_BLOCKS_FREE)
  {
    mark_blocks_free(root);
    DBUG_VOID_RETURN;
  }
#endif
  if (!(MyFlags & MY_KEEP_PREALLOC))
    root->pre_alloc=0;

  for (next=root->used; next ;)
  {
    old=next; next= next->next ;
    if (old != root->pre_alloc)
      root_free(root, old, old->size);
  }
  for (next=root->free ; next ;)
  {
    old=next; next= next->next;
    if (old != root->pre_alloc)
      root_free(root, old, old->size);
  }
  root->used=root->free=0;
  if (root->pre_alloc)
  {
    root->free=root->pre_alloc;
    root->free->left=root->pre_alloc->size-ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(root->pre_alloc);
    root->free->next=0;
  }
  root->block_num= 4;
  root->first_block_usage= 0;
  DBUG_VOID_RETURN;
}


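/*
  Usage sketch (illustrative, not from the original source): a root that is
  reused across iterations. Marking blocks free keeps the allocated memory
  for the next round, which is cheaper than freeing and reallocating.

    for (uint i= 0; i < n_rounds; i++)
    {
      do_work(&root);                           /* hypothetical workload */
      free_root(&root, MYF(MY_MARK_BLOCKS_FREE));
    }
    free_root(&root, MYF(0));                   /* final, real free */
*/
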
/*
  Find block that contains an object and set the pre_alloc to it
*/

void set_prealloc_root(MEM_ROOT *root, char *ptr)
{
  USED_MEM *next;
  for (next=root->used; next ; next=next->next)
  {
    if ((char*) next <= ptr && (char*) next + next->size > ptr)
    {
      root->pre_alloc=next;
      return;
    }
  }
  for (next=root->free ; next ; next=next->next)
  {
    if ((char*) next <= ptr && (char*) next + next->size > ptr)
    {
      root->pre_alloc=next;
      return;
    }
  }
}

/*
  Move allocated objects from one root to another.

  Notes:
  We do not increase 'to->block_num' here, as that variable is used to
  increase block sizes in case of many allocations. This is a special
  case where that does not need to be taken into account.
*/

void move_root(MEM_ROOT *to, MEM_ROOT *from)
{
  USED_MEM *block, *next;
  for (block= from->used; block ; block= next)
  {
    next= block->next;
    block->next= to->used;
    to->used= block;
  }
  from->used= 0;
}

/*
  Prepare a MEM_ROOT for a later truncation. Everything allocated after
  this point can be freed while keeping earlier allocations intact.

  For this to work we cannot allow new allocations in partially filled
  blocks, so remove all non-empty blocks from the memroot. For simplicity,
  let's also remove all used blocks.
*/
void root_make_savepoint(MEM_ROOT *root, MEM_ROOT_SAVEPOINT *sv)
{
  USED_MEM **prev= &root->free, *block= *prev;
  for ( ; block; prev= &block->next, block= *prev)
    if (block->left < block->size - ALIGN_SIZE(sizeof(USED_MEM)))
      break;
  sv->root= root;
  sv->free= block;
  sv->used= root->used;
  sv->first_block_usage= root->first_block_usage;
  *prev= 0;
  root->used= 0;
}

/*
  Restore MEM_ROOT to the state before the savepoint was made.

  Restore old free and used lists.
  Mark all new (after savepoint) used and partially used blocks free
  and put them into the free list.
*/
void root_free_to_savepoint(const MEM_ROOT_SAVEPOINT *sv)
{
  MEM_ROOT *root= sv->root;
  USED_MEM **prev= &root->free, *block= *prev;

  /* iterate through (partially) free blocks, mark them free */
  for ( ; block; prev= &block->next, block= *prev)
  {
    block->left= block->size - ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(block);
  }

  /* Combine the free and the used list */
  *prev= block= root->used;

  /* now go through the used blocks and mark them free */
  for ( ; block; prev= &block->next, block= *prev)
  {
    block->left= block->size - ALIGN_SIZE(sizeof(USED_MEM));
    TRASH_MEM(block);
  }

  /* restore free and used lists from savepoint */
  *prev= sv->free;
  root->used= sv->used;
  root->first_block_usage= prev == &root->free ? sv->first_block_usage : 0;
}

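/*
  Usage sketch (illustrative, not from the original source): temporary
  allocations that are rolled back while earlier ones survive.

    MEM_ROOT_SAVEPOINT sv;
    char *keep= strdup_root(&root, "kept");
    root_make_savepoint(&root, &sv);
    (void) alloc_root(&root, 4096);             /* scratch memory */
    root_free_to_savepoint(&sv);                /* scratch freed, keep valid */
*/
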
/**
  Change protection for all blocks in the mem root
*/

#if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS)
void protect_root(MEM_ROOT *root, int prot)
{
  USED_MEM *next,*old;
  DBUG_ENTER("protect_root");
  DBUG_PRINT("enter",("root: %p prot: %d", root, prot));

  DBUG_ASSERT(root->flags & ROOT_FLAG_MPROTECT);

  for (next= root->used; next ;)
  {
    old= next; next= next->next ;
    mprotect(old, old->size, prot);
  }
  for (next= root->free; next ;)
  {
    old= next; next= next->next ;
    mprotect(old, old->size, prot);
  }
  DBUG_VOID_RETURN;
}
#else
void protect_root(MEM_ROOT *root, int prot)
{
}
#endif /* defined(HAVE_MMAP) && ... */

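/*
  Usage sketch (illustrative, not from the original source): a root whose
  contents are made read-only after being filled. This requires the root
  to have been initialized with MY_ROOT_USE_MPROTECT, so that its blocks
  are page-aligned mmap()ed regions; fill_root() below is hypothetical.

    init_alloc_root(PSI_NOT_INSTRUMENTED, &root, 4096, 0,
                    MYF(MY_ROOT_USE_MPROTECT));
    fill_root(&root);                           /* hypothetical */
    protect_root(&root, PROT_READ);             /* writes now fault */
*/
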

char *strdup_root(MEM_ROOT *root, const char *str)
{
  return strmake_root(root, str, strlen(str));
}


char *strmake_root(MEM_ROOT *root, const char *str, size_t len)
{
  char *pos;
  if ((pos=alloc_root(root,len+1)))
  {
    if (len)
      memcpy(pos,str,len);
    pos[len]=0;
  }
  return pos;
}


void *memdup_root(MEM_ROOT *root, const void *str, size_t len)
{
  char *pos;
  if ((pos=alloc_root(root,len)) && len)
    memcpy(pos,str,len);
  return pos;
}

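/*
  Usage sketch (illustrative, not from the original source): the duplicated
  data lives as long as the root, so no per-string free is needed.

    const char *src= "hello";
    char *copy= strdup_root(&root, src);        /* NUL-terminated copy */
    char *pfx=  strmake_root(&root, src, 3);    /* "hel" plus NUL */
    void *raw=  memdup_root(&root, src, 5);     /* raw bytes, no NUL */
*/
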
LEX_CSTRING safe_lexcstrdup_root(MEM_ROOT *root, const LEX_CSTRING str)
{
  LEX_CSTRING res;
  if (str.length)
    res.str= strmake_root(root, str.str, str.length);
  else
    res.str= (const char *)"";
  res.length= str.length;
  return res;
}


LEX_STRING lex_string_casedn_root(MEM_ROOT *root, CHARSET_INFO *cs,
                                  const char *str, size_t length)
{
  size_t nbytes= length * cs->cset->casedn_multiply(cs);
  LEX_STRING res= {NULL, 0};
  if (!(res.str= alloc_root(root, nbytes + 1)))
    return res;
  res.length= cs->cset->casedn(cs, str, length, res.str, nbytes);
  res.str[res.length]= '\0';
  return res;
}