/src/server/mysys/my_alloc.c
Line | Count | Source |
1 | | /* |
2 | | Copyright (c) 2000, 2010, Oracle and/or its affiliates |
3 | | Copyright (c) 2010, 2020, MariaDB |
4 | | |
5 | | This program is free software; you can redistribute it and/or modify |
6 | | it under the terms of the GNU General Public License as published by |
7 | | the Free Software Foundation; version 2 of the License. |
8 | | |
9 | | This program is distributed in the hope that it will be useful, |
10 | | but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | | GNU General Public License for more details. |
13 | | |
14 | | You should have received a copy of the GNU General Public License |
15 | | along with this program; if not, write to the Free Software |
16 | | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ |
17 | | |
18 | | /* Routines to handle mallocing of results which will be freed at the same time */ |
19 | | |
20 | | #include <my_global.h> |
21 | | #include <my_sys.h> |
22 | | #include <m_string.h> |
23 | | #include <my_bit.h> |
24 | | #ifdef HAVE_SYS_MMAN_H |
25 | | #include <sys/mman.h> |
26 | | #endif |
27 | | |
28 | | #undef EXTRA_DEBUG |
29 | | #define EXTRA_DEBUG |
30 | | |
31 | 0 | #define ROOT_FLAG_THREAD_SPECIFIC 1 |
32 | 0 | #define ROOT_FLAG_MPROTECT 2 |
33 | | #define ROOT_FLAG_READ_ONLY 4 |
34 | | |
35 | | /* data packed in MEM_ROOT -> min_malloc */ |
36 | | |
37 | | /* Don't allocate too small blocks */ |
38 | | #define ROOT_MIN_BLOCK_SIZE 256 |
39 | | |
40 | 0 | #define MALLOC_FLAG(root) (((root)->flags & ROOT_FLAG_THREAD_SPECIFIC) ? MY_THREAD_SPECIFIC : 0) |
41 | | |
42 | 0 | #define TRASH_MEM(X) TRASH_FREE(((char*)(X) + ((X)->size-(X)->left)), (X)->left) |
43 | | |
44 | | |
45 | | /* |
46 | | Allocate memory through either my_malloc() or mmap() |
47 | | */ |
48 | | |
49 | | static void *root_alloc(MEM_ROOT *root, size_t size, size_t *alloced_size, |
50 | | myf my_flags) |
51 | 0 | { |
52 | 0 | *alloced_size= size; |
53 | 0 | #if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS) |
54 | 0 | if (root->flags & ROOT_FLAG_MPROTECT) |
55 | 0 | { |
56 | 0 | void *res; |
57 | 0 | *alloced_size= MY_ALIGN(size, my_system_page_size); |
58 | 0 | res= my_mmap(0, *alloced_size, PROT_READ | PROT_WRITE, |
59 | 0 | MAP_NORESERVE | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
60 | 0 | if (res == MAP_FAILED) |
61 | 0 | res= 0; |
62 | 0 | return res; |
63 | 0 | } |
64 | 0 | #endif /* HAVE_MMAP */ |
65 | | |
66 | 0 | return my_malloc(root->psi_key, size, |
67 | 0 | my_flags | MALLOC_FLAG(root)); |
68 | 0 | } |
69 | | |
70 | | static void root_free(MEM_ROOT *root, void *ptr, size_t size) |
71 | 0 | { |
72 | 0 | #if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS) |
73 | 0 | if (root->flags & ROOT_FLAG_MPROTECT) |
74 | 0 | my_munmap(ptr, size); |
75 | 0 | else |
76 | 0 | #endif |
77 | 0 | my_free(ptr); |
78 | 0 | } |
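
For reference, the MY_ALIGN-style page rounding used above is plain power-of-2 alignment; a stand-alone model of it (the 4096-byte page size is an illustrative assumption):

    /* Round size up to a multiple of page; page must be a power of two. */
    static size_t align_to_page(size_t size, size_t page)
    {
      return (size + page - 1) & ~(page - 1);
    }

    /* e.g. align_to_page(5000, 4096) == 8192: an mprotect-enabled root
       mmap()s whole pages so that mprotect() can later cover each block. */
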
79 | | |
80 | | |
81 | | /* |
82 | | Calculate block sizes to use |
83 | | |
84 | | Sizes will be updated to the next power of 2, minus the operating |
85 | | system memory management size. |
86 | |
87 | | The idea is to reduce memory fragmentation, as most system memory |
88 | | allocators use power-of-2 block sizes internally. |
89 | | */ |
90 | | |
91 | | static void calculate_block_sizes(MEM_ROOT *mem_root, size_t block_size, |
92 | | size_t *pre_alloc_size) |
93 | 0 | { |
94 | 0 | size_t pre_alloc= *pre_alloc_size; |
95 | |
96 | 0 | if (mem_root->flags & ROOT_FLAG_MPROTECT) |
97 | 0 | { |
98 | 0 | mem_root->block_size= MY_ALIGN(block_size, my_system_page_size); |
99 | 0 | if (pre_alloc) |
100 | 0 | pre_alloc= MY_ALIGN(pre_alloc, my_system_page_size); |
101 | 0 | } |
102 | 0 | else |
103 | 0 | { |
104 | 0 | DBUG_ASSERT(block_size <= UINT_MAX32); |
105 | 0 | mem_root->block_size= (my_round_up_to_next_power((uint32) block_size - |
106 | 0 | MALLOC_OVERHEAD)- |
107 | 0 | MALLOC_OVERHEAD); |
108 | 0 | if (pre_alloc) |
109 | 0 | pre_alloc= (my_round_up_to_next_power((uint32) pre_alloc - |
110 | 0 | MALLOC_OVERHEAD)- |
111 | 0 | MALLOC_OVERHEAD); |
112 | 0 | } |
113 | 0 | *pre_alloc_size= pre_alloc; |
114 | 0 | } |
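
As a worked example of the non-mprotect branch, a stand-alone model of the power-of-2 round-up (my_round_up_to_next_power behaves like this for 32-bit values; MALLOC_OVERHEAD being 8 bytes is an assumption about the build):

    #include <stdint.h>

    /* Round v up to the next power of two (v > 0). */
    static uint32_t round_up_pow2(uint32_t v)
    {
      v--;
      v|= v >> 1;  v|= v >> 2;  v|= v >> 4;
      v|= v >> 8;  v|= v >> 16;
      return v + 1;
    }

    /* With block_size= 4096 and MALLOC_OVERHEAD= 8:
       round_up_pow2(4096 - 8) - 8 == 4088, so a later request of 4088
       bytes plus the allocator's own overhead fills a 4096-byte
       power-of-2 bucket exactly, which is what limits fragmentation. */
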
115 | | |
116 | | |
117 | | /* |
118 | | Initialize memory root |
119 | | |
120 | | SYNOPSIS |
121 | | init_alloc_root() |
122 | | key - key to register instrumented memory |
123 | | mem_root - memory root to initialize |
124 | | block_size - size of chunks (blocks) used for memory allocation. |
125 | | Will be updated to the next power of 2, minus |
126 | | internal and system memory management size. This |
127 | | will reduce memory fragmentation, as most system memory |
128 | | allocators use power-of-2 block sizes internally. |
129 | | (It is the external size of the chunk, i.e. it should |
130 | | include memory required for internal structures; thus it |
131 | | should be no less than ROOT_MIN_BLOCK_SIZE.) |
132 | | pre_alloc_size - if non-0, then size of block that should be |
133 | | pre-allocated during memory root initialization. |
134 | | my_flags MY_THREAD_SPECIFIC flag for my_malloc |
135 | | MY_ROOT_USE_MPROTECT for read-only protected memory |
136 | | |
137 | | DESCRIPTION |
138 | | This function prepares the memory root for further use and sets the |
139 | | initial chunk size for memory allocation, pre-allocating the first |
140 | | block if specified. Although an error can happen during this function |
141 | | if pre_alloc_size is non-0, it won't be reported. Instead it will be |
142 | | reported as an error by the first alloc_root() on this memory root. |
143 | | */ |
144 | | |
145 | | void init_alloc_root(PSI_memory_key key, MEM_ROOT *mem_root, size_t block_size, |
146 | | size_t pre_alloc_size __attribute__((unused)), |
147 | | myf my_flags) |
148 | 0 | { |
149 | 0 | DBUG_ENTER("init_alloc_root"); |
150 | 0 | DBUG_PRINT("enter",("root: %p prealloc: %zu", mem_root, pre_alloc_size)); |
151 | |
152 | 0 | mem_root->free= mem_root->used= mem_root->pre_alloc= 0; |
153 | 0 | mem_root->min_malloc= 32 + REDZONE_SIZE; |
154 | 0 | mem_root->block_size= MY_MAX(block_size, ROOT_MIN_BLOCK_SIZE); |
155 | 0 | mem_root->flags= 0; |
156 | 0 | DBUG_ASSERT(!test_all_bits(mem_root->flags, |
157 | 0 | (MY_THREAD_SPECIFIC | MY_ROOT_USE_MPROTECT))); |
158 | 0 | if (my_flags & MY_THREAD_SPECIFIC) |
159 | 0 | mem_root->flags|= ROOT_FLAG_THREAD_SPECIFIC; |
160 | 0 | if (my_flags & MY_ROOT_USE_MPROTECT) |
161 | 0 | mem_root->flags|= ROOT_FLAG_MPROTECT; |
162 | |
163 | 0 | calculate_block_sizes(mem_root, block_size, &pre_alloc_size); |
164 | |
165 | 0 | mem_root->error_handler= 0; |
166 | 0 | mem_root->block_num= 4; /* We shift this with >>2 */ |
167 | 0 | mem_root->first_block_usage= 0; |
168 | 0 | mem_root->psi_key= key; |
169 | |
170 | 0 | #if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG)) |
171 | 0 | if (pre_alloc_size) |
172 | 0 | { |
173 | 0 | size_t alloced_size; |
174 | 0 | if ((mem_root->free= mem_root->pre_alloc= |
175 | 0 | (USED_MEM*) root_alloc(mem_root, pre_alloc_size, &alloced_size, |
176 | 0 | MYF(0)))) |
177 | 0 | { |
178 | 0 | mem_root->free->size= alloced_size; |
179 | 0 | mem_root->free->left= alloced_size - ALIGN_SIZE(sizeof(USED_MEM)); |
180 | 0 | mem_root->free->next= 0; |
181 | 0 | TRASH_MEM(mem_root->free); |
182 | 0 | } |
183 | 0 | } |
184 | 0 | #endif |
185 | 0 | DBUG_VOID_RETURN; |
186 | 0 | } |
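
A minimal lifecycle sketch for this API (hypothetical caller, not part of this file; assumes the mysys headers and that PSI_NOT_INSTRUMENTED is an acceptable instrumentation key):

    #include <my_global.h>
    #include <my_sys.h>

    static void root_lifecycle_example(void)
    {
      MEM_ROOT root;
      /* 4K chunks; pre-allocate one 8K block up front */
      init_alloc_root(PSI_NOT_INSTRUMENTED, &root, 4096, 8192, MYF(0));
      /* a failed prealloc is silent; it surfaces at the first alloc_root() */
      char *buf= (char*) alloc_root(&root, 128);
      if (buf)
        memset(buf, 0, 128);
      free_root(&root, MYF(0));     /* all blocks are released in one call */
    }
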
187 | | |
188 | | /* |
189 | | SYNOPSIS |
190 | | reset_root_defaults() |
191 | | mem_root memory root to change defaults of |
192 | | block_size new value of the block size. Must be greater than |
193 | | or equal to ALLOC_ROOT_MIN_BLOCK_SIZE (this value is |
194 | | about 68 bytes and depends on platform and compilation flags) |
195 | | pre_alloc_size new size of the preallocated block. If non-zero, it |
196 | | must be equal to or greater than the block size; |
197 | | zero means 'no prealloc'. |
198 | | DESCRIPTION |
199 | | The function aligns and assigns the new value to the block size; then |
200 | | it tries to reuse one of the existing blocks as the prealloc block, or |
201 | | mallocs a new one of the requested size. If no block can be reused, all |
202 | | unused blocks are freed before the allocation. |
203 | | */ |
204 | | |
205 | | void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size, |
206 | | size_t pre_alloc_size __attribute__((unused))) |
207 | 0 | { |
208 | 0 | DBUG_ENTER("reset_root_defaults"); |
209 | 0 | DBUG_ASSERT(alloc_root_inited(mem_root)); |
210 | |
211 | 0 | calculate_block_sizes(mem_root, block_size, &pre_alloc_size); |
212 | |
213 | 0 | #if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG)) |
214 | 0 | if (pre_alloc_size) |
215 | 0 | { |
216 | 0 | size_t size= mem_root->block_size, alloced_size; |
217 | 0 | if (!mem_root->pre_alloc || |
218 | 0 | mem_root->pre_alloc->size != mem_root->block_size) |
219 | 0 | { |
220 | 0 | USED_MEM *mem, **prev= &mem_root->free; |
221 | | /* |
222 | | Free unused blocks, so that subsequent calls |
223 | | to reset_root_defaults won't eat away memory. |
224 | | */ |
225 | 0 | while (*prev) |
226 | 0 | { |
227 | 0 | mem= *prev; |
228 | 0 | if (mem->size == size) |
229 | 0 | { |
230 | | /* We found a suitable block, no need to do anything else */ |
231 | 0 | mem_root->pre_alloc= mem; |
232 | 0 | DBUG_VOID_RETURN; |
233 | 0 | } |
234 | 0 | if (mem->left + ALIGN_SIZE(sizeof(USED_MEM)) == mem->size) |
235 | 0 | { |
236 | | /* remove block from the list and free it */ |
237 | 0 | *prev= mem->next; |
238 | 0 | root_free(mem_root, mem, mem->size); |
239 | 0 | } |
240 | 0 | else |
241 | 0 | prev= &mem->next; |
242 | 0 | } |
243 | | /* Allocate new prealloc block and add it to the end of free list */ |
244 | 0 | if ((mem= (USED_MEM *) root_alloc(mem_root, size, &alloced_size, |
245 | 0 | MYF(MY_WME)))) |
246 | 0 | { |
247 | 0 | mem->size= alloced_size; |
248 | 0 | mem->left= alloced_size - ALIGN_SIZE(sizeof(USED_MEM)); |
249 | 0 | mem->next= *prev; |
250 | 0 | *prev= mem_root->pre_alloc= mem; |
251 | 0 | TRASH_MEM(mem); |
252 | 0 | } |
253 | 0 | else |
254 | 0 | mem_root->pre_alloc= 0; |
255 | 0 | } |
256 | 0 | } |
257 | 0 | else |
258 | 0 | #endif |
259 | 0 | mem_root->pre_alloc= 0; |
260 | | |
261 | 0 | DBUG_VOID_RETURN; |
262 | 0 | } |
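
A sketch of the intended call pattern (the sizes are hypothetical); this is how a long-lived root can be re-tuned between uses:

    /* Grow an already-initialized root to 16K chunks with a 16K prealloc.
       An existing block whose size matches the new block size is kept as
       the prealloc block; completely unused blocks are freed first. */
    reset_root_defaults(&root, 16384, 16384);
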
263 | | |
264 | | |
265 | | void *alloc_root(MEM_ROOT *mem_root, size_t length) |
266 | 0 | { |
267 | 0 | size_t get_size, block_size; |
268 | 0 | uchar* point; |
269 | 0 | reg1 USED_MEM *next= 0; |
270 | 0 | reg2 USED_MEM **prev; |
271 | 0 | size_t original_length __attribute__((unused)) = length; |
272 | 0 | DBUG_ENTER("alloc_root"); |
273 | 0 | DBUG_PRINT("enter",("root: %p", mem_root)); |
274 | 0 | DBUG_ASSERT(alloc_root_inited(mem_root)); |
275 | 0 | DBUG_ASSERT((mem_root->flags & ROOT_FLAG_READ_ONLY) == 0); |
276 | |
277 | 0 | DBUG_EXECUTE_IF("simulate_out_of_memory", |
278 | 0 | { |
279 | 0 | if (mem_root->error_handler) |
280 | 0 | (*mem_root->error_handler)(); |
281 | 0 | DBUG_SET("-d,simulate_out_of_memory"); |
282 | 0 | DBUG_RETURN((void*) 0); /* purecov: inspected */ |
283 | 0 | }); |
284 | |
285 | | #if defined(HAVE_valgrind) && defined(EXTRA_DEBUG) |
286 | | if (!(mem_root->flags & ROOT_FLAG_MPROTECT)) |
287 | | { |
288 | | length+= ALIGN_SIZE(sizeof(USED_MEM)); |
289 | | if (!(next = (USED_MEM*) my_malloc(mem_root->psi_key, length, |
290 | | MYF(MY_WME | ME_FATAL | |
291 | | MALLOC_FLAG(mem_root))))) |
292 | | { |
293 | | if (mem_root->error_handler) |
294 | | (*mem_root->error_handler)(); |
295 | | DBUG_RETURN((uchar*) 0); /* purecov: inspected */ |
296 | | } |
297 | | next->next= mem_root->used; |
298 | | next->left= 0; |
299 | | next->size= length; |
300 | | mem_root->used= next; |
301 | | DBUG_PRINT("exit",("ptr: %p", (((char*)next)+ALIGN_SIZE(sizeof(USED_MEM))))); |
302 | | DBUG_RETURN((((uchar*) next)+ALIGN_SIZE(sizeof(USED_MEM)))); |
303 | | } |
304 | | #endif /* defined(HAVE_valgrind) && defined(EXTRA_DEBUG) */ |
305 | |
306 | 0 | length= ALIGN_SIZE(length) + REDZONE_SIZE; |
307 | 0 | if ((*(prev= &mem_root->free)) != NULL) |
308 | 0 | { |
309 | 0 | if ((*prev)->left < length && |
310 | 0 | mem_root->first_block_usage++ >= ALLOC_MAX_BLOCK_USAGE_BEFORE_DROP && |
311 | 0 | (*prev)->left < ALLOC_MAX_BLOCK_TO_DROP) |
312 | 0 | { |
313 | 0 | next= *prev; |
314 | 0 | *prev= next->next; /* Remove block from list */ |
315 | 0 | next->next= mem_root->used; |
316 | 0 | mem_root->used= next; |
317 | 0 | mem_root->first_block_usage= 0; |
318 | 0 | } |
319 | 0 | for (next= *prev ; next && next->left < length ; next= next->next) |
320 | 0 | prev= &next->next; |
321 | 0 | } |
322 | 0 | if (! next) |
323 | 0 | { /* Time to alloc new block */ |
324 | 0 | size_t alloced_length; |
325 | | |
326 | | /* Increase block size over time if there is a lot of mallocs */ |
327 | 0 | block_size= (MY_ALIGN(mem_root->block_size, ROOT_MIN_BLOCK_SIZE) * |
328 | 0 | (mem_root->block_num >> 2)- MALLOC_OVERHEAD); |
329 | 0 | get_size= length + ALIGN_SIZE(sizeof(USED_MEM)); |
330 | 0 | get_size= MY_MAX(get_size, block_size); |
331 | |
332 | 0 | if (!(next= (USED_MEM*) root_alloc(mem_root, get_size, &alloced_length, |
333 | 0 | MYF(MY_WME | ME_FATAL)))) |
334 | 0 | { |
335 | 0 | if (mem_root->error_handler) |
336 | 0 | (*mem_root->error_handler)(); |
337 | 0 | DBUG_RETURN((void*) 0); /* purecov: inspected */ |
338 | 0 | } |
339 | 0 | mem_root->block_num++; |
340 | 0 | next->next= *prev; |
341 | 0 | next->size= alloced_length; |
342 | 0 | next->left= alloced_length - ALIGN_SIZE(sizeof(USED_MEM)); |
343 | 0 | *prev=next; |
344 | 0 | TRASH_MEM(next); |
345 | 0 | } |
346 | | |
347 | 0 | point= (uchar*) ((char*) next+ (next->size-next->left)); |
348 | | /* TODO: next part may be unneeded due to the mem_root->first_block_usage counter */ |
349 | 0 | if ((next->left-= length) < mem_root->min_malloc) |
350 | 0 | { /* Full block */ |
351 | 0 | *prev= next->next; /* Remove block from list */ |
352 | 0 | next->next= mem_root->used; |
353 | 0 | mem_root->used= next; |
354 | 0 | mem_root->first_block_usage= 0; |
355 | 0 | } |
356 | 0 | point+= REDZONE_SIZE; |
357 | 0 | TRASH_ALLOC(point, original_length); |
358 | 0 | DBUG_PRINT("exit",("ptr: %p", point)); |
359 | 0 | DBUG_RETURN((void*) point); |
360 | 0 | } |
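
A sketch of the error-handler hook used above (the handler body and wrapper are hypothetical):

    #include <my_global.h>
    #include <my_sys.h>
    #include <stdio.h>

    static void report_oom(void)
    {
      fprintf(stderr, "MEM_ROOT allocation failed\n");
    }

    static void *checked_alloc(MEM_ROOT *root, size_t len)
    {
      root->error_handler= report_oom;  /* invoked on each failed request */
      return alloc_root(root, len);     /* returns NULL on failure */
    }
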
361 | | |
362 | | |
363 | | /* |
364 | | Allocate many pointers at the same time. |
365 | | |
366 | | DESCRIPTION |
367 | | ptr1, ptr2, etc. all point into one big allocated memory area. |
368 | | |
369 | | SYNOPSIS |
370 | | multi_alloc_root() |
371 | | root Memory root |
372 | | ptr1, length1 Multiple arguments terminated by a NULL pointer |
373 | | ptr2, length2 ... |
374 | | ... |
375 | | NULL |
376 | | |
377 | | RETURN VALUE |
378 | | A pointer to the beginning of the allocated memory block |
379 | | in case of success or NULL if out of memory. |
380 | | */ |
381 | | |
382 | | void *multi_alloc_root(MEM_ROOT *root, ...) |
383 | 0 | { |
384 | 0 | va_list args; |
385 | 0 | char **ptr, *start, *res; |
386 | 0 | size_t tot_length, length; |
387 | 0 | DBUG_ENTER("multi_alloc_root"); |
388 | | /* |
389 | | We don't need to do DBUG_PRINT here as it will be done when alloc_root |
390 | | is called |
391 | | */ |
392 | |
393 | 0 | va_start(args, root); |
394 | 0 | tot_length= 0; |
395 | 0 | while ((ptr= va_arg(args, char **))) |
396 | 0 | { |
397 | 0 | length= va_arg(args, uint); |
398 | 0 | tot_length+= ALIGN_SIZE(length); |
399 | | #ifndef DBUG_OFF |
400 | | tot_length+= ALIGN_SIZE(1); |
401 | | #endif |
402 | 0 | } |
403 | 0 | va_end(args); |
404 | |
405 | 0 | if (!(start= (char*) alloc_root(root, tot_length))) |
406 | 0 | DBUG_RETURN(0); /* purecov: inspected */ |
407 | | |
408 | 0 | va_start(args, root); |
409 | 0 | res= start; |
410 | 0 | while ((ptr= va_arg(args, char **))) |
411 | 0 | { |
412 | 0 | *ptr= res; |
413 | 0 | length= va_arg(args, uint); |
414 | 0 | res+= ALIGN_SIZE(length); |
415 | | #ifndef DBUG_OFF |
416 | | TRASH_FREE(res, ALIGN_SIZE(1)); |
417 | | res+= ALIGN_SIZE(1); |
418 | | #endif |
419 | 0 | } |
420 | 0 | va_end(args); |
421 | 0 | DBUG_RETURN((void*) start); |
422 | 0 | } |
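
The classic use is carving one block into a struct plus its trailing buffers; a hypothetical example (the lengths are read back as uint by the varargs loop, hence the casts; NullS is the mysys NULL-pointer terminator):

    struct name_rec { char *name; size_t name_len; };

    static struct name_rec *make_rec(MEM_ROOT *root, uint name_len)
    {
      struct name_rec *rec;
      char *buf;
      if (!multi_alloc_root(root,
                            &rec, (uint) sizeof(*rec),
                            &buf, name_len,
                            NullS))        /* terminates the argument list */
        return NULL;
      rec->name= buf;                 /* buf lives in the same allocation */
      rec->name_len= name_len;
      return rec;
    }
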
423 | | |
424 | | |
425 | | #if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG)) |
426 | | /** Mark all data in blocks free for reusage */ |
427 | | |
428 | | static inline void mark_blocks_free(MEM_ROOT* root) |
429 | 0 | { |
430 | 0 | reg1 USED_MEM *next; |
431 | 0 | reg2 USED_MEM **last; |
432 | | |
433 | | /* iterate through (partially) free blocks, mark them free */ |
434 | 0 | last= &root->free; |
435 | 0 | for (next= root->free; next; next= *(last= &next->next)) |
436 | 0 | { |
437 | 0 | next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM)); |
438 | 0 | TRASH_MEM(next); |
439 | 0 | } |
440 | | |
441 | | /* Combine the free and the used list */ |
442 | 0 | *last= next=root->used; |
443 | | |
444 | | /* now go through the used blocks and mark them free */ |
445 | 0 | for (; next; next= next->next) |
446 | 0 | { |
447 | 0 | next->left= next->size - ALIGN_SIZE(sizeof(USED_MEM)); |
448 | 0 | TRASH_MEM(next); |
449 | 0 | } |
450 | | |
451 | | /* Now everything is set; Indicate that nothing is used anymore */ |
452 | 0 | root->used= 0; |
453 | 0 | root->first_block_usage= 0; |
454 | 0 | root->block_num= 4; |
455 | 0 | } |
456 | | #endif |
457 | | |
458 | | |
459 | | /* |
460 | | Deallocate everything used by alloc_root or just move |
461 | | used blocks to the free list if called with MY_MARK_BLOCKS_FREE |
462 | | |
463 | | SYNOPSIS |
464 | | free_root() |
465 | | root Memory root |
466 | | MyFlags Flags for what should be freed: |
467 | | |
468 | | MY_MARK_BLOCKS_FREE Don't free blocks, just mark them free |
469 | | MY_KEEP_PREALLOC If this is not set, then free also the |
470 | | preallocated block |
471 | | |
472 | | NOTES |
473 | | One can call this function either with a memory root initialised with |
474 | | init_alloc_root() or with a bzero()-ed block. |
475 | | It's also safe to call this multiple times with the same mem_root. |
476 | | */ |
477 | | |
478 | | void free_root(MEM_ROOT *root, myf MyFlags) |
479 | 0 | { |
480 | 0 | reg1 USED_MEM *next,*old; |
481 | 0 | DBUG_ENTER("free_root"); |
482 | 0 | DBUG_PRINT("enter",("root: %p flags: %lu", root, MyFlags)); |
483 | |
484 | 0 | #if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG)) |
485 | | /* |
486 | | There is no point in using mark_blocks_free when using valgrind as |
487 | | it will not reclaim any memory |
488 | | */ |
489 | 0 | if (MyFlags & MY_MARK_BLOCKS_FREE) |
490 | 0 | { |
491 | 0 | mark_blocks_free(root); |
492 | 0 | DBUG_VOID_RETURN; |
493 | 0 | } |
494 | 0 | #endif |
495 | 0 | if (!(MyFlags & MY_KEEP_PREALLOC)) |
496 | 0 | root->pre_alloc=0; |
497 | |
498 | 0 | for (next=root->used; next ;) |
499 | 0 | { |
500 | 0 | old=next; next= next->next ; |
501 | 0 | if (old != root->pre_alloc) |
502 | 0 | root_free(root, old, old->size); |
503 | 0 | } |
504 | 0 | for (next=root->free ; next ;) |
505 | 0 | { |
506 | 0 | old=next; next= next->next; |
507 | 0 | if (old != root->pre_alloc) |
508 | 0 | root_free(root, old, old->size); |
509 | 0 | } |
510 | 0 | root->used=root->free=0; |
511 | 0 | if (root->pre_alloc) |
512 | 0 | { |
513 | 0 | root->free=root->pre_alloc; |
514 | 0 | root->free->left=root->pre_alloc->size-ALIGN_SIZE(sizeof(USED_MEM)); |
515 | 0 | TRASH_MEM(root->pre_alloc); |
516 | 0 | root->free->next=0; |
517 | 0 | } |
518 | 0 | root->block_num= 4; |
519 | 0 | root->first_block_usage= 0; |
520 | 0 | DBUG_VOID_RETURN; |
521 | 0 | } |
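
The three flag combinations in one sketch (hypothetical call sites, given a root initialized with init_alloc_root()):

    /* Fast reset between uses: keep all blocks, mark their space free */
    free_root(&root, MYF(MY_MARK_BLOCKS_FREE));

    /* End of a phase: free everything except the preallocated block */
    free_root(&root, MYF(MY_KEEP_PREALLOC));

    /* Final teardown: free every block, including the prealloc */
    free_root(&root, MYF(0));
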
522 | | |
523 | | |
524 | | /* |
525 | | Find the block that contains an object and set pre_alloc to it |
526 | | */ |
527 | | |
528 | | void set_prealloc_root(MEM_ROOT *root, char *ptr) |
529 | 0 | { |
530 | 0 | USED_MEM *next; |
531 | 0 | for (next=root->used; next ; next=next->next) |
532 | 0 | { |
533 | 0 | if ((char*) next <= ptr && (char*) next + next->size > ptr) |
534 | 0 | { |
535 | 0 | root->pre_alloc=next; |
536 | 0 | return; |
537 | 0 | } |
538 | 0 | } |
539 | 0 | for (next=root->free ; next ; next=next->next) |
540 | 0 | { |
541 | 0 | if ((char*) next <= ptr && (char*) next + next->size > ptr) |
542 | 0 | { |
543 | 0 | root->pre_alloc=next; |
544 | 0 | return; |
545 | 0 | } |
546 | 0 | } |
547 | 0 | } |
548 | | |
549 | | /* |
550 | | Move allocated objects from one root to another. |
551 | | |
552 | | Notes: |
553 | | We do not increase 'to->block_num' here, as the variable is used to |
554 | | increase block sizes in the case of many allocations. This is a |
555 | | special case where that does not need to be taken into account. |
556 | | */ |
557 | | |
558 | | void move_root(MEM_ROOT *to, MEM_ROOT *from) |
559 | 0 | { |
560 | 0 | USED_MEM *block, *next; |
561 | 0 | for (block= from->used; block ; block= next) |
562 | 0 | { |
563 | 0 | next= block->next; |
564 | 0 | block->next= to->used; |
565 | 0 | to->used= block; |
566 | 0 | } |
567 | 0 | from->used= 0; |
568 | 0 | } |
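
A usage sketch (the two roots are hypothetical): hand everything built in a scratch root to a longer-lived one, then discard the scratch root:

    move_root(&long_lived_root, &scratch_root); /* used blocks change owner */
    free_root(&scratch_root, MYF(0));           /* only its free list is left */
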
569 | | |
570 | | |
571 | | |
572 | | /* |
573 | | Remember last MEM_ROOT block. |
574 | | |
575 | | This allows one to free all newly allocated blocks. |
576 | | */ |
577 | | |
578 | | USED_MEM *get_last_memroot_block(MEM_ROOT* root) |
579 | 0 | { |
580 | 0 | return root->used ? root->used : root->pre_alloc; |
581 | 0 | } |
582 | | |
583 | | /* |
584 | | Free all newly allocated blocks |
585 | | */ |
586 | | |
587 | | void free_all_new_blocks(MEM_ROOT *root, USED_MEM *last_block) |
588 | 0 | { |
589 | 0 | USED_MEM *old, *next; |
590 | 0 | if (!root->used) |
591 | 0 | return; /* Nothing allocated */ |
592 | |
593 | | /* |
594 | | Free everything allocated up to, but not including, last_block. |
595 | | However, do not go past pre_alloc, as we do not want to free |
596 | | that one. This should not be a problem, as in almost all normal |
597 | | usage pre_alloc is last in the list. |
598 | | */ |
599 | | |
600 | 0 | for (next= root->used ; |
601 | 0 | next && next != last_block && next != root->pre_alloc ; ) |
602 | 0 | { |
603 | 0 | old= next; next= next->next; |
604 | 0 | root_free(root, old, old->size); |
605 | 0 | } |
606 | 0 | root->used= next; |
607 | 0 | root->block_num= 4; |
608 | 0 | root->first_block_usage= 0; |
609 | 0 | } |
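
Together with get_last_memroot_block() this gives a savepoint-style pattern; a hypothetical sketch (allocations served from pre-existing blocks are not rolled back; only whole blocks newer than the mark are freed):

    USED_MEM *mark= get_last_memroot_block(&root);
    void *tmp= alloc_root(&root, 64 * 1024);  /* large: likely a new block */
    /* ... use tmp as scratch space ... */
    free_all_new_blocks(&root, mark);         /* free blocks newer than mark */
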
610 | | |
611 | | /** |
612 | | Change protection for all blocks in the mem root |
613 | | */ |
614 | | |
615 | | #if defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(MAP_ANONYMOUS) |
616 | | void protect_root(MEM_ROOT *root, int prot) |
617 | 0 | { |
618 | 0 | reg1 USED_MEM *next,*old; |
619 | 0 | DBUG_ENTER("protect_root"); |
620 | 0 | DBUG_PRINT("enter",("root: %p prot: %d", root, prot)); |
621 | |
622 | 0 | DBUG_ASSERT(root->flags & ROOT_FLAG_MPROTECT); |
623 | |
624 | 0 | for (next= root->used; next ;) |
625 | 0 | { |
626 | 0 | old= next; next= next->next ; |
627 | 0 | mprotect(old, old->size, prot); |
628 | 0 | } |
629 | 0 | for (next= root->free; next ;) |
630 | 0 | { |
631 | 0 | old= next; next= next->next ; |
632 | 0 | mprotect(old, old->size, prot); |
633 | 0 | } |
634 | 0 | DBUG_VOID_RETURN; |
635 | 0 | } |
636 | | #else |
637 | | void protect_root(MEM_ROOT *root, int prot) |
638 | | { |
639 | | } |
640 | | #endif /* defined(HAVE_MMAP) && ... */ |
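
A read-only root sketch (assumes a platform with mmap()/mprotect(); the string and sizes are hypothetical):

    #include <sys/mman.h>

    MEM_ROOT ro_root;
    init_alloc_root(PSI_NOT_INSTRUMENTED, &ro_root, 4096, 0,
                    MYF(MY_ROOT_USE_MPROTECT));
    char *frozen= strdup_root(&ro_root, "immutable data");
    protect_root(&ro_root, PROT_READ);              /* writes now fault */
    protect_root(&ro_root, PROT_READ | PROT_WRITE); /* writable again   */
    free_root(&ro_root, MYF(0));
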
641 | | |
642 | | |
643 | | char *strdup_root(MEM_ROOT *root, const char *str) |
644 | 0 | { |
645 | 0 | return strmake_root(root, str, strlen(str)); |
646 | 0 | } |
647 | | |
648 | | |
649 | | char *strmake_root(MEM_ROOT *root, const char *str, size_t len) |
650 | 0 | { |
651 | 0 | char *pos; |
652 | 0 | if ((pos=alloc_root(root,len+1))) |
653 | 0 | { |
654 | 0 | if (len) |
655 | 0 | memcpy(pos,str,len); |
656 | 0 | pos[len]=0; |
657 | 0 | } |
658 | 0 | return pos; |
659 | 0 | } |
660 | | |
661 | | |
662 | | void *memdup_root(MEM_ROOT *root, const void *str, size_t len) |
663 | 0 | { |
664 | 0 | char *pos; |
665 | 0 | if ((pos=alloc_root(root,len)) && len) |
666 | 0 | memcpy(pos,str,len); |
667 | 0 | return pos; |
668 | 0 | } |
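
The copy helpers side by side (hypothetical inputs, given an initialized root):

    char *a= strdup_root(&root, "hello");        /* 6 bytes, NUL included  */
    char *b= strmake_root(&root, "hello", 3);    /* "hel" plus a NUL       */
    void *c= memdup_root(&root, "\x01\x02", 2);  /* raw copy, no NUL added */
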
669 | | |
670 | | LEX_CSTRING safe_lexcstrdup_root(MEM_ROOT *root, const LEX_CSTRING str) |
671 | 0 | { |
672 | 0 | LEX_CSTRING res; |
673 | 0 | if (str.length) |
674 | 0 | res.str= strmake_root(root, str.str, str.length); |
675 | 0 | else |
676 | 0 | res.str= (const char *)""; |
677 | 0 | res.length= str.length; |
678 | 0 | return res; |
679 | 0 | } |
680 | | |
681 | | |
682 | | LEX_STRING lex_string_casedn_root(MEM_ROOT *root, CHARSET_INFO *cs, |
683 | | const char *str, size_t length) |
684 | 0 | { |
685 | 0 | size_t nbytes= length * cs->cset->casedn_multiply(cs); |
686 | 0 | LEX_STRING res= {NULL, 0}; |
687 | 0 | if (!(res.str= alloc_root(root, nbytes + 1))) |
688 | 0 | return res; |
689 | 0 | res.length= cs->cset->casedn(cs, str, length, res.str, nbytes); |
690 | 0 | res.str[res.length]= '\0'; |
691 | 0 | return res; |
692 | 0 | } |