/src/opensips/mem/shm_mem.c
Line | Count | Source |
1 | | /* |
2 | | * Shared memory functions |
3 | | * |
4 | | * Copyright (C) 2001-2003 FhG Fokus |
5 | | * Copyright (C) 2019 OpenSIPS Solutions |
6 | | * |
7 | | * This file is part of opensips, a free SIP server. |
8 | | * |
9 | | * opensips is free software; you can redistribute it and/or modify |
10 | | * it under the terms of the GNU General Public License as published by |
11 | | * the Free Software Foundation; either version 2 of the License, or |
12 | | * (at your option) any later version |
13 | | * |
14 | | * opensips is distributed in the hope that it will be useful, |
15 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | | * GNU General Public License for more details. |
18 | | * |
19 | | * You should have received a copy of the GNU General Public License |
20 | | * along with this program; if not, write to the Free Software |
21 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
22 | | */ |
23 | | |
24 | | |
25 | | #include <stdlib.h> |
26 | | |
27 | | #include "shm_mem.h" |
28 | | #include "../config.h" |
29 | | #include "../globals.h" |
30 | | |
31 | | #ifdef SHM_MMAP |
32 | | |
33 | | #include <unistd.h> |
34 | | #include <sys/mman.h> |
35 | | #include <sys/types.h> /*open*/ |
36 | | #include <sys/stat.h> |
37 | | #include <fcntl.h> |
38 | | |
39 | | #endif |
40 | | |
/* the shared memory allocator in use; stays MM_NONE until explicitly
 * selected via set_shm_mm() or defaulted from the generic allocator
 * during shm_mem_init() */
enum osips_mm mem_allocator_shm = MM_NONE;

#ifndef INLINE_ALLOC
/* runtime-dispatched allocator hooks: in a non-inlined build the concrete
 * allocator (F/Q/HP/parallel) is only known at startup, and these pointers
 * are wired up by shm_mem_init_mallocs() */
#ifdef DBG_MALLOC
/* debug variants additionally record the caller's file/function/line */
void *(*gen_shm_malloc)(void *blk, unsigned long size,
	const char *file, const char *func, unsigned int line);
void *(*gen_shm_malloc_unsafe)(void *blk, unsigned long size,
	const char *file, const char *func, unsigned int line);
void *(*gen_shm_realloc)(void *blk, void *p, unsigned long size,
	const char *file, const char *func, unsigned int line);
void *(*gen_shm_realloc_unsafe)(void *blk, void *p, unsigned long size,
	const char *file, const char *func, unsigned int line);
void (*gen_shm_free)(void *blk, void *p,
	const char *file, const char *func, unsigned int line);
void (*gen_shm_free_unsafe)(void *blk, void *p,
	const char *file, const char *func, unsigned int line);
#else
void *(*gen_shm_malloc)(void *blk, unsigned long size);
void *(*gen_shm_malloc_unsafe)(void *blk, unsigned long size);
void *(*gen_shm_realloc)(void *blk, void *p, unsigned long size);
void *(*gen_shm_realloc_unsafe)(void *blk, void *p, unsigned long size);
void (*gen_shm_free)(void *blk, void *p);
void (*gen_shm_free_unsafe)(void *blk, void *p);
#endif
/* introspection / statistics hooks, also filled in at init time */
void (*gen_shm_info)(void *blk, struct mem_info *info);
void (*gen_shm_status)(void *blk);
unsigned long (*gen_shm_get_size)(void *blk);
unsigned long (*gen_shm_get_used)(void *blk);
unsigned long (*gen_shm_get_rused)(void *blk);
unsigned long (*gen_shm_get_mused)(void *blk);
unsigned long (*gen_shm_get_free)(void *blk);
unsigned long (*gen_shm_get_frags)(void *blk);
#endif
74 | | |
#ifdef STATISTICS
/* SHM statistics exported to the core statistics engine; most values are
 * fetched on demand through the allocator's getter functions */
stat_export_t shm_stats[] = {
	{"total_size" ,     STAT_IS_FUNC,    (stat_var**)shm_get_size  },
	{"max_used_size" ,  STAT_IS_FUNC,    (stat_var**)shm_get_mused },
	{"free_size" ,      STAT_IS_FUNC,    (stat_var**)shm_get_free  },
#if defined HP_MALLOC && defined INLINE_ALLOC && !defined HP_MALLOC_FAST_STATS
	/* inlined HP_MALLOC exports real counters instead of getters */
	{"used_size" ,      STAT_NO_RESET,   &shm_used  },
	{"real_used_size" , STAT_NO_RESET,   &shm_rused },
	{"fragments" ,      STAT_NO_RESET,   &shm_frags },
#else
	/* for HP_MALLOC, these still need to be edited to stats @ startup
	 * (see shm_mem_init_mallocs(), which patches entries 3..5) */
	{"used_size" ,      STAT_IS_FUNC,    (stat_var**)shm_get_used  },
	{"real_used_size" , STAT_IS_FUNC,    (stat_var**)shm_get_rused },
	{"fragments" ,      STAT_IS_FUNC,    (stat_var**)shm_get_frags },
#endif
	{0,0,0}
};
#endif
93 | | |
94 | | |
#ifndef SHM_MMAP
static int shm_shmid=-1; /*shared memory id*/
#endif

#if defined F_MALLOC || defined Q_MALLOC
/* single global lock protecting the whole SHM block */
gen_lock_t *mem_lock;
#endif

#if defined F_PARALLEL_MALLOC
/* one lock per pool; we allocated TOTAL_F_PARALLEL_POOLS mem blocks */
gen_lock_t *hash_locks[TOTAL_F_PARALLEL_POOLS];
static void** shm_mempools=NULL;
void **shm_blocks;
#endif

#ifdef HP_MALLOC
/* HP_MALLOC uses an array of finer-grained locks instead of one global */
gen_lock_t *mem_locks;
#endif

/* base address of the mapped SHM pool; INVALID_MAP until mapped */
static void* shm_mempool=INVALID_MAP;
void *shm_block;

/* set once shm_mem_init() has fully completed */
int init_done=0;

#ifdef DBG_MALLOC
/* separate, debug-only SHM pool used for allocation bookkeeping */
gen_lock_t *mem_dbg_lock;
unsigned long shm_dbg_pool_size;
static void* shm_dbg_mempool=INVALID_MAP;
void *shm_dbg_block;

struct struct_hist_list *shm_hist;
int shm_skip_sh_log = 1;
#endif

/*
 * - the memory fragmentation pattern of OpenSIPS
 * - holds the total number of shm_mallocs requested for each
 *   different possible size since daemon startup
 * - allows memory warming (preserving the fragmentation pattern on restarts)
 */
unsigned long long *shm_hash_usage;
136 | | |
#include "../mem/mem.h"
#include "../locking.h"
#ifdef STATISTICS

#include "../evi/evi_core.h"
#include "../evi/evi_modules.h"

/* events information: SHM threshold event state (see shm_event_raise()) */
long event_shm_threshold = 0;  /* configured usage threshold; 0 = disabled */
long *event_shm_last = 0;      /* last usage percentage that was reported */
int *event_shm_pending = 0;    /* non-zero while an event is being raised */

#ifdef SHM_EXTRA_STATS
int mem_skip_stats = 0;
#ifndef INLINE_ALLOC
/* per-allocator hooks for the extra per-module memory statistics */
void (*shm_stats_core_init)(void *blk, int core_index);
unsigned long (*shm_stats_get_index)(void *ptr);
void (*shm_stats_set_index)(void *ptr, unsigned long idx);
int shm_frag_overhead;
const char *(*shm_frag_file)(void *p);
const char *(*shm_frag_func)(void *p);
unsigned long (*shm_frag_line)(void *p);
#endif
#endif

#ifndef INLINE_ALLOC
unsigned long (*shm_frag_size)(void *p);
#endif

/* parameter names carried in the SHM threshold event payload */
static str shm_usage_str = { "usage", 5 };
static str shm_threshold_str = { "threshold", 9 };
static str shm_used_str = { "used", 4 };
static str shm_size_str = { "size", 4 };
170 | | |
171 | | int set_shm_mm(const char *mm_name) |
172 | 0 | { |
173 | | #ifdef INLINE_ALLOC |
174 | | LM_NOTICE("this is an inlined allocator build (see opensips -V), " |
175 | | "cannot set a custom shm allocator (%s)\n", mm_name); |
176 | | return 0; |
177 | | #endif |
178 | |
|
179 | 0 | if (parse_mm(mm_name, &mem_allocator_shm) < 0) |
180 | 0 | return -1; |
181 | | |
182 | 0 | return 0; |
183 | 0 | } |
184 | | |
/*
 * Raises the SHM threshold event towards subscribers.
 * - used: bytes of shared memory currently in use
 * - size: total shared memory size, in bytes
 * - perc: usage percentage that triggered the event
 * Caller holds the SHM lock; it is temporarily released around the actual
 * raise (see comment below).
 */
void shm_event_raise(long used, long size, long perc)
{
	evi_params_p list = 0;

	/* record that an event is in flight and what percentage was reported,
	 * so the allocator does not re-trigger while we are still raising */
	*event_shm_pending = 1;
	*event_shm_last = perc;

	// event has to be triggered - check for subscribers
	if (!evi_probe_event(EVI_SHM_THRESHOLD_ID)) {
		goto end;
	}

	if (!(list = evi_get_params()))
		goto end;
	/* NOTE(review): long values are handed over through (int *) casts --
	 * this assumes evi_param_add_int() consumes a plain int; verify on
	 * platforms where int and long differ in size or byte order */
	if (evi_param_add_int(list, &shm_usage_str, (int *)&perc)) {
		LM_ERR("unable to add usage parameter\n");
		goto end;
	}
	if (evi_param_add_int(list, &shm_threshold_str, (int *)&event_shm_threshold)) {
		LM_ERR("unable to add threshold parameter\n");
		goto end;
	}
	if (evi_param_add_int(list, &shm_used_str, (int *)&used)) {
		LM_ERR("unable to add used parameter\n");
		goto end;
	}
	if (evi_param_add_int(list, &shm_size_str, (int *)&size)) {
		LM_ERR("unable to add size parameter\n");
		goto end;
	}

	/*
	 * event has to be raised without the lock otherwise a deadlock will be
	 * generated by the transport modules, or by the event_route processing
	 */
	shm_unlock();

	if (evi_raise_event(EVI_SHM_THRESHOLD_ID, list)) {
		LM_ERR("unable to send shm threshold event\n");
	}

	shm_lock();

	/* the raise consumed the param list -- clear it so the cleanup path
	 * below does not free it a second time */
	list = 0;
end:
	if (list)
		evi_free_params(list);
	*event_shm_pending = 0;
}
234 | | #endif |
235 | | |
236 | | /* |
237 | | * Allocates memory using mmap or sysv shmap |
238 | | * - fd: a handler to a file descriptor pointing to a map file |
239 | | * - force_addr: force mapping to a specific address |
240 | | * - size: how large the mmap should be |
241 | | */ |
242 | | void *shm_getmem(int fd, void *force_addr, unsigned long size) |
243 | 0 | { |
244 | 0 | void *ret_addr; |
245 | 0 | int flags; |
246 | |
|
247 | | #ifndef SHM_MMAP |
248 | | struct shmid_ds shm_info; |
249 | | #endif |
250 | |
|
251 | 0 | #ifdef SHM_MMAP |
252 | 0 | flags = MAP_SHARED; |
253 | 0 | if (force_addr) |
254 | 0 | flags |= MAP_FIXED; |
255 | 0 | if (fd == -1) |
256 | 0 | flags |= MAP_ANON; |
257 | 0 | ret_addr=mmap(force_addr, size, PROT_READ|PROT_WRITE, |
258 | 0 | flags, fd, 0); |
259 | | #else /* USE_MMAP */ |
260 | | /* TODO: handle persistent storage for SysV */ |
261 | | #warn "Cannot have persistent storage using SysV" |
262 | | if (force_addr || fd == -1) |
263 | | return INVALID_MAP; |
264 | | |
265 | | shm_shmid=shmget(IPC_PRIVATE, /* SHM_MEM_SIZE */ shm_mem_size, 0700); |
266 | | if (shm_shmid==-1){ |
267 | | LM_CRIT("could not allocate shared memory segment: %s\n", |
268 | | strerror(errno)); |
269 | | return INVALID_MAP; |
270 | | } |
271 | | shm_mempool=shmat(shm_shmid, 0, 0); |
272 | | #endif |
273 | 0 | return ret_addr; |
274 | 0 | } |
275 | | |
276 | | |
#if !defined(INLINE_ALLOC) && (defined(HP_MALLOC) || defined(F_PARALLEL_MALLOC))
/* startup optimization: defaults to a single global lock; cleared by
 * shm_mem_init_mallocs() when the selected allocator (HP / parallel)
 * brings its own finer-grained locking */
int shm_use_global_lock = 1;
#endif
281 | | |
282 | | int shm_mem_init_mallocs(void* mempool, unsigned long pool_size,int idx) |
283 | 0 | { |
284 | | #ifdef HP_MALLOC |
285 | | int i; |
286 | | #endif |
287 | |
|
288 | | #ifdef INLINE_ALLOC |
289 | | #if defined F_MALLOC |
290 | | shm_block = fm_malloc_init(mempool, pool_size, "shm"); |
291 | | #elif defined Q_MALLOC |
292 | | shm_block = qm_malloc_init(mempool, pool_size, "shm"); |
293 | | #elif defined HP_MALLOC |
294 | | shm_block = hp_shm_malloc_init(mempool, pool_size, "shm"); |
295 | | #elif define F_PARALEL_MALLOC |
296 | | shm_blocks[idx] = parallel_malloc_init(mempool, pool_size, "shm", idx); |
297 | | #endif |
298 | | #else |
299 | |
|
300 | | #ifdef HP_MALLOC |
301 | | if (mem_allocator_shm == MM_HP_MALLOC |
302 | | || mem_allocator_shm == MM_HP_MALLOC_DBG) { |
303 | | shm_stats[3].flags = STAT_NO_RESET; |
304 | | shm_stats[3].stat_pointer = &shm_used; |
305 | | shm_stats[4].flags = STAT_NO_RESET; |
306 | | shm_stats[4].stat_pointer = &shm_rused; |
307 | | shm_stats[5].flags = STAT_NO_RESET; |
308 | | shm_stats[5].stat_pointer = &shm_frags; |
309 | | |
310 | | shm_use_global_lock = 0; |
311 | | } |
312 | | #endif |
313 | |
|
314 | 0 | #ifdef F_PARALLEL_MALLOC |
315 | 0 | if (mem_allocator_shm == MM_F_PARALLEL_MALLOC || |
316 | 0 | mem_allocator_shm == MM_F_PARALLEL_MALLOC_DBG) { |
317 | 0 | shm_use_global_lock = 0; |
318 | 0 | } |
319 | 0 | #endif |
320 | |
|
321 | | #ifdef SHM_EXTRA_STATS |
322 | | switch (mem_allocator_shm) { |
323 | | #ifdef F_MALLOC |
324 | | case MM_F_MALLOC: |
325 | | case MM_F_MALLOC_DBG: |
326 | | shm_stats_core_init = (osips_shm_stats_init_f)fm_stats_core_init; |
327 | | shm_stats_get_index = fm_stats_get_index; |
328 | | shm_stats_set_index = fm_stats_set_index; |
329 | | shm_frag_overhead = FM_FRAG_OVERHEAD; |
330 | | shm_frag_file = fm_frag_file; |
331 | | shm_frag_func = fm_frag_func; |
332 | | shm_frag_line = fm_frag_line; |
333 | | break; |
334 | | #endif |
335 | | #ifdef Q_MALLOC |
336 | | case MM_Q_MALLOC: |
337 | | case MM_Q_MALLOC_DBG: |
338 | | shm_stats_core_init = (osips_shm_stats_init_f)qm_stats_core_init; |
339 | | shm_stats_get_index = qm_stats_get_index; |
340 | | shm_stats_set_index = qm_stats_set_index; |
341 | | shm_frag_overhead = QM_FRAG_OVERHEAD; |
342 | | shm_frag_file = qm_frag_file; |
343 | | shm_frag_func = qm_frag_func; |
344 | | shm_frag_line = qm_frag_line; |
345 | | break; |
346 | | #endif |
347 | | #ifdef HP_MALLOC |
348 | | case MM_HP_MALLOC: |
349 | | case MM_HP_MALLOC_DBG: |
350 | | shm_stats_core_init = (osips_shm_stats_init_f)hp_stats_core_init; |
351 | | shm_stats_get_index = hp_stats_get_index; |
352 | | shm_stats_set_index = hp_stats_set_index; |
353 | | shm_frag_overhead = HP_FRAG_OVERHEAD; |
354 | | shm_frag_file = hp_frag_file; |
355 | | shm_frag_func = hp_frag_func; |
356 | | shm_frag_line = hp_frag_line; |
357 | | break; |
358 | | #endif |
359 | | #ifdef F_PARALLEL_MALLOC |
360 | | case MM_F_PARALLEL_MALLOC: |
361 | | case MM_F_PARALLEL_MALLOC_DBG: |
362 | | shm_stats_core_init = (osips_shm_stats_init_f)parallel_stats_core_init; |
363 | | shm_stats_get_index = parallel_stats_get_index; |
364 | | shm_stats_set_index = parallel_stats_set_index; |
365 | | shm_frag_overhead = F_PARALLEL_FRAG_OVERHEAD; |
366 | | shm_frag_file = parallel_frag_file; |
367 | | shm_frag_func = parallel_frag_func; |
368 | | shm_frag_line = parallel_frag_line; |
369 | | break; |
370 | | #endif |
371 | | default: |
372 | | LM_ERR("current build does not include support for " |
373 | | "selected allocator (%s)\n", mm_str(mem_allocator_shm)); |
374 | | return -1; |
375 | | } |
376 | | #endif |
377 | |
|
378 | 0 | switch (mem_allocator_shm) { |
379 | | #ifdef F_MALLOC |
380 | | case MM_F_MALLOC: |
381 | | case MM_F_MALLOC_DBG: |
382 | | shm_frag_size = fm_frag_size; |
383 | | break; |
384 | | #endif |
385 | 0 | #ifdef Q_MALLOC |
386 | 0 | case MM_Q_MALLOC: |
387 | 0 | case MM_Q_MALLOC_DBG: |
388 | 0 | shm_frag_size = qm_frag_size; |
389 | 0 | break; |
390 | 0 | #endif |
391 | | #ifdef HP_MALLOC |
392 | | case MM_HP_MALLOC: |
393 | | case MM_HP_MALLOC_DBG: |
394 | | shm_frag_size = hp_frag_size; |
395 | | break; |
396 | | #endif |
397 | 0 | #ifdef F_PARALLEL_MALLOC |
398 | 0 | case MM_F_PARALLEL_MALLOC: |
399 | 0 | case MM_F_PARALLEL_MALLOC_DBG: |
400 | 0 | shm_frag_size = parallel_frag_size; |
401 | 0 | break; |
402 | 0 | #endif |
403 | 0 | default: |
404 | 0 | LM_ERR("current build does not include support for " |
405 | 0 | "selected allocator (%s)\n", mm_str(mem_allocator_shm)); |
406 | 0 | return -1; |
407 | 0 | } |
408 | | |
409 | 0 | switch (mem_allocator_shm) { |
410 | 0 | #ifdef F_PARALLEL_MALLOC |
411 | 0 | case MM_F_PARALLEL_MALLOC: |
412 | 0 | case MM_F_PARALLEL_MALLOC_DBG: |
413 | 0 | shm_blocks[idx] = parallel_malloc_init(mempool, pool_size, "shm", idx); |
414 | 0 | if (!shm_blocks[idx]) { |
415 | 0 | LM_CRIT("parallel alloc init :( \n"); |
416 | 0 | goto err_destroy; |
417 | 0 | } |
418 | 0 | gen_shm_malloc = (osips_block_malloc_f)parallel_malloc; |
419 | 0 | gen_shm_malloc_unsafe = (osips_block_malloc_f)parallel_malloc; |
420 | 0 | gen_shm_realloc = (osips_block_realloc_f)parallel_realloc; |
421 | 0 | gen_shm_realloc_unsafe = (osips_block_realloc_f)parallel_realloc; |
422 | 0 | gen_shm_free = (osips_block_free_f)parallel_free; |
423 | 0 | gen_shm_free_unsafe = (osips_block_free_f)parallel_free; |
424 | 0 | gen_shm_info = (osips_mem_info_f)parallel_info; |
425 | 0 | gen_shm_status = (osips_mem_status_f)parallel_status; |
426 | 0 | gen_shm_get_size = (osips_get_mmstat_f)parallel_get_size; |
427 | 0 | gen_shm_get_used = (osips_get_mmstat_f)parallel_get_used; |
428 | 0 | gen_shm_get_rused = (osips_get_mmstat_f)parallel_get_real_used; |
429 | 0 | gen_shm_get_mused = (osips_get_mmstat_f)parallel_get_max_real_used; |
430 | 0 | gen_shm_get_free = (osips_get_mmstat_f)parallel_get_free; |
431 | 0 | gen_shm_get_frags = (osips_get_mmstat_f)parallel_get_frags; |
432 | 0 | break; |
433 | 0 | #endif |
434 | | #ifdef F_MALLOC |
435 | | case MM_F_MALLOC: |
436 | | shm_block = fm_malloc_init(mempool, pool_size, "shm"); |
437 | | gen_shm_malloc = (osips_block_malloc_f)fm_malloc; |
438 | | gen_shm_malloc_unsafe = (osips_block_malloc_f)fm_malloc; |
439 | | gen_shm_realloc = (osips_block_realloc_f)fm_realloc; |
440 | | gen_shm_realloc_unsafe = (osips_block_realloc_f)fm_realloc; |
441 | | gen_shm_free = (osips_block_free_f)fm_free; |
442 | | gen_shm_free_unsafe = (osips_block_free_f)fm_free; |
443 | | gen_shm_info = (osips_mem_info_f)fm_info; |
444 | | gen_shm_status = (osips_mem_status_f)fm_status; |
445 | | gen_shm_get_size = (osips_get_mmstat_f)fm_get_size; |
446 | | gen_shm_get_used = (osips_get_mmstat_f)fm_get_used; |
447 | | gen_shm_get_rused = (osips_get_mmstat_f)fm_get_real_used; |
448 | | gen_shm_get_mused = (osips_get_mmstat_f)fm_get_max_real_used; |
449 | | gen_shm_get_free = (osips_get_mmstat_f)fm_get_free; |
450 | | gen_shm_get_frags = (osips_get_mmstat_f)fm_get_frags; |
451 | | break; |
452 | | #endif |
453 | 0 | #ifdef Q_MALLOC |
454 | 0 | case MM_Q_MALLOC: |
455 | 0 | shm_block = qm_malloc_init(mempool, pool_size, "shm"); |
456 | 0 | gen_shm_malloc = (osips_block_malloc_f)qm_malloc; |
457 | 0 | gen_shm_malloc_unsafe = (osips_block_malloc_f)qm_malloc; |
458 | 0 | gen_shm_realloc = (osips_block_realloc_f)qm_realloc; |
459 | 0 | gen_shm_realloc_unsafe = (osips_block_realloc_f)qm_realloc; |
460 | 0 | gen_shm_free = (osips_block_free_f)qm_free; |
461 | 0 | gen_shm_free_unsafe = (osips_block_free_f)qm_free; |
462 | 0 | gen_shm_info = (osips_mem_info_f)qm_info; |
463 | 0 | gen_shm_status = (osips_mem_status_f)qm_status; |
464 | 0 | gen_shm_get_size = (osips_get_mmstat_f)qm_get_size; |
465 | 0 | gen_shm_get_used = (osips_get_mmstat_f)qm_get_used; |
466 | 0 | gen_shm_get_rused = (osips_get_mmstat_f)qm_get_real_used; |
467 | 0 | gen_shm_get_mused = (osips_get_mmstat_f)qm_get_max_real_used; |
468 | 0 | gen_shm_get_free = (osips_get_mmstat_f)qm_get_free; |
469 | 0 | gen_shm_get_frags = (osips_get_mmstat_f)qm_get_frags; |
470 | 0 | break; |
471 | 0 | #endif |
472 | | #ifdef HP_MALLOC |
473 | | case MM_HP_MALLOC: |
474 | | shm_block = hp_shm_malloc_init(mempool, pool_size, "shm"); |
475 | | gen_shm_malloc = (osips_block_malloc_f)hp_shm_malloc; |
476 | | gen_shm_malloc_unsafe = (osips_block_malloc_f)hp_shm_malloc_unsafe; |
477 | | gen_shm_realloc = (osips_block_realloc_f)hp_shm_realloc; |
478 | | gen_shm_realloc_unsafe = (osips_block_realloc_f)hp_shm_realloc_unsafe; |
479 | | gen_shm_free = (osips_block_free_f)hp_shm_free; |
480 | | gen_shm_free_unsafe = (osips_block_free_f)hp_shm_free_unsafe; |
481 | | gen_shm_info = (osips_mem_info_f)hp_info; |
482 | | gen_shm_status = (osips_mem_status_f)hp_status; |
483 | | gen_shm_get_size = (osips_get_mmstat_f)hp_shm_get_size; |
484 | | gen_shm_get_used = (osips_get_mmstat_f)hp_shm_get_used; |
485 | | gen_shm_get_rused = (osips_get_mmstat_f)hp_shm_get_real_used; |
486 | | gen_shm_get_mused = (osips_get_mmstat_f)hp_shm_get_max_real_used; |
487 | | gen_shm_get_free = (osips_get_mmstat_f)hp_shm_get_free; |
488 | | gen_shm_get_frags = (osips_get_mmstat_f)hp_shm_get_frags; |
489 | | break; |
490 | | #endif |
491 | | #ifdef DBG_MALLOC |
492 | | #ifdef F_MALLOC |
493 | | case MM_F_MALLOC_DBG: |
494 | | shm_block = fm_malloc_init(mempool, pool_size, "shm"); |
495 | | gen_shm_malloc = (osips_block_malloc_f)fm_malloc_dbg; |
496 | | gen_shm_malloc_unsafe = (osips_block_malloc_f)fm_malloc_dbg; |
497 | | gen_shm_realloc = (osips_block_realloc_f)fm_realloc_dbg; |
498 | | gen_shm_realloc_unsafe = (osips_block_realloc_f)fm_realloc_dbg; |
499 | | gen_shm_free = (osips_block_free_f)fm_free_dbg; |
500 | | gen_shm_free_unsafe = (osips_block_free_f)fm_free_dbg; |
501 | | gen_shm_info = (osips_mem_info_f)fm_info; |
502 | | gen_shm_status = (osips_mem_status_f)fm_status_dbg; |
503 | | gen_shm_get_size = (osips_get_mmstat_f)fm_get_size; |
504 | | gen_shm_get_used = (osips_get_mmstat_f)fm_get_used; |
505 | | gen_shm_get_rused = (osips_get_mmstat_f)fm_get_real_used; |
506 | | gen_shm_get_mused = (osips_get_mmstat_f)fm_get_max_real_used; |
507 | | gen_shm_get_free = (osips_get_mmstat_f)fm_get_free; |
508 | | gen_shm_get_frags = (osips_get_mmstat_f)fm_get_frags; |
509 | | break; |
510 | | #endif |
511 | | #ifdef Q_MALLOC |
512 | | case MM_Q_MALLOC_DBG: |
513 | | shm_block = qm_malloc_init(mempool, pool_size, "shm"); |
514 | | gen_shm_malloc = (osips_block_malloc_f)qm_malloc_dbg; |
515 | | gen_shm_malloc_unsafe = (osips_block_malloc_f)qm_malloc_dbg; |
516 | | gen_shm_realloc = (osips_block_realloc_f)qm_realloc_dbg; |
517 | | gen_shm_realloc_unsafe = (osips_block_realloc_f)qm_realloc_dbg; |
518 | | gen_shm_free = (osips_block_free_f)qm_free_dbg; |
519 | | gen_shm_free_unsafe = (osips_block_free_f)qm_free_dbg; |
520 | | gen_shm_info = (osips_mem_info_f)qm_info; |
521 | | gen_shm_status = (osips_mem_status_f)qm_status_dbg; |
522 | | gen_shm_get_size = (osips_get_mmstat_f)qm_get_size; |
523 | | gen_shm_get_used = (osips_get_mmstat_f)qm_get_used; |
524 | | gen_shm_get_rused = (osips_get_mmstat_f)qm_get_real_used; |
525 | | gen_shm_get_mused = (osips_get_mmstat_f)qm_get_max_real_used; |
526 | | gen_shm_get_free = (osips_get_mmstat_f)qm_get_free; |
527 | | gen_shm_get_frags = (osips_get_mmstat_f)qm_get_frags; |
528 | | break; |
529 | | #endif |
530 | | #ifdef HP_MALLOC |
531 | | case MM_HP_MALLOC_DBG: |
532 | | shm_block = hp_shm_malloc_init(mempool, pool_size, "shm"); |
533 | | gen_shm_malloc = (osips_block_malloc_f)hp_shm_malloc_dbg; |
534 | | gen_shm_malloc_unsafe = (osips_block_malloc_f)hp_shm_malloc_unsafe_dbg; |
535 | | gen_shm_realloc = (osips_block_realloc_f)hp_shm_realloc_dbg; |
536 | | gen_shm_realloc_unsafe = (osips_block_realloc_f)hp_shm_realloc_unsafe_dbg; |
537 | | gen_shm_free = (osips_block_free_f)hp_shm_free_dbg; |
538 | | gen_shm_free_unsafe = (osips_block_free_f)hp_shm_free_unsafe_dbg; |
539 | | gen_shm_info = (osips_mem_info_f)hp_info; |
540 | | gen_shm_status = (osips_mem_status_f)hp_status_dbg; |
541 | | gen_shm_get_size = (osips_get_mmstat_f)hp_shm_get_size; |
542 | | gen_shm_get_used = (osips_get_mmstat_f)hp_shm_get_used; |
543 | | gen_shm_get_rused = (osips_get_mmstat_f)hp_shm_get_real_used; |
544 | | gen_shm_get_mused = (osips_get_mmstat_f)hp_shm_get_max_real_used; |
545 | | gen_shm_get_free = (osips_get_mmstat_f)hp_shm_get_free; |
546 | | gen_shm_get_frags = (osips_get_mmstat_f)hp_shm_get_frags; |
547 | | break; |
548 | | #endif |
549 | | #endif |
550 | 0 | default: |
551 | 0 | LM_ERR("current build does not include support for " |
552 | 0 | "selected allocator (%s)\n", mm_str(mem_allocator_shm)); |
553 | 0 | return -1; |
554 | 0 | } |
555 | 0 | #endif |
556 | | |
557 | 0 | if (mem_allocator_shm != MM_F_PARALLEL_MALLOC && mem_allocator_shm != MM_F_PARALLEL_MALLOC_DBG) { |
558 | 0 | if (!shm_block){ |
559 | 0 | #ifdef F_PARALLEL_MALLOC |
560 | 0 | err_destroy: |
561 | 0 | #endif |
562 | 0 | LM_CRIT("could not initialize shared malloc\n"); |
563 | 0 | shm_mem_destroy(); |
564 | 0 | return -1; |
565 | 0 | } |
566 | 0 | } |
567 | | |
568 | | #if defined(SHM_EXTRA_STATS) && defined(SHM_SHOW_DEFAULT_GROUP) |
569 | | /* we create the the default group statistic where memory alocated untill groups are defined is indexed */ |
570 | | |
571 | | #ifndef DBG_MALLOC |
572 | | memory_mods_stats = MY_MALLOC_UNSAFE(shm_block, sizeof(struct module_info)); |
573 | | #else |
574 | | memory_mods_stats = MY_MALLOC_UNSAFE(shm_block, sizeof(struct module_info), __FILE__, __FUNCTION__, __LINE__ ); |
575 | | #endif |
576 | | |
577 | | if(!memory_mods_stats){ |
578 | | LM_CRIT("could not alloc shared memory"); |
579 | | return -1; |
580 | | } |
581 | | //initialize the new created groups |
582 | | memset((void*)&memory_mods_stats[0], 0, sizeof(struct module_info)); |
583 | | if (init_new_stat((stat_var*)&memory_mods_stats[0].fragments) < 0) |
584 | | return -1; |
585 | | |
586 | | if (init_new_stat((stat_var*)&memory_mods_stats[0].memory_used) < 0) |
587 | | return -1; |
588 | | |
589 | | if (init_new_stat((stat_var*)&memory_mods_stats[0].real_used) < 0) |
590 | | return -1; |
591 | | |
592 | | if (init_new_stat((stat_var*)&memory_mods_stats[0].max_real_used) < 0) |
593 | | return -1; |
594 | | |
595 | | memory_mods_stats[0].lock = shm_malloc_unsafe(sizeof (gen_lock_t)); |
596 | | |
597 | | if (!memory_mods_stats[0].lock) { |
598 | | LM_ERR("Failed to allocate lock \n"); |
599 | | return -1; |
600 | | } |
601 | | |
602 | | if (!lock_init(memory_mods_stats[0].lock)) { |
603 | | LM_ERR("Failed to init lock \n"); |
604 | | return -1; |
605 | | } |
606 | | |
607 | | #ifdef HP_MALLOC |
608 | | update_stat((stat_var*)&memory_mods_stats[0].fragments, shm_block->total_fragments); |
609 | | #else |
610 | | update_stat((stat_var*)&memory_mods_stats[0].fragments, shm_block->fragments); |
611 | | #endif |
612 | | |
613 | | update_stat((stat_var*)&memory_mods_stats[0].memory_used, shm_block->used); |
614 | | update_stat((stat_var*)&memory_mods_stats[0].real_used, shm_block->real_used); |
615 | | #endif |
616 | | |
617 | | #ifdef HP_MALLOC |
618 | | /* lock_alloc cannot be used yet! */ |
619 | | mem_locks = shm_malloc_unsafe(HP_TOTAL_HASH_SIZE * sizeof *mem_locks); |
620 | | if (!mem_locks) { |
621 | | LM_CRIT("could not allocate the shm lock array\n"); |
622 | | shm_mem_destroy(); |
623 | | return -1; |
624 | | } |
625 | | |
626 | | for (i = 0; i < HP_TOTAL_HASH_SIZE; i++) |
627 | | if (!lock_init(&mem_locks[i])) { |
628 | | LM_CRIT("could not initialize lock\n"); |
629 | | shm_mem_destroy(); |
630 | | return -1; |
631 | | } |
632 | | |
633 | | shm_hash_usage = shm_malloc_unsafe(HP_TOTAL_HASH_SIZE * sizeof *shm_hash_usage); |
634 | | if (!shm_hash_usage) { |
635 | | LM_ERR("failed to allocate statistics array\n"); |
636 | | return -1; |
637 | | } |
638 | | |
639 | | memset(shm_hash_usage, 0, HP_TOTAL_HASH_SIZE * sizeof *shm_hash_usage); |
640 | | #endif |
641 | | |
642 | 0 | #if defined F_PARALLEL_MALLOC |
643 | 0 | hash_locks[idx] = shm_malloc_unsafe(sizeof(gen_lock_t)); |
644 | 0 | if (!hash_locks[idx]) { |
645 | 0 | LM_CRIT("could not initialize lock on idx %d\n",idx); |
646 | 0 | shm_mem_destroy(); |
647 | 0 | return -1; |
648 | 0 | } |
649 | | |
650 | 0 | if (!lock_init(hash_locks[idx])) { |
651 | 0 | LM_CRIT("could not initialize lock on idx %d\n",idx); |
652 | 0 | shm_mem_destroy(); |
653 | 0 | return -1; |
654 | 0 | } |
655 | 0 | #endif |
656 | | |
657 | 0 | #if defined F_MALLOC || defined Q_MALLOC |
658 | 0 | mem_lock = shm_malloc_unsafe(sizeof *mem_lock); |
659 | 0 | if (!mem_lock) { |
660 | 0 | LM_CRIT("could not allocate the shm lock\n"); |
661 | 0 | shm_mem_destroy(); |
662 | 0 | return -1; |
663 | 0 | } |
664 | | |
665 | 0 | if (!lock_init(mem_lock)) { |
666 | 0 | LM_CRIT("could not initialize lock\n"); |
667 | 0 | shm_mem_destroy(); |
668 | 0 | return -1; |
669 | 0 | } |
670 | 0 | #endif |
671 | | |
672 | 0 | #ifdef STATISTICS |
673 | 0 | { |
674 | 0 | struct { |
675 | 0 | long last; |
676 | 0 | int pending; |
677 | 0 | } *ev_holders; |
678 | |
|
679 | 0 | ev_holders = shm_malloc_unsafe(sizeof *ev_holders); |
680 | 0 | if (!ev_holders) { |
681 | 0 | LM_CRIT("could not allocate SHM event holders\n"); |
682 | 0 | shm_mem_destroy(); |
683 | 0 | return -1; |
684 | 0 | } |
685 | 0 | memset(ev_holders, 0, sizeof *ev_holders); |
686 | |
|
687 | 0 | event_shm_last = &ev_holders->last; |
688 | 0 | event_shm_pending = &ev_holders->pending; |
689 | 0 | } |
690 | 0 | #endif /* STATISTICS */ |
691 | | |
692 | 0 | LM_DBG("success\n"); |
693 | |
|
694 | 0 | return 0; |
695 | 0 | } |
696 | | |
697 | | #ifdef DBG_MALLOC |
/*
 * Initializes the debug-only SHM pool ("shm_dbg") used for allocation
 * bookkeeping, mirroring shm_mem_init_mallocs() for the main pool.
 * - mempool: base address of the mapped debug pool
 * - pool_size: size of the pool, in bytes
 * Returns 0 on success, -1 on failure (SHM is torn down on error).
 */
int shm_dbg_mem_init_mallocs(void* mempool, unsigned long pool_size)
{

#ifdef INLINE_ALLOC
#if defined F_MALLOC
	shm_dbg_block = fm_malloc_init(mempool, pool_size, "shm_dbg");
#elif defined Q_MALLOC
	/* NOTE(review): this nested DBG_MALLOC guard looks redundant -- the
	 * whole function is already compiled only under DBG_MALLOC */
#ifdef DBG_MALLOC
	shm_dbg_block = qm_malloc_init(mempool, pool_size, "shm_dbg");
#endif
#elif defined HP_MALLOC
	shm_dbg_block = hp_shm_malloc_init(mempool, pool_size, "shm_dbg");
#endif
#else
	/* default the SHM allocator from the generic one, if not yet chosen */
	if (mem_allocator_shm == MM_NONE)
		mem_allocator_shm = mem_allocator;

	switch (mem_allocator_shm) {
#ifdef F_MALLOC
	case MM_F_MALLOC:
	case MM_F_MALLOC_DBG:
		shm_dbg_block = fm_malloc_init(mempool, pool_size, "shm_dbg");
		break;
#endif
#ifdef Q_MALLOC
	case MM_Q_MALLOC:
	case MM_Q_MALLOC_DBG:
		shm_dbg_block = qm_malloc_init(mempool, pool_size, "shm_dbg");
		break;
#endif
#ifdef HP_MALLOC
	case MM_HP_MALLOC:
	case MM_HP_MALLOC_DBG:
		shm_dbg_block = hp_shm_malloc_init(mempool, pool_size, "shm_dbg");
		break;
#endif
	default:
		LM_ERR("current build does not include support for "
			"selected allocator (%s)\n", mm_str(mem_allocator_shm));
		return -1;
	}
#endif

	if (!shm_dbg_block){
		LM_CRIT("could not initialize debug shared malloc\n");
		shm_mem_destroy();
		return -1;
	}

	/* the debug pool gets its own lock, allocated from itself */
	mem_dbg_lock = shm_dbg_malloc_unsafe(sizeof *mem_dbg_lock);
	if (!mem_dbg_lock) {
		LM_CRIT("could not allocate the shm dbg lock\n");
		shm_mem_destroy();
		return -1;
	}

	if (!lock_init(mem_dbg_lock)) {
		LM_CRIT("could not initialize lock\n");
		shm_mem_destroy();
		return -1;
	}

	LM_DBG("success\n");

	return 0;
}
764 | | #endif |
765 | | |
766 | | int shm_mem_init(void) |
767 | 0 | { |
768 | 0 | int fd = -1; |
769 | 0 | LM_INFO("allocating SHM block\n"); |
770 | |
|
771 | 0 | #ifdef SHM_MMAP |
772 | 0 | if (shm_mempool && (shm_mempool!=(void*)-1)){ |
773 | | #else |
774 | | if ((shm_shmid!=-1)||(shm_mempool!=(void*)-1)){ |
775 | | #endif |
776 | 0 | LM_CRIT("shm already initialized\n"); |
777 | 0 | return -1; |
778 | 0 | } |
779 | | |
780 | 0 | #ifdef F_PARALLEL_MALLOC |
781 | | /* we will need multiple pools, malloc pointers here */ |
782 | 0 | shm_mempools = malloc(TOTAL_F_PARALLEL_POOLS * sizeof(void*)); |
783 | 0 | if (!shm_mempools) { |
784 | 0 | LM_ERR("Failed to init all the mempools \n"); |
785 | 0 | return -1; |
786 | 0 | } |
787 | 0 | memset(shm_mempools,0,TOTAL_F_PARALLEL_POOLS * sizeof(void *)); |
788 | |
|
789 | 0 | shm_blocks = malloc(TOTAL_F_PARALLEL_POOLS * sizeof(void *)); |
790 | 0 | if (!shm_blocks) { |
791 | 0 | LM_ERR("Failed to init all the blocks \n"); |
792 | 0 | return -1; |
793 | 0 | } |
794 | 0 | memset(shm_blocks,0,TOTAL_F_PARALLEL_POOLS * sizeof(void *)); |
795 | 0 | #endif |
796 | |
|
797 | 0 | #ifndef USE_ANON_MMAP |
798 | 0 | fd=open("/dev/zero", O_RDWR); |
799 | 0 | if (fd==-1){ |
800 | 0 | LM_CRIT("could not open /dev/zero: %s\n", strerror(errno)); |
801 | 0 | return -1; |
802 | 0 | } |
803 | 0 | #endif /* USE_ANON_MMAP */ |
804 | | |
805 | 0 | if (mem_allocator_shm == MM_NONE) |
806 | 0 | mem_allocator_shm = mem_allocator; |
807 | |
|
808 | 0 | #ifdef F_PARALLEL_MALLOC |
809 | 0 | if (mem_allocator_shm == MM_F_PARALLEL_MALLOC || |
810 | 0 | mem_allocator_shm == MM_F_PARALLEL_MALLOC_DBG) { |
811 | 0 | int i; |
812 | 0 | LM_DBG("Paralel malloc, total pools size is %d\n",TOTAL_F_PARALLEL_POOLS); |
813 | 0 | for (i=0;i<TOTAL_F_PARALLEL_POOLS;i++) { |
814 | 0 | unsigned long block_size; |
815 | |
|
816 | 0 | block_size = shm_mem_size/TOTAL_F_PARALLEL_POOLS; |
817 | 0 | shm_mempools[i] = shm_getmem(fd,NULL,block_size); |
818 | 0 | LM_DBG("Allocated %p pool on idx %d with size %ld\n",shm_mempools[i],i,block_size); |
819 | |
|
820 | 0 | if (shm_mempools[i] == INVALID_MAP) { |
821 | 0 | LM_CRIT("could not attach shared memory segment %d: %s\n", |
822 | 0 | i,strerror(errno)); |
823 | 0 | return -1; |
824 | 0 | } |
825 | | |
826 | 0 | if (shm_mem_init_mallocs(shm_mempools[i], block_size,i)) { |
827 | 0 | LM_CRIT("could not init shared memory segment %d\n",i); |
828 | 0 | return -1; |
829 | 0 | } |
830 | 0 | } |
831 | | |
832 | 0 | init_done = 1; |
833 | 0 | return 0; |
834 | 0 | } else { |
835 | 0 | shm_mempool = shm_getmem(fd, NULL, shm_mem_size); |
836 | 0 | #ifndef USE_ANON_MMAP |
837 | 0 | close(fd); |
838 | 0 | #endif /* USE_ANON_MMAP */ |
839 | 0 | if (shm_mempool == INVALID_MAP) { |
840 | 0 | LM_CRIT("could not attach shared memory segment: %s\n", |
841 | 0 | strerror(errno)); |
842 | | /* destroy segment*/ |
843 | 0 | shm_mem_destroy(); |
844 | 0 | return -1; |
845 | 0 | } |
846 | | |
847 | 0 | return shm_mem_init_mallocs(shm_mempool, shm_mem_size,0); |
848 | 0 | } |
849 | | #else |
850 | | shm_mempool = shm_getmem(fd, NULL, shm_mem_size); |
851 | | #ifndef USE_ANON_MMAP |
852 | | close(fd); |
853 | | #endif /* USE_ANON_MMAP */ |
854 | | if (shm_mempool == INVALID_MAP) { |
855 | | LM_CRIT("could not attach shared memory segment: %s\n", |
856 | | strerror(errno)); |
857 | | /* destroy segment*/ |
858 | | shm_mem_destroy(); |
859 | | return -1; |
860 | | } |
861 | | |
862 | | return shm_mem_init_mallocs(shm_mempool, shm_mem_size,0); |
863 | | #endif |
864 | 0 | } |
865 | | |
866 | | #ifdef DBG_MALLOC |
/*
 * Allocate and initialize the auxiliary "debug" SHM pool used to store
 * per-allocation logging data (DBG_MALLOC builds only).
 *
 * The pool size is derived from shm_memlog_size by the active allocator's
 * *_get_dbg_pool_size() helper; the pool is then mapped via shm_getmem()
 * and handed to shm_dbg_mem_init_mallocs().
 *
 * Returns 0 on success, -1 on failure.
 */
int shm_dbg_mem_init(void)
{
	int fd_dbg = -1;

#ifndef USE_ANON_MMAP
	/* mmap backing file; with USE_ANON_MMAP, fd_dbg stays -1 and
	 * shm_getmem() presumably maps anonymously -- see shm_getmem() */
	fd_dbg=open("/dev/zero", O_RDWR);
	if (fd_dbg==-1){
		LM_CRIT("could not open /dev/zero: %s\n", strerror(errno));
		return -1;
	}
#endif

	/* size the debug pool according to the compiled-in / selected allocator */
#ifdef INLINE_ALLOC
#if defined F_MALLOC
	shm_dbg_pool_size = fm_get_dbg_pool_size(shm_memlog_size);
#elif defined Q_MALLOC
	shm_dbg_pool_size = qm_get_dbg_pool_size(shm_memlog_size);
#elif defined HP_MALLOC
	shm_dbg_pool_size = hp_get_dbg_pool_size(shm_memlog_size);
#endif
#else
	switch (mem_allocator_shm) {
#ifdef F_MALLOC
	case MM_F_MALLOC:
	case MM_F_MALLOC_DBG:
		shm_dbg_pool_size = fm_get_dbg_pool_size(shm_memlog_size);
		break;
#endif
#ifdef Q_MALLOC
	case MM_Q_MALLOC:
	case MM_Q_MALLOC_DBG:
		shm_dbg_pool_size = qm_get_dbg_pool_size(shm_memlog_size);
		break;
#endif
#ifdef HP_MALLOC
	case MM_HP_MALLOC:
	case MM_HP_MALLOC_DBG:
		shm_dbg_pool_size = hp_get_dbg_pool_size(shm_memlog_size);
		break;
#endif
	default:
		LM_ERR("current build does not include support for "
				"selected allocator (%s)\n", mm_str(mem_allocator_shm));
		/* NOTE(review): under USE_ANON_MMAP this closes fd -1 (harmless
		 * EBADF), mirroring the unconditional close elsewhere -- confirm */
		close(fd_dbg);
		return -1;
	}
#endif

	LM_DBG("Debug SHM pool size: %.2lf GB\n",
			(double)shm_dbg_pool_size / 1024 / 1024 / 1024);

	shm_dbg_mempool = shm_getmem(fd_dbg, NULL, shm_dbg_pool_size);

#ifndef USE_ANON_MMAP
	/* the mapping persists after the backing fd is closed */
	close(fd_dbg);
#endif

	if (shm_dbg_mempool == INVALID_MAP) {
		LM_CRIT("could not attach shared memory segment: %s\n",
				strerror(errno));
		/* destroy segment*/
		shm_mem_destroy();
		return -1;
	}

	return shm_dbg_mem_init_mallocs(shm_dbg_mempool, shm_dbg_pool_size);
}
934 | | #endif |
935 | | |
/*
 * MI command handler: run a consistency check over the main SHM block.
 *
 * Only functional when both Q_MALLOC and DBG_MALLOC are compiled in
 * (qm_mem_check() is a QM debug facility); in any other build, or when
 * a different allocator is selected at runtime, it returns NULL.
 *
 * Returns an MI response holding the total fragment count, or NULL.
 */
mi_response_t *mi_shm_check(const mi_params_t *params,
								struct mi_handler *async_hdl)
{
#if defined(Q_MALLOC) && defined(DBG_MALLOC)
	mi_response_t *resp;
	mi_item_t *resp_obj;
	int ret;

	/* NOTE: the brace opened here is closed in the matching
	 * #ifndef INLINE_ALLOC region below -- keep them in sync */
#ifndef INLINE_ALLOC
	if (mem_allocator_shm == MM_Q_MALLOC_DBG) {
#endif

	/* walk the whole pool under the global SHM lock */
	shm_lock();
	ret = qm_mem_check(shm_block);
	shm_unlock();

	/* print the number of fragments */
	resp = init_mi_result_object(&resp_obj);
	if (!resp)
		return NULL;

	if (add_mi_number(resp, MI_SSTR("total_fragments"), ret) < 0) {
		LM_ERR("failed to add MI item\n");
		free_mi_response(resp);
		return NULL;
	}

	return resp;

#ifndef INLINE_ALLOC
	}
#endif
#endif

	/* unsupported build or allocator: report nothing */
	return NULL;
}
972 | | |
/*
 * Post-config (after yyparse) SHM initialization steps:
 *   - HP_MALLOC: optional memory warming + per-pool statistics setup;
 *   - SHM_EXTRA_STATS: register per-module memory statistics
 *     (fragments / memory_used / real_used / max_real_used);
 *   - DBG_MALLOC: set up the SHM allocation history log, if enabled.
 *
 * Returns 0 on success, -1 on failure.
 */
int init_shm_post_yyparse(void)
{
#ifdef HP_MALLOC
	if (mem_allocator_shm == MM_HP_MALLOC ||
	    mem_allocator_shm == MM_HP_MALLOC_DBG) {

		if (mem_warming_enabled && hp_mem_warming(shm_block) != 0)
			LM_INFO("skipped memory warming\n");

		hp_init_shm_statistics(shm_block);
	} else if (mem_warming_enabled) {
		LM_WARN("SHM memory warming only makes sense with HP_MALLOC!\n");
	}
#endif

#ifdef SHM_EXTRA_STATS
	struct multi_str *mod_name;
	int i, len;
	char *full_name = NULL;
	stat_var *p __attribute__((unused));

	if(mem_free_idx != 1){

#ifdef SHM_SHOW_DEFAULT_GROUP
		/* the "default" group shares pre-allocated stat storage */
		p = (stat_var *)&memory_mods_stats[0].fragments;
		if (register_stat(STAT_PREFIX "default", "fragments", &p, STAT_NO_RESET|STAT_NOT_ALLOCATED)!=0 ) {
			LM_CRIT("can't add stat variable");
			return -1;
		}
		p = (stat_var *)&memory_mods_stats[0].memory_used;
		if (register_stat(STAT_PREFIX "default", "memory_used", &p, STAT_NO_RESET|STAT_NOT_ALLOCATED)!=0 ) {
			LM_CRIT("can't add stat variable");
			return -1;
		}

		p = (stat_var *)&memory_mods_stats[0].real_used;
		if (register_stat(STAT_PREFIX "default", "real_used", &p, STAT_NO_RESET|STAT_NOT_ALLOCATED)!=0 ) {
			LM_CRIT("can't add stat variable");
			return -1;
		}

		p = (stat_var *)&memory_mods_stats[0].max_real_used;
		if (register_stat(STAT_PREFIX "default", "max_real_used", &p, STAT_NOT_ALLOCATED)!=0 ) {
			LM_CRIT("can't add stat variable");
			return -1;
		}

		i = mem_free_idx - 1;
#else
		i = mem_free_idx - 2;
#endif
		/* walk the module list, registering one stat group per module;
		 * 'i' counts down so stats indexes pair up with mod_names order */
		for(mod_name = mod_names; mod_name != NULL; mod_name = mod_name->next){
			len = strlen(mod_name->s);
			full_name = pkg_malloc((len + STAT_PREFIX_LEN + 1) * sizeof(char));
			/* fix: pkg_malloc() result was used unchecked -- a crash on OOM */
			if (!full_name) {
				LM_ERR("no more pkg memory\n");
				return -1;
			}

			/* NOTE(review): full_name is never freed -- presumably
			 * register_stat() keeps the pointer; verify before freeing */
			strcpy(full_name, STAT_PREFIX);
			strcat(full_name, mod_name->s);
			p = (stat_var *)&memory_mods_stats[i].fragments;
			if (register_stat(full_name, "fragments", &p, STAT_NO_RESET|STAT_NOT_ALLOCATED)!=0 ) {
				LM_CRIT("can't add stat variable");
				return -1;
			}

			p = (stat_var *)&memory_mods_stats[i].memory_used;
			if (register_stat(full_name, "memory_used", &p, STAT_NO_RESET|STAT_NOT_ALLOCATED)!=0 ) {
				LM_CRIT("can't add stat variable");
				return -1;
			}

			p = (stat_var *) &memory_mods_stats[i].real_used;
			if (register_stat(full_name, "real_used", &p, STAT_NO_RESET|STAT_NOT_ALLOCATED)!=0 ) {
				LM_CRIT("can't add stat variable");
				return -1;
			}

			p = (stat_var *) &memory_mods_stats[i].max_real_used;
			if (register_stat(full_name, "max_real_used", &p, STAT_NOT_ALLOCATED) != 0) {
				LM_CRIT("can't add stat variable");
				return -1;
			}
			i--;
		}
	}
#endif

#ifdef DBG_MALLOC
	/* optional SHM allocation history log */
	if (shm_memlog_size) {
		shm_hist = _shl_init("shm hist", shm_memlog_size, 0, 1,
		                     shm_dbg_malloc_func);
		if (!shm_hist) {
			LM_ERR("oom\n");
			return -1;
		}
	}
#endif

	return 0;
}
1072 | | |
/*
 * Tear down the shared memory subsystem:
 *   - (HP_MALLOC) persist the usage pattern file;
 *   - (SHM_EXTRA_STATS) free per-module stat storage and locks -- the
 *     "core" group's max_real_used is deliberately NOT freed here (it is
 *     presumably static/pre-allocated -- confirm against its allocation);
 *   - destroy allocator lock(s) and release the main pool mapping;
 *   - (DBG_MALLOC) likewise for the debug pool;
 *   - (!SHM_MMAP) remove the SysV segment id.
 *
 * F_PARALLEL_MALLOC builds return immediately and let the OS reclaim
 * everything at process exit.
 */
void shm_mem_destroy(void)
{
#ifdef SHM_EXTRA_STATS
	int i, core_group;
	int offset;
#endif

#ifndef SHM_MMAP
	struct shmid_ds shm_info;
#endif

#ifdef F_PARALLEL_MALLOC
	/* just let OS free for us, for now */
	return;
#endif

#ifdef HP_MALLOC
	int j;

	if (mem_allocator_shm == MM_HP_MALLOC ||
	        mem_allocator_shm == MM_HP_MALLOC_DBG)
		hp_update_shm_pattern_file();
#endif

#ifdef SHM_EXTRA_STATS
	/* only free stats if the allocator lock(s) still exist, i.e. SHM
	 * was fully initialized */
	if (memory_mods_stats && (0
#ifdef HP_MALLOC
				|| mem_locks
#endif
#if defined F_MALLOC || defined Q_MALLOC
				|| mem_lock
#endif
				)) {
		core_group = -1;
		offset = 0;
		/* without the default group, stat indexes are shifted down by 1 */
#ifndef SHM_SHOW_DEFAULT_GROUP
		offset = -1;
#endif

		if (core_index)
			core_group = core_index + offset;
		else {
#ifndef SHM_SHOW_DEFAULT_GROUP
			core_group = 0;
#endif
		}

		/* stop stat accounting while freeing the stats themselves */
		mem_skip_stats = 1;

		for (i = 0; i < mem_free_idx + offset; i++)
			if (i != core_group) {
				shm_free(memory_mods_stats[i].fragments.u.val);
				shm_free(memory_mods_stats[i].memory_used.u.val);
				shm_free(memory_mods_stats[i].real_used.u.val);
				shm_free(memory_mods_stats[i].max_real_used.u.val);
				lock_destroy(memory_mods_stats[i].lock);
				lock_dealloc(memory_mods_stats[i].lock);
			}

		/* the core group is freed last; note max_real_used is skipped here */
		if (core_group >= 0) {
			shm_free(memory_mods_stats[core_group].fragments.u.val);
			shm_free(memory_mods_stats[core_group].memory_used.u.val);
			shm_free(memory_mods_stats[core_group].real_used.u.val);
			lock_destroy(memory_mods_stats[core_group].lock);
			lock_dealloc(memory_mods_stats[core_group].lock);
		}

		shm_free((void*)memory_mods_stats);
	}
#endif

	if (0
#if defined F_MALLOC || defined Q_MALLOC
		|| mem_lock
#endif
#ifdef HP_MALLOC
		|| mem_locks
#endif
	) {
#if defined F_MALLOC || defined Q_MALLOC
		if (mem_lock) {
			LM_DBG("destroying the shared memory lock\n");
			lock_destroy(mem_lock); /* we don't need to dealloc it*/
			mem_lock = NULL;
		}
#endif

#if defined HP_MALLOC
		if (mem_locks) {
			for (j = 0; j < HP_TOTAL_HASH_SIZE; j++)
				lock_destroy(&mem_locks[j]);
			mem_locks = NULL;
		}
#endif

#ifdef STATISTICS
		if (event_shm_last)
			shm_free_unsafe(event_shm_last);
#endif
	}
	/* release the main pool itself; shm_relmem() ignores an unset pool */
	shm_relmem(shm_mempool, shm_mem_size);
	shm_mempool=INVALID_MAP;

#ifdef DBG_MALLOC
	if (shm_memlog_size) {
		if (mem_dbg_lock) {
			LM_DBG("destroying the shared debug memory lock\n");
			lock_destroy(mem_dbg_lock); /* we don't need to dealloc it*/
			mem_dbg_lock = NULL;
		}

		shm_relmem(shm_dbg_mempool, shm_dbg_pool_size);
		shm_dbg_mempool=INVALID_MAP;
	}
#endif

#ifndef SHM_MMAP
	/* SysV path: mark the segment for removal */
	if (shm_shmid!=-1) {
		shmctl(shm_shmid, IPC_RMID, &shm_info);
		shm_shmid=-1;
	}
#endif
}
1196 | | |
1197 | | void shm_relmem(void *mempool, unsigned long size) |
1198 | 0 | { |
1199 | 0 | if (mempool && (mempool!=INVALID_MAP)) { |
1200 | 0 | #ifdef SHM_MMAP |
1201 | 0 | munmap(mempool, size); |
1202 | | #else |
1203 | | shmdt(mempool); |
1204 | | #endif |
1205 | 0 | } |
1206 | 0 | } |
1207 | | |