/src/opensips/mem/q_malloc.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (C) 2001-2003 FhG Fokus |
3 | | * Copyright (C) 2019 OpenSIPS Solutions |
4 | | * |
5 | | * This file is part of opensips, a free SIP server. |
6 | | * |
7 | | * opensips is free software; you can redistribute it and/or modify |
8 | | * it under the terms of the GNU General Public License as published by |
9 | | * the Free Software Foundation; either version 2 of the License, or |
10 | | * (at your option) any later version |
11 | | * |
12 | | * opensips is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | | * GNU General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU General Public License |
18 | | * along with this program; if not, write to the Free Software |
19 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | | */ |
21 | | |
22 | | #ifdef Q_MALLOC |
23 | | |
24 | | #include <stdlib.h> |
25 | | #include <string.h> |
26 | | |
27 | | #include "q_malloc.h" |
28 | | #include "../dprint.h" |
29 | | #include "../globals.h" |
30 | | #include "../statistics.h" |
31 | | |
32 | | #ifdef DBG_MALLOC |
33 | | #include "mem_dbg_hash.h" |
34 | | #endif |
35 | | |
36 | | #include "../lib/dbg/struct_hist.h" |
37 | | |
/*useful macros*/

/* trailer (struct qm_frag_end) of fragment f: it sits right after the
 * fragment header and the f->size payload bytes */
#define FRAG_END(f) \
	((struct qm_frag_end*)(void *)((char*)(f)+sizeof(struct qm_frag)+ \
	(f)->size))

/* header of the fragment physically following f in memory */
#define FRAG_NEXT(f) \
	((struct qm_frag*)(void *)((char*)(f)+sizeof(struct qm_frag)+(f)->size+ \
	sizeof(struct qm_frag_end)))

/* header of the fragment physically preceding f: step back over the
 * previous fragment's trailer (to learn its size), then over its payload
 * and header (GCC statement-expression extension) */
#define FRAG_PREV(f) \
	({struct qm_frag_end *ep = (struct qm_frag_end*)(f) - 1; \
	(struct qm_frag*)(void *)((char*)ep - ep->size) - 1;})

/* trailer of the fragment physically preceding f */
#define PREV_FRAG_END(f) \
	((struct qm_frag_end*)(f)-1)

#define MIN_FRAG_SIZE	QM_ROUNDTO
/* per-fragment bookkeeping cost: one header plus one trailer */
#define FRAG_OVERHEAD	(sizeof(struct qm_frag)+sizeof(struct qm_frag_end))

/* mask-based rounding helpers -- valid only when QM_ROUNDTO is a power of 2 */
#define ROUNDTO_MASK	(~((unsigned long)QM_ROUNDTO-1))
#define ROUNDUP(s)		(((s)+(QM_ROUNDTO-1))&ROUNDTO_MASK)
#define ROUNDDOWN(s)	((s)&ROUNDTO_MASK)

/*
#define ROUNDUP(s)		(((s)%QM_ROUNDTO)?((s)+QM_ROUNDTO)/QM_ROUNDTO*QM_ROUNDTO:(s))
#define ROUNDDOWN(s)	(((s)%QM_ROUNDTO)?((s)-QM_ROUNDTO)/QM_ROUNDTO*QM_ROUNDTO:(s))
*/

/* finds the hash value for s, s=QM_ROUNDTO multiple:
 * sizes up to Q_MALLOC_OPTIMIZE map 1:1 onto buckets (s/QM_ROUNDTO);
 * larger sizes share one bucket per power of two, via big_hash_idx() */
#define GET_HASH(s) ( ((unsigned long)(s)<=Q_MALLOC_OPTIMIZE)?\
			(unsigned long)(s)/QM_ROUNDTO: \
			Q_MALLOC_OPTIMIZE/QM_ROUNDTO+big_hash_idx((s))- \
			Q_MALLOC_OPTIMIZE_FACTOR+1 )

/* inverse mapping: a bucket index back to its base fragment size */
#define UN_HASH(h)	( ((unsigned long)(h)<=(Q_MALLOC_OPTIMIZE/QM_ROUNDTO))?\
			(unsigned long)(h)*QM_ROUNDTO: \
			1UL<<((h)-Q_MALLOC_OPTIMIZE/QM_ROUNDTO+\
				Q_MALLOC_OPTIMIZE_FACTOR-1)\
		)

/* mark/test used/unused frags: compiled out in this build --
 * FRAG_WAS_USED() always reports "used" */
#define FRAG_MARK_USED(f)
#define FRAG_CLEAR_USED(f)
#define FRAG_WAS_USED(f)   (1)
82 | | |
83 | | /* other frag related defines: |
84 | | * MEM_COALESCE_FRAGS |
85 | | */ |
86 | | |
/* computes hash number for big buckets */
/* Given s = k*2^n (already rounded to QM_ROUNDTO), return the position of
 * the highest set bit, i.e. idx such that 2^idx <= s < 2^(idx+1).
 * Note: s must be non-zero (guaranteed by callers passing rounded sizes). */
inline static unsigned long big_hash_idx(unsigned long s)
{
	const unsigned long msb = 1UL << (sizeof(long) * 8 - 1);
	int pos = sizeof(long) * 8 - 1;

	/* shift left until the top bit is set; each shift lowers the index */
	while (!(s & msb)) {
		s <<= 1;
		pos--;
	}
	return pos;
}
99 | | |
100 | | |
101 | | #ifdef DBG_MALLOC |
102 | | |
/* canary values written into every fragment's header and trailer so that
 * buffer under/overruns can be detected in DBG_MALLOC builds */
#define ST_CHECK_PATTERN   (((~0UL) / 255) * 0xf0) /* 0xf0f..0f0f0 */
#define END_CHECK_PATTERN1 (((~0UL) / 255) * 0xc0) /* 0xc0c..0c0c0 */
#define END_CHECK_PATTERN2 ((long)0xabcdefedabcdefed)

/* Sanity-check one fragment: verify its start canary, its end canaries and
 * (unless f is the very first fragment) the previous fragment's trailing
 * canaries; abort() the process on the first corruption found.
 * @qm: memory block the fragment belongs to
 * @f:  fragment header to verify */
static void qm_debug_frag(struct qm_block *qm, struct qm_frag *f)
{
	/* header canary clobbered => something wrote before the payload */
	if (f->check!=ST_CHECK_PATTERN){
		LM_CRIT("qm_*: fragm. %p (address %p) "
				"beginning overwritten(%lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				f->check);
		abort();
	};
	/* trailer canaries clobbered => payload overrun of this fragment */
	if ((FRAG_END(f)->check1!=END_CHECK_PATTERN1)||
		(FRAG_END(f)->check2!=END_CHECK_PATTERN2)){
		LM_CRIT("qm_*: fragm. %p (address %p)"
				" end overwritten(%lx, %lx)!\n",
				f, (char*)f+sizeof(struct qm_frag),
				FRAG_END(f)->check1, FRAG_END(f)->check2);
		abort();
	}
	/* previous fragment's trailer clobbered => overrun by our neighbour;
	 * qm_dbg_coords() presumably expands to the (file, func, line) args
	 * consumed by the trailing %s/%s/%ld -- TODO confirm in q_malloc.h */
	if ((f>qm->first_frag)&&
			((PREV_FRAG_END(f)->check1!=END_CHECK_PATTERN1) ||
			(PREV_FRAG_END(f)->check2!=END_CHECK_PATTERN2) ) ){
		LM_CRIT(" qm_*: prev. fragm. tail overwritten(%lx, %lx)[%p:%p] (%s, %s:%ld)!\n",
				PREV_FRAG_END(f)->check1, PREV_FRAG_END(f)->check2, f,
				(char*)f+sizeof(struct qm_frag),
				qm_dbg_coords(FRAG_PREV(f)));
		abort();
	}
}
134 | | #endif |
135 | | |
136 | | #ifdef SHM_EXTRA_STATS |
137 | | #include "module_info.h" |
138 | | unsigned long qm_stats_get_index(void *ptr) { |
139 | | return !ptr ? GROUP_IDX_INVALID : QM_FRAG(ptr)->statistic_index; |
140 | | } |
141 | | |
142 | | void qm_stats_set_index(void *ptr, unsigned long idx) { |
143 | | if (!ptr) |
144 | | return; |
145 | | |
146 | | QM_FRAG(ptr)->statistic_index = idx; |
147 | | } |
148 | | #endif |
149 | | |
/* Compute the pool size (bytes) needed to hold the struct_hist debugging
 * bookkeeping for `hist_size` tracked objects on top of the qm_block
 * control structures.
 * NOTE(review): 56 and 88 are hard-coded sizeof values for the opaque
 * struct_hist_list / struct_hist types from ../lib/dbg/struct_hist.h --
 * assumed to stay in sync with that header; TODO confirm. */
unsigned long qm_get_dbg_pool_size(unsigned int hist_size)
{
	return ROUNDUP(sizeof(struct qm_block)) + FRAG_OVERHEAD +
		FRAG_OVERHEAD + 56 /* sizeof(struct struct_hist_list) */ + 2 * hist_size *
		(FRAG_OVERHEAD + 88 /* sizeof(struct struct_hist) */ +
		FRAG_OVERHEAD + sizeof(struct struct_hist_action));
}
157 | | |
/* Insert free fragment `frag` into the free-list bucket selected by
 * GET_HASH(frag->size).  Each bucket is a circular doubly-linked list
 * (forward links in the header, back links in the trailer) kept sorted
 * by ascending size, so the scan stops at the first member whose size is
 * >= frag->size and frag is linked right before it.  Usage counters are
 * decreased: these bytes are available again. */
static inline void qm_insert_free(struct qm_block *qm, struct qm_frag *frag)
{
	struct qm_frag *f;
	struct qm_frag *prev;
	int hash;

	hash=GET_HASH(frag->size);
	/* find the insertion point: first member not smaller than frag */
	for(f=qm->free_hash[hash].head.u.nxt_free; f!=&(qm->free_hash[hash].head);
			f=f->u.nxt_free){
		if (frag->size <= f->size) break;
	}
	/*insert it here*/
	prev=FRAG_END(f)->prev_free;
	prev->u.nxt_free=frag;
	FRAG_END(frag)->prev_free=prev;
	frag->u.nxt_free=f;
	FRAG_END(f)->prev_free=frag;
	qm->free_hash[hash].no++;

	qm->real_used-=frag->size;
#if defined(DBG_MALLOC) || defined(STATISTICS)
	qm->used-=frag->size;
#endif
}
182 | | |
183 | | |
184 | | /* init malloc and return a qm_block*/ |
/* init malloc and return a qm_block*/
/* Initialize a qm_block allocator over the raw region [address, address+size).
 * Layout after init: [qm_block header][first fragment spanning the rest],
 * with the single free fragment linked into the free hash.
 * @address: start of the raw region (may be unaligned; rounded up)
 * @size:    region length in bytes
 * @name:    label stored in the block for reporting
 * Returns the block handle, or 0 when the region is too small. */
struct qm_block *qm_malloc_init(char *address, unsigned long size, char *name)
{
	char *start;
	char *end;
	struct qm_block *qm;
	unsigned long init_overhead;
	int h;

	/* align the start address to QM_ROUNDTO */
	start=(char*)ROUNDUP((unsigned long) address);
	LM_DBG("QM_OPTIMIZE=%lu, /ROUNDTO=%lu, %lu-bytes aligned\n",
			Q_MALLOC_OPTIMIZE, Q_MALLOC_OPTIMIZE/QM_ROUNDTO,
			(unsigned long)QM_ROUNDTO);
	LM_DBG("QM_HASH_SIZE=%lu, qm_block size=%zu, frag_size=%zu\n",
			QM_HASH_SIZE, sizeof(struct qm_block), FRAG_OVERHEAD);
	LM_DBG("params (%p, %lu), start=%p\n", address, size, start);
	/* reject regions consumed entirely by the alignment adjustment */
	if (size<start-address) return 0;
	size-=(start-address);
	if (size <(MIN_FRAG_SIZE+FRAG_OVERHEAD)) return 0;
	size=ROUNDDOWN(size);

	/* overhead = block header + the initial fragment's header and trailer */
	init_overhead=ROUNDUP(sizeof(struct qm_block))+sizeof(struct qm_frag)+
		sizeof(struct qm_frag_end);
	LM_DBG("size= %lu, init_overhead=%lu\n", size, init_overhead);

	if (size < init_overhead)
	{
		/* not enough mem to create our control structures !!!*/
		return 0;
	}
	end=start+size;
	qm=(struct qm_block*)(void *)start;
	memset(qm, 0, sizeof(struct qm_block));
	qm->name=name;
	qm->size=size;
	qm->used=size-init_overhead;
	qm->fragments = 0;

	/* everything starts accounted as used; qm_insert_free() below
	 * subtracts the free fragment's size again */
	qm->real_used=size;
	qm->max_real_used = 0;
	size-=init_overhead;

	qm->first_frag=(struct qm_frag*)(void *)(start+ROUNDUP(sizeof(struct qm_block)));
	qm->last_frag_end=(struct qm_frag_end*)(void *)end-1;
	/* init initial fragment*/
	qm->first_frag->size=size;
	qm->last_frag_end->size=size;

#ifdef DBG_MALLOC
	qm->first_frag->check=ST_CHECK_PATTERN;
	qm->last_frag_end->check1=END_CHECK_PATTERN1;
	qm->last_frag_end->check2=END_CHECK_PATTERN2;
#endif
	/* init free_hash*: every bucket becomes an empty circular list whose
	 * head doubles as the sentinel */
	for (h=0; h<QM_HASH_SIZE;h++){
		qm->free_hash[h].head.u.nxt_free=&(qm->free_hash[h].head);
		qm->free_hash[h].tail.prev_free=&(qm->free_hash[h].head);
		qm->free_hash[h].head.size=0;
		qm->free_hash[h].tail.size=0;
	}

	/* link initial fragment into the free list*/

	qm_insert_free(qm, qm->first_frag);

	/*qm->first_frag->u.nxt_free=&(qm->free_lst);
	qm->last_frag_end->prev_free=&(qm->free_lst);
	*/

	return qm;
}
256 | | |
257 | | |
258 | | |
/* Unlink `frag` from its free list (forward link in the header, back link
 * in the trailer) and move its bytes back into the used accounting.
 * NOTE(review): the per-bucket counter free_hash[h].no is NOT decremented
 * here, unlike qm_insert_free() which increments it -- presumably the
 * caller adjusts it; verify at the call sites in q_malloc_dyn.h. */
static inline void qm_detach_free(struct qm_block *qm, struct qm_frag *frag)
{
	struct qm_frag *prev;
	struct qm_frag *next;

	prev=FRAG_END(frag)->prev_free;
	next=frag->u.nxt_free;
	prev->u.nxt_free=next;
	FRAG_END(next)->prev_free=prev;

	qm->real_used+=frag->size;
#if defined(DBG_MALLOC) || defined(STATISTICS)
	qm->used+=frag->size;
#endif
}
274 | | |
275 | | |
276 | | |
/* Find the first free fragment of at least `size` bytes.
 * Starts at the bucket matching `size` and falls through to bigger buckets
 * on miss.  On success stores the bucket index in *h and returns the
 * fragment (still linked in its free list); returns 0 when no bucket holds
 * a big-enough fragment.  In DBG_MALLOC builds, *count is incremented once
 * per fragment visited (search-effort statistic). */
#ifdef DBG_MALLOC
static inline struct qm_frag *qm_find_free(struct qm_block *qm,
                                           unsigned long size,
                                           int *h,
                                           unsigned int *count)
#else
static inline struct qm_frag *qm_find_free(struct qm_block *qm,
                                           unsigned long size,
                                           int *h)
#endif
{
	int hash;
	struct qm_frag *f;

	for (hash=GET_HASH(size); hash<QM_HASH_SIZE; hash++){
		for (f=qm->free_hash[hash].head.u.nxt_free;
				f!=&(qm->free_hash[hash].head); f=f->u.nxt_free){
#ifdef DBG_MALLOC
			*count+=1; /* *count++ generates a warning with gcc 2.9* -Wall */
#endif
			/* buckets above Q_MALLOC_OPTIMIZE hold a range of sizes,
			 * so the per-fragment size check is still required */
			if (f->size>=size){ *h=hash; return f; }
		}
		/*try in a bigger bucket*/
	}
	/* not found */
	return 0;
}
304 | | |
305 | | #include "q_malloc_dyn.h" |
306 | | |
307 | | #if !defined INLINE_ALLOC && defined DBG_MALLOC |
308 | | #undef DBG_MALLOC |
309 | | #include "q_malloc_dyn.h" |
310 | | #define DBG_MALLOC |
311 | | #endif |
312 | | |
313 | | #ifdef SHM_EXTRA_STATS |
314 | | void qm_stats_core_init(struct qm_block *qm, int core_index) |
315 | | { |
316 | | struct qm_frag *f; |
317 | | |
318 | | for (f=qm->first_frag; (char*)f<(char*)qm->last_frag_end; f=FRAG_NEXT(f)) |
319 | | if (!f->u.is_free) |
320 | | f->statistic_index = core_index; |
321 | | } |
322 | | #endif |
323 | | |
324 | | |
325 | | |
326 | | /* fills a malloc info structure with info about the block |
327 | | * if a parameter is not supported, it will be filled with 0 */ |
328 | | void qm_info(struct qm_block *qm, struct mem_info *info) |
329 | 0 | { |
330 | 0 | int r; |
331 | 0 | long total_frags; |
332 | |
|
333 | 0 | total_frags=0; |
334 | 0 | memset(info,0, sizeof(*info)); |
335 | 0 | info->total_size=qm->size; |
336 | 0 | info->min_frag=MIN_FRAG_SIZE; |
337 | 0 | info->free=qm->size-qm->real_used; |
338 | 0 | info->used=qm->used; |
339 | 0 | info->real_used=qm->real_used; |
340 | 0 | info->max_used=qm->max_real_used; |
341 | 0 | for(r=0;r<QM_HASH_SIZE; r++){ |
342 | 0 | total_frags+=qm->free_hash[r].no; |
343 | 0 | } |
344 | 0 | info->total_frags=total_frags; |
345 | 0 | } |
346 | | |
347 | | #ifdef DBG_MALLOC |
348 | | int qm_mem_check(struct qm_block *qm) |
349 | | { |
350 | | struct qm_frag *f; |
351 | | int i = 0; |
352 | | |
353 | | for (f = qm->first_frag; (char *)f < (char *)qm->last_frag_end; |
354 | | f = FRAG_NEXT(f), i++) { |
355 | | |
356 | | qm_debug_frag(qm, f); |
357 | | } |
358 | | |
359 | | LM_DBG("fragments: %d\n", i); |
360 | | |
361 | | return i; |
362 | | } |
363 | | #endif |
364 | | |
365 | | |
366 | | #endif |