/src/lvm2/libdm/mm/pool-fast.c
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef VALGRIND_POOL
#include <memcheck.h>
#endif

#include "base/memory/zalloc.h"
#include "libdm/misc/dmlib.h"
#include <stddef.h>	/* For musl libc */
#include <malloc.h>

struct chunk {
	char *begin, *end;
	struct chunk *prev;
} __attribute__((aligned(8)));

struct dm_pool {
	struct dm_list list;
	struct chunk *chunk, *spare_chunk;	/* spare_chunk is a one entry free
						   list to stop 'bobbling' */
	const char *name;
	size_t chunk_size;
	size_t object_len;
	unsigned object_alignment;
	int locked;
	long crc;
};
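
/*
 * Chunk layout: each chunk is one malloc'd block with the struct chunk
 * header at its start and the allocatable bytes directly after it, so
 * (char *) (c + 1) is the first usable byte and 'begin'..'end' is the
 * space still free.  Chunks form a stack linked through 'prev', with
 * p->chunk pointing at the newest one.
 */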

static void _align_chunk(struct chunk *c, unsigned alignment);
static struct chunk *_new_chunk(struct dm_pool *p, size_t s);
static void _free_chunk(struct chunk *c);

/* by default things come out aligned for doubles */
#define DEFAULT_ALIGNMENT __alignof__ (double)

struct dm_pool *dm_pool_create(const char *name, size_t chunk_hint)
{
	size_t new_size = 1024;
	struct dm_pool *p = dm_zalloc(sizeof(*p));

	if (!p) {
		log_error("Couldn't create memory pool %s (size %"
			  PRIsize_t ")", name, sizeof(*p));
		return 0;
	}

	p->name = name;
	/* round chunk_hint plus the chunk header up to the next power of 2, min 1024 */
	p->chunk_size = chunk_hint + sizeof(struct chunk);
	while (new_size < p->chunk_size)
		new_size <<= 1;
	p->chunk_size = new_size;
	pthread_mutex_lock(&_dm_pools_mutex);
	dm_list_add(&_dm_pools, &p->list);
	pthread_mutex_unlock(&_dm_pools_mutex);
	return p;
}
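
/*
 * Example usage (a sketch; 'mem', 'buf' and the sizes are illustrative).
 * The name is only a debugging label and chunk_hint merely sizes the
 * first chunk, so a rough guess is fine:
 *
 *	struct dm_pool *mem = dm_pool_create("metadata", 1024);
 *
 *	if (mem) {
 *		char *buf = dm_pool_alloc(mem, 64);
 *		... use buf ...
 *		dm_pool_destroy(mem);	(releases every chunk at once)
 *	}
 */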

void dm_pool_destroy(struct dm_pool *p)
{
	struct chunk *c, *pr;
	_free_chunk(p->spare_chunk);
	c = p->chunk;
	while (c) {
		pr = c->prev;
		_free_chunk(c);
		c = pr;
	}

	pthread_mutex_lock(&_dm_pools_mutex);
	dm_list_del(&p->list);
	pthread_mutex_unlock(&_dm_pools_mutex);
	dm_free(p);
}

void *dm_pool_alloc(struct dm_pool *p, size_t s)
{
	return dm_pool_alloc_aligned(p, s, DEFAULT_ALIGNMENT);
}

void *dm_pool_alloc_aligned(struct dm_pool *p, size_t s, unsigned alignment)
{
	struct chunk *c = p->chunk;
	void *r;

	/* realign begin */
	if (c)
		_align_chunk(c, alignment);

	/* have we got room ? */
	if (!c || (c->begin > c->end) || ((c->end - c->begin) < (int) s)) {
		/* allocate new chunk */
		size_t needed = s + alignment + sizeof(struct chunk);
		c = _new_chunk(p, (needed > p->chunk_size) ?
			       needed : p->chunk_size);

		if (!c)
			return_NULL;

		_align_chunk(c, alignment);
	}

	r = c->begin;
	c->begin += s;

#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_UNDEFINED(r, s);
#endif

	return r;
}
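
/*
 * Example (a sketch; 'mem' and 'struct job' are illustrative).  Alignment
 * must be a power of 2; a request that does not fit the current chunk
 * gets a fresh chunk sized at least s + alignment + header:
 *
 *	struct job *j = dm_pool_alloc_aligned(mem, sizeof(*j), 64);
 */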

void dm_pool_empty(struct dm_pool *p)
{
	struct chunk *c;

	for (c = p->chunk; c && c->prev; c = c->prev)
		;

	if (c)
		dm_pool_free(p, (char *) (c + 1));
}

void dm_pool_free(struct dm_pool *p, void *ptr)
{
	struct chunk *c = p->chunk;

	while (c) {
		if (((char *) c < (char *) ptr) &&
		    ((char *) c->end > (char *) ptr)) {
			c->begin = ptr;
#ifdef VALGRIND_POOL
			VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
			break;
		}

		if (p->spare_chunk)
			_free_chunk(p->spare_chunk);

		c->begin = (char *) (c + 1);
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif

		p->spare_chunk = c;
		c = c->prev;
	}

	if (!c)
		log_error(INTERNAL_ERROR "pool_free asked to free pointer "
			  "not in pool");
	else
		p->chunk = c;
}
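
/*
 * Note the LIFO contract above: dm_pool_free() rewinds the pool to 'ptr',
 * discarding everything allocated after it.  One emptied chunk is cached
 * in spare_chunk for reuse; the rest are freed.  A sketch, with 'mem' as
 * before:
 *
 *	void *mark = dm_pool_alloc(mem, 1);
 *	... further allocations ...
 *	dm_pool_free(mem, mark);	(drops 'mark' and everything after)
 */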

int dm_pool_begin_object(struct dm_pool *p, size_t hint)
{
	struct chunk *c = p->chunk;
	const size_t align = DEFAULT_ALIGNMENT;

	p->object_len = 0;
	p->object_alignment = align;

	if (c)
		_align_chunk(c, align);

	if (!c || (c->begin > c->end) || ((c->end - c->begin) < (int) hint)) {
		/* allocate a new chunk */
		c = _new_chunk(p,
			       hint > (p->chunk_size - sizeof(struct chunk)) ?
			       hint + sizeof(struct chunk) + align :
			       p->chunk_size);

		if (!c)
			return 0;

		_align_chunk(c, align);
	}

	return 1;
}

int dm_pool_grow_object(struct dm_pool *p, const void *extra, size_t delta)
{
	struct chunk *c = p->chunk, *nc;

	if (!delta)
		delta = strlen(extra);

	if ((c->end - (c->begin + p->object_len)) < (int) delta) {
		/* move into a new chunk */
		if (p->object_len + delta > (p->chunk_size / 2))
			nc = _new_chunk(p, (p->object_len + delta) * 2);
		else
			nc = _new_chunk(p, p->chunk_size);

		if (!nc)
			return 0;

		_align_chunk(p->chunk, p->object_alignment);

#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_UNDEFINED(p->chunk->begin, p->object_len);
#endif

		memcpy(p->chunk->begin, c->begin, p->object_len);

#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, p->object_len);
#endif

		c = p->chunk;
	}

#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_UNDEFINED(p->chunk->begin + p->object_len, delta);
#endif

	memcpy(c->begin + p->object_len, extra, delta);
	p->object_len += delta;
	return 1;
}

void *dm_pool_end_object(struct dm_pool *p)
{
	struct chunk *c = p->chunk;
	void *r = c->begin;
	c->begin += p->object_len;
	p->object_len = 0u;
	p->object_alignment = DEFAULT_ALIGNMENT;
	return r;
}

void dm_pool_abandon_object(struct dm_pool *p)
{
#ifdef VALGRIND_POOL
	VALGRIND_MAKE_MEM_NOACCESS(p->chunk, p->object_len);
#endif
	p->object_len = 0;
	p->object_alignment = DEFAULT_ALIGNMENT;
}
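
/*
 * The four calls above form a grow-as-you-go API for building a single
 * variable length object.  A sketch (assuming 'mem' as before) that
 * concatenates strings; delta 0 means "use strlen(extra)", so the
 * terminating NUL must be appended explicitly:
 *
 *	char *name;
 *
 *	if (!dm_pool_begin_object(mem, 64))
 *		return NULL;
 *	if (!dm_pool_grow_object(mem, "foo", 0) ||
 *	    !dm_pool_grow_object(mem, "-bar", 0) ||
 *	    !dm_pool_grow_object(mem, "\0", 1)) {
 *		dm_pool_abandon_object(mem);
 *		return NULL;
 *	}
 *	name = dm_pool_end_object(mem);
 */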

static void _align_chunk(struct chunk *c, unsigned alignment)
{
	c->begin += alignment - ((unsigned long) c->begin & (alignment - 1));
}
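
/*
 * Note that when 'begin' is already aligned this still advances it by a
 * full 'alignment' bytes; callers allow for this by reserving
 * s + alignment + sizeof(struct chunk) when sizing a fresh chunk.
 */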

static struct chunk *_new_chunk(struct dm_pool *p, size_t s)
{
	struct chunk *c;

	if (p->spare_chunk &&
	    ((p->spare_chunk->end - p->spare_chunk->begin) >= (ptrdiff_t) s)) {
		/* reuse old chunk */
		c = p->spare_chunk;
		p->spare_chunk = 0;
	} else {
#ifdef DEBUG_ENFORCE_POOL_LOCKING
		if (!_pagesize) {
			_pagesize = getpagesize(); /* lvm_pagesize(); */
			_pagesize_mask = _pagesize - 1;
		}
		/*
		 * Allocate a page-aligned size so malloc still works:
		 * otherwise writes to malloc's own internal pointers,
		 * unrelated to the pool, would page fault once the
		 * pool's pages are protected.
		 */
# define aligned_malloc(s)	(posix_memalign((void **) &c, _pagesize, \
						ALIGN_ON_PAGE(s)) == 0)
#else
# define aligned_malloc(s)	(c = dm_malloc(s))
#endif /* DEBUG_ENFORCE_POOL_LOCKING */
		if (!aligned_malloc(s)) {
#undef aligned_malloc
			log_error("Out of memory. Requested %" PRIsize_t
				  " bytes.", s);
			return NULL;
		}

		c->begin = (char *) (c + 1);
		c->end = (char *) c + s;

#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_NOACCESS(c->begin, c->end - c->begin);
#endif
	}

	c->prev = p->chunk;
	p->chunk = c;
	return c;
}

static void _free_chunk(struct chunk *c)
{
#ifdef VALGRIND_POOL
# ifdef DEBUG_MEM
	if (c)
		VALGRIND_MAKE_MEM_UNDEFINED(c + 1, c->end - (char *) (c + 1));
# endif
#endif
#ifdef DEBUG_ENFORCE_POOL_LOCKING
	/* since DEBUG_MEM is using own memory list */
	free(c); /* for posix_memalign() */
#else
	dm_free(c);
#endif
}


/**
 * Calculate a crc/hash over the pool's memory chunks, including their
 * internal pointers.
 */
static long _pool_crc(const struct dm_pool *p)
{
	long crc_hash = 0;
#ifndef DEBUG_ENFORCE_POOL_LOCKING
	const struct chunk *c;
	const long *ptr, *end;

	for (c = p->chunk; c; c = c->prev) {
		end = (const long *) (c->begin < c->end ? (long) c->begin & ~7 : (long) c->end);
		ptr = (const long *) c;
#ifdef VALGRIND_POOL
		VALGRIND_MAKE_MEM_DEFINED(ptr, (end - ptr) * sizeof(*end));
#endif
		while (ptr < end) {
			crc_hash += *ptr++;
			crc_hash += (crc_hash << 10);
			crc_hash ^= (crc_hash >> 6);
		}
	}
#endif /* DEBUG_ENFORCE_POOL_LOCKING */

	return crc_hash;
}
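
/*
 * The mixing steps above match the inner loop of Jenkins' one-at-a-time
 * hash (applied per long rather than per byte); despite the name this is
 * a hash, not a true CRC.  It gives the pool locking code a cheap way to
 * detect unexpected writes to a locked pool.
 */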

static int _pool_protect(struct dm_pool *p, int prot)
{
#ifdef DEBUG_ENFORCE_POOL_LOCKING
	struct chunk *c;

	for (c = p->chunk; c; c = c->prev) {
		if (mprotect(c, (size_t) ((c->end - (char *) c) - 1), prot) != 0) {
			log_sys_error("mprotect", "");
			return 0;
		}
	}
#endif
	return 1;
}