/src/h2o/lib/common/memory.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2014 DeNA Co., Ltd. |
3 | | * |
4 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | | * of this software and associated documentation files (the "Software"), to |
6 | | * deal in the Software without restriction, including without limitation the |
7 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
8 | | * sell copies of the Software, and to permit persons to whom the Software is |
9 | | * furnished to do so, subject to the following conditions: |
10 | | * |
11 | | * The above copyright notice and this permission notice shall be included in |
12 | | * all copies or substantial portions of the Software. |
13 | | * |
14 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | | * IN THE SOFTWARE. |
21 | | */ |
22 | | #include <assert.h> |
23 | | #include <errno.h> |
24 | | #include <fcntl.h> |
25 | | #include <stddef.h> |
26 | | #include <stdio.h> |
27 | | #include <stdint.h> |
28 | | #include <stdlib.h> |
29 | | #include <string.h> |
30 | | #include <stdarg.h> |
31 | | #include <sys/mman.h> |
32 | | #include <unistd.h> |
33 | | #include "h2o/memory.h" |
34 | | #include "h2o/file.h" |
35 | | |
36 | | #if defined(__linux__) |
37 | | #if defined(__ANDROID__) && (__ANDROID_API__ < 21) |
38 | | #define USE_POSIX_FALLOCATE 0 |
39 | | #else |
40 | | #define USE_POSIX_FALLOCATE 1 |
41 | | #endif |
42 | | #elif __FreeBSD__ >= 9 |
43 | | #define USE_POSIX_FALLOCATE 1 |
44 | | #elif __NetBSD__ >= 7 |
45 | | #define USE_POSIX_FALLOCATE 1 |
46 | | #else |
47 | | #define USE_POSIX_FALLOCATE 0 |
48 | | #endif |
49 | | |
50 | | #if defined(__clang__) |
51 | | #if __has_feature(address_sanitizer) |
52 | | #define ASAN_IN_USE 1 |
53 | | #endif |
54 | | #elif __SANITIZE_ADDRESS__ /* gcc */ |
55 | | #define ASAN_IN_USE 1 |
56 | | #else |
57 | | #define ASAN_IN_USE 0 |
58 | | #endif |
59 | | |
60 | | struct st_h2o_mem_recycle_chunk_t { |
61 | | struct st_h2o_mem_recycle_chunk_t *next; |
62 | | }; |
63 | | |
64 | | union un_h2o_mem_pool_chunk_t { |
65 | | union un_h2o_mem_pool_chunk_t *next; |
66 | | char bytes[4096]; |
67 | | }; |
68 | | |
69 | | struct st_h2o_mem_pool_direct_t { |
70 | | struct st_h2o_mem_pool_direct_t *next; |
71 | | size_t _dummy; /* align to 2*sizeof(void*) */ |
72 | | char bytes[1]; |
73 | | }; |
74 | | |
75 | | struct st_h2o_mem_pool_shared_ref_t { |
76 | | struct st_h2o_mem_pool_shared_ref_t *next; |
77 | | struct st_h2o_mem_pool_shared_entry_t *entry; |
78 | | }; |
79 | | |
80 | | void *(*volatile h2o_mem__set_secure)(void *, int, size_t) = memset; |
81 | | |
82 | | static const h2o_mem_recycle_conf_t mem_pool_allocator_conf = {.memsize = sizeof(union un_h2o_mem_pool_chunk_t)}; |
83 | | __thread h2o_mem_recycle_t h2o_mem_pool_allocator = {&mem_pool_allocator_conf}; |
84 | | size_t h2o_mmap_errors = 0; |
85 | | |
86 | | static H2O_NORETURN void default_h2o_fatal(const char *file, int line, const char *msg, ...) |
87 | 0 | { |
88 | 0 | char buf[1024]; |
89 | 0 | va_list args; |
90 | |
|
91 | 0 | va_start(args, msg); |
92 | 0 | vsnprintf(buf, sizeof(buf), msg, args); |
93 | 0 | va_end(args); |
94 | |
|
95 | 0 | h2o_error_printf("fatal:%s:%d:%s\n", file, line, buf); |
96 | |
|
97 | 0 | abort(); |
98 | 0 | } |
99 | | |
100 | | H2O_NORETURN void (*h2o__fatal)(const char *, int, const char *, ...) = default_h2o_fatal; |
101 | | |
102 | | void *h2o_mem_alloc_recycle(h2o_mem_recycle_t *allocator) |
103 | 30.0k | { |
104 | 30.0k | if (allocator->chunks.size == 0) |
105 | 120 | return h2o_mem_aligned_alloc(1 << allocator->conf->align_bits, allocator->conf->memsize); |
106 | | |
107 | | /* detach and return the pooled pointer */ |
108 | 29.8k | void *p = allocator->chunks.entries[--allocator->chunks.size]; |
109 | | |
110 | | /* adjust low watermark */ |
111 | 29.8k | if (allocator->low_watermark > allocator->chunks.size) |
112 | 0 | allocator->low_watermark = allocator->chunks.size; |
113 | | |
114 | 29.8k | return p; |
115 | 30.0k | } |
116 | | |
117 | | void h2o_mem_free_recycle(h2o_mem_recycle_t *allocator, void *p) |
118 | 30.0k | { |
119 | 30.0k | #if !ASAN_IN_USE |
120 | | /* register the pointer to the pool and return unless the pool is full */ |
121 | 30.0k | h2o_vector_reserve(NULL, &allocator->chunks, allocator->chunks.size + 1); |
122 | 30.0k | allocator->chunks.entries[allocator->chunks.size++] = p; |
123 | | #else |
124 | | free(p); |
125 | | #endif |
126 | 30.0k | } |
127 | | |
128 | | void h2o_mem_clear_recycle(h2o_mem_recycle_t *allocator, int full) |
129 | 0 | { |
130 | | /* Bail out if the allocator is in the initial (cleared) state. */ |
131 | 0 | if (allocator->chunks.capacity == 0) |
132 | 0 | return; |
133 | | |
134 | 0 | if (full) { |
135 | 0 | allocator->low_watermark = 0; |
136 | 0 | } else { |
137 | | /* Since the last invocation of `h2o_mem_clear_recycle`, at any given point, there was at least `low_watermark` buffers |
138 | | * being cached for reuse. Release half of them. Division by 2 is rounded up so that `low_watermark` eventually reaches zero |
139 | | * (instead of one) when there is no traffic. */ |
140 | 0 | size_t delta = (allocator->low_watermark + 1) / 2; |
141 | 0 | assert(allocator->chunks.size >= delta); |
142 | 0 | allocator->low_watermark = allocator->chunks.size - delta; |
143 | 0 | } |
144 | | |
145 | 0 | while (allocator->chunks.size > allocator->low_watermark) |
146 | 0 | free(allocator->chunks.entries[--allocator->chunks.size]); |
147 | |
|
148 | 0 | if (allocator->chunks.size == 0) { |
149 | 0 | free(allocator->chunks.entries); |
150 | 0 | memset(&allocator->chunks, 0, sizeof(allocator->chunks)); |
151 | 0 | } |
152 | 0 | } |
153 | | |
154 | | void h2o_mem_init_pool(h2o_mem_pool_t *pool) |
155 | 21.7k | { |
156 | 21.7k | pool->chunks = NULL; |
157 | 21.7k | pool->chunk_offset = sizeof(pool->chunks->bytes); |
158 | 21.7k | pool->directs = NULL; |
159 | 21.7k | pool->shared_refs = NULL; |
160 | 21.7k | } |
161 | | |
162 | | void h2o_mem_clear_pool(h2o_mem_pool_t *pool) |
163 | 21.7k | { |
164 | | /* release the refcounted chunks */ |
165 | 21.7k | if (pool->shared_refs != NULL) { |
166 | 6.78k | struct st_h2o_mem_pool_shared_ref_t *ref = pool->shared_refs; |
167 | 295k | do { |
168 | 295k | h2o_mem_release_shared(ref->entry->bytes); |
169 | 295k | } while ((ref = ref->next) != NULL); |
170 | 6.78k | pool->shared_refs = NULL; |
171 | 6.78k | } |
172 | | /* release the direct chunks */ |
173 | 21.7k | if (pool->directs != NULL) { |
174 | 1.35k | struct st_h2o_mem_pool_direct_t *direct = pool->directs, *next; |
175 | 3.71k | do { |
176 | 3.71k | next = direct->next; |
177 | 3.71k | free(direct); |
178 | 3.71k | } while ((direct = next) != NULL); |
179 | 1.35k | pool->directs = NULL; |
180 | 1.35k | } |
181 | | /* free chunks, and reset the first chunk */ |
182 | 29.8k | while (pool->chunks != NULL) { |
183 | 8.05k | union un_h2o_mem_pool_chunk_t *next = pool->chunks->next; |
184 | 8.05k | h2o_mem_free_recycle(&h2o_mem_pool_allocator, pool->chunks); |
185 | 8.05k | pool->chunks = next; |
186 | 8.05k | } |
187 | 21.7k | pool->chunk_offset = sizeof(pool->chunks->bytes); |
188 | 21.7k | } |
189 | | |
190 | | void *h2o_mem__do_alloc_pool_aligned(h2o_mem_pool_t *pool, size_t alignment, size_t sz) |
191 | 433k | { |
192 | 437k | #define ALIGN_TO(x, a) (((x) + (a) - 1) & ~((a) - 1)) |
193 | 433k | void *ret; |
194 | | |
195 | 433k | if (sz >= (sizeof(pool->chunks->bytes) - sizeof(pool->chunks->next)) / 4) { |
196 | | /* allocate large requests directly */ |
197 | 3.71k | struct st_h2o_mem_pool_direct_t *newp = h2o_mem_alloc(offsetof(struct st_h2o_mem_pool_direct_t, bytes) + sz); |
198 | 3.71k | newp->next = pool->directs; |
199 | 3.71k | pool->directs = newp; |
200 | 3.71k | return newp->bytes; |
201 | 3.71k | } |
202 | | |
203 | | /* return a valid pointer even for 0 sized allocs */ |
204 | 429k | if (H2O_UNLIKELY(sz == 0)) |
205 | 0 | sz = 1; |
206 | | |
207 | 429k | pool->chunk_offset = ALIGN_TO(pool->chunk_offset, alignment); |
208 | 429k | if (sizeof(pool->chunks->bytes) - pool->chunk_offset < sz) { |
209 | | /* allocate new chunk */ |
210 | 8.05k | union un_h2o_mem_pool_chunk_t *newp = h2o_mem_alloc_recycle(&h2o_mem_pool_allocator); |
211 | 8.05k | newp->next = pool->chunks; |
212 | 8.05k | pool->chunks = newp; |
213 | 8.05k | pool->chunk_offset = ALIGN_TO(sizeof(newp->next), alignment); |
214 | 8.05k | } |
215 | | |
216 | 429k | ret = pool->chunks->bytes + pool->chunk_offset; |
217 | 429k | pool->chunk_offset += sz; |
218 | 429k | return ret; |
219 | 433k | #undef ALIGN_TO |
220 | 433k | } |
221 | | |
222 | | static void link_shared(h2o_mem_pool_t *pool, struct st_h2o_mem_pool_shared_entry_t *entry) |
223 | 295k | { |
224 | 295k | struct st_h2o_mem_pool_shared_ref_t *ref = h2o_mem_alloc_pool(pool, *ref, 1); |
225 | 295k | ref->entry = entry; |
226 | 295k | ref->next = pool->shared_refs; |
227 | 295k | pool->shared_refs = ref; |
228 | 295k | } |
229 | | |
230 | | void *h2o_mem_alloc_shared(h2o_mem_pool_t *pool, size_t sz, void (*dispose)(void *)) |
231 | 116k | { |
232 | 116k | struct st_h2o_mem_pool_shared_entry_t *entry = h2o_mem_alloc(offsetof(struct st_h2o_mem_pool_shared_entry_t, bytes) + sz); |
233 | 116k | entry->refcnt = 1; |
234 | 116k | entry->dispose = dispose; |
235 | 116k | if (pool != NULL) |
236 | 112k | link_shared(pool, entry); |
237 | 116k | return entry->bytes; |
238 | 116k | } |
239 | | |
240 | | void h2o_mem_link_shared(h2o_mem_pool_t *pool, void *p) |
241 | 183k | { |
242 | 183k | h2o_mem_addref_shared(p); |
243 | 183k | link_shared(pool, H2O_STRUCT_FROM_MEMBER(struct st_h2o_mem_pool_shared_entry_t, bytes, p)); |
244 | 183k | } |
245 | | |
246 | | static size_t topagesize(size_t capacity) |
247 | 0 | { |
248 | 0 | size_t pagesize = getpagesize(); |
249 | 0 | return (offsetof(h2o_buffer_t, _buf) + capacity + pagesize - 1) / pagesize * pagesize; |
250 | 0 | } |
251 | | |
252 | | /** |
253 | | * size of the smallest bin is 4096 bytes (1<<12) |
254 | | */ |
255 | 86.2k | #define H2O_BUFFER_MIN_ALLOC_POWER 12 |
256 | | |
257 | | static const h2o_mem_recycle_conf_t buffer_recycle_bins_zero_sized_conf = {.memsize = sizeof(h2o_buffer_t)}; |
258 | | /** |
259 | | * Retains recycle bins for `h2o_buffer_t`. |
260 | | */ |
261 | | static __thread struct { |
262 | | /** |
263 | | * Holds recycle bins for `h2o_buffer_t`. Bin for capacity 2^x is located at x - H2O_BUFFER_MIN_ALLOC_POWER. |
264 | | */ |
265 | | struct buffer_recycle_bin_t { |
266 | | h2o_mem_recycle_conf_t conf; |
267 | | h2o_mem_recycle_t recycle; |
268 | | } *bins; |
269 | | /** |
270 | | * Bins for capacicties no greater than this value exist. |
271 | | */ |
272 | | size_t largest_power; |
273 | | /** |
274 | | * Bin containing chunks of sizeof(h2o_buffer_t). This is used by empties buffers to retain the previous capacity. |
275 | | */ |
276 | | h2o_mem_recycle_t zero_sized; |
277 | | } buffer_recycle_bins = {NULL, H2O_BUFFER_MIN_ALLOC_POWER - 1, {&buffer_recycle_bins_zero_sized_conf}}; |
278 | | |
279 | | static unsigned buffer_size_to_power(size_t sz) |
280 | 43.1k | { |
281 | 43.1k | assert(sz != 0); |
282 | | |
283 | 43.1k | unsigned power = sizeof(unsigned long long) * 8 - __builtin_clzll(sz) - 1; |
284 | 43.1k | if (power < H2O_BUFFER_MIN_ALLOC_POWER) { |
285 | 0 | power = H2O_BUFFER_MIN_ALLOC_POWER; |
286 | 43.1k | } else if (sz != (1 << power)) { |
287 | 21.5k | ++power; |
288 | 21.5k | } |
289 | 43.1k | return power; |
290 | 43.1k | } |
291 | | |
292 | | void h2o_buffer_clear_recycle(int full) |
293 | 0 | { |
294 | 0 | for (unsigned i = H2O_BUFFER_MIN_ALLOC_POWER; i <= buffer_recycle_bins.largest_power; ++i) |
295 | 0 | h2o_mem_clear_recycle(&buffer_recycle_bins.bins[i - H2O_BUFFER_MIN_ALLOC_POWER].recycle, full); |
296 | |
|
297 | 0 | if (full) { |
298 | 0 | free(buffer_recycle_bins.bins); |
299 | 0 | buffer_recycle_bins.bins = NULL; |
300 | 0 | buffer_recycle_bins.largest_power = H2O_BUFFER_MIN_ALLOC_POWER - 1; |
301 | 0 | } |
302 | |
|
303 | 0 | h2o_mem_clear_recycle(&buffer_recycle_bins.zero_sized, full); |
304 | 0 | } |
305 | | |
306 | | int h2o_buffer_recycle_is_empty(void) |
307 | 0 | { |
308 | 0 | for (unsigned i = H2O_BUFFER_MIN_ALLOC_POWER; i <= buffer_recycle_bins.largest_power; ++i) { |
309 | 0 | if (!h2o_mem_recycle_is_empty(&buffer_recycle_bins.bins[i - H2O_BUFFER_MIN_ALLOC_POWER].recycle)) |
310 | 0 | return 0; |
311 | 0 | } |
312 | 0 | if (!h2o_mem_recycle_is_empty(&buffer_recycle_bins.zero_sized)) |
313 | 0 | return 0; |
314 | 0 | return 1; |
315 | 0 | } |
316 | | |
317 | | static h2o_mem_recycle_t *buffer_get_recycle(unsigned power, int only_if_exists) |
318 | 43.1k | { |
319 | 43.1k | if (power > buffer_recycle_bins.largest_power) { |
320 | 2 | if (only_if_exists) |
321 | 0 | return NULL; |
322 | 2 | buffer_recycle_bins.bins = |
323 | 2 | h2o_mem_realloc(buffer_recycle_bins.bins, sizeof(*buffer_recycle_bins.bins) * (power - H2O_BUFFER_MIN_ALLOC_POWER + 1)); |
324 | 4 | for (size_t p = H2O_BUFFER_MIN_ALLOC_POWER; p <= buffer_recycle_bins.largest_power; ++p) { |
325 | 2 | struct buffer_recycle_bin_t *bin = buffer_recycle_bins.bins + p - H2O_BUFFER_MIN_ALLOC_POWER; |
326 | 2 | bin->recycle.conf = &bin->conf; |
327 | 2 | } |
328 | 6 | do { |
329 | 6 | ++buffer_recycle_bins.largest_power; |
330 | 6 | struct buffer_recycle_bin_t *newbin = |
331 | 6 | buffer_recycle_bins.bins + buffer_recycle_bins.largest_power - H2O_BUFFER_MIN_ALLOC_POWER; |
332 | 6 | newbin->conf = (h2o_mem_recycle_conf_t){.memsize = (size_t)1 << buffer_recycle_bins.largest_power}; |
333 | 6 | newbin->recycle = (h2o_mem_recycle_t){&newbin->conf}; |
334 | 6 | } while (buffer_recycle_bins.largest_power < power); |
335 | 2 | } |
336 | | |
337 | 43.1k | return &buffer_recycle_bins.bins[power - H2O_BUFFER_MIN_ALLOC_POWER].recycle; |
338 | 43.1k | } |
339 | | |
340 | | static void buffer_init(h2o_buffer_t *buf, size_t size, char *bytes, size_t capacity, h2o_buffer_prototype_t *prototype, int fd) |
341 | 21.9k | { |
342 | 21.9k | buf->size = size; |
343 | 21.9k | buf->bytes = bytes; |
344 | 21.9k | buf->capacity = capacity; |
345 | 21.9k | buf->_prototype = prototype; |
346 | 21.9k | buf->_fd = fd; |
347 | 21.9k | } |
348 | | |
349 | | void h2o_buffer__do_free(h2o_buffer_t *buffer) |
350 | 21.9k | { |
351 | 21.9k | assert(buffer->_prototype != NULL); |
352 | | |
353 | 21.9k | if (buffer->_fd != -1) { |
354 | 0 | close(buffer->_fd); |
355 | 0 | munmap((void *)buffer, topagesize(buffer->capacity)); |
356 | 21.9k | } else { |
357 | 21.9k | h2o_mem_recycle_t *allocator; |
358 | 21.9k | if (buffer->bytes == NULL) { |
359 | 395 | allocator = &buffer_recycle_bins.zero_sized; |
360 | 21.5k | } else { |
361 | 21.5k | unsigned power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + buffer->capacity); |
362 | 21.5k | assert(((size_t)1 << power) == offsetof(h2o_buffer_t, _buf) + buffer->capacity); |
363 | 21.5k | allocator = buffer_get_recycle(power, 0); |
364 | 21.5k | assert(allocator != NULL); |
365 | 21.5k | } |
366 | 21.9k | h2o_mem_free_recycle(allocator, buffer); |
367 | 21.9k | } |
368 | 21.9k | } |
369 | | |
370 | | h2o_iovec_t h2o_buffer_reserve(h2o_buffer_t **_inbuf, size_t min_guarantee) |
371 | 45.4k | { |
372 | 45.4k | h2o_iovec_t reserved = h2o_buffer_try_reserve(_inbuf, min_guarantee); |
373 | 45.4k | if (reserved.base == NULL) { |
374 | 0 | h2o_fatal("failed to reserve buffer; capacity: %zu, min_guarantee: %zu", (*_inbuf)->capacity, min_guarantee); |
375 | 0 | } |
376 | 45.4k | return reserved; |
377 | 45.4k | } |
378 | | |
379 | | static h2o_buffer_t *buffer_allocate(h2o_buffer_prototype_t *prototype, size_t min_capacity, size_t desired_capacity) |
380 | 21.4k | { |
381 | 21.4k | h2o_buffer_t *newp; |
382 | 21.4k | unsigned alloc_power; |
383 | | |
384 | | /* normalize */ |
385 | 21.4k | if (min_capacity < prototype->_initial_buf.capacity) |
386 | 10.5k | min_capacity = prototype->_initial_buf.capacity; |
387 | | |
388 | | /* try to allocate at first using `desired_capacity`, otherwise bail out to AllocNormal */ |
389 | 21.4k | if (desired_capacity <= min_capacity) |
390 | 21.4k | goto AllocNormal; |
391 | 0 | alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + desired_capacity); |
392 | 0 | h2o_mem_recycle_t *allocator = buffer_get_recycle(alloc_power, 1); |
393 | 0 | if (allocator == NULL || allocator->chunks.size == 0) |
394 | 0 | goto AllocNormal; |
395 | 0 | assert(allocator->conf->memsize == (size_t)1 << alloc_power); |
396 | 0 | newp = h2o_mem_alloc_recycle(allocator); |
397 | 0 | goto AllocDone; |
398 | | |
399 | 21.4k | AllocNormal: |
400 | | /* allocate using `min_capacity` */ |
401 | 21.4k | alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + min_capacity); |
402 | 21.4k | newp = h2o_mem_alloc_recycle(buffer_get_recycle(alloc_power, 0)); |
403 | | |
404 | 21.4k | AllocDone: |
405 | 21.4k | buffer_init(newp, 0, newp->_buf, ((size_t)1 << alloc_power) - offsetof(h2o_buffer_t, _buf), prototype, -1); |
406 | 21.4k | return newp; |
407 | 21.4k | } |
408 | | |
409 | | h2o_iovec_t h2o_buffer_try_reserve(h2o_buffer_t **_inbuf, size_t min_guarantee) |
410 | 60.0k | { |
411 | 60.0k | h2o_buffer_t *inbuf = *_inbuf; |
412 | 60.0k | h2o_iovec_t ret; |
413 | | |
414 | 60.0k | if (inbuf->bytes == NULL) { |
415 | 21.4k | h2o_buffer_prototype_t *prototype; |
416 | 21.4k | size_t desired_capacity; |
417 | 21.4k | if (inbuf->_prototype == NULL) { |
418 | 21.4k | prototype = H2O_STRUCT_FROM_MEMBER(h2o_buffer_prototype_t, _initial_buf, inbuf); |
419 | 21.4k | desired_capacity = 0; |
420 | 21.4k | } else { |
421 | 0 | prototype = inbuf->_prototype; |
422 | 0 | desired_capacity = inbuf->capacity; |
423 | 0 | h2o_mem_free_recycle(&buffer_recycle_bins.zero_sized, inbuf); |
424 | 0 | } |
425 | 21.4k | inbuf = buffer_allocate(prototype, min_guarantee, desired_capacity); |
426 | 21.4k | *_inbuf = inbuf; |
427 | 38.6k | } else { |
428 | 38.6k | if (min_guarantee <= inbuf->capacity - inbuf->size - (inbuf->bytes - inbuf->_buf)) { |
429 | | /* ok */ |
430 | 38.5k | } else if ((inbuf->size + min_guarantee) * 2 <= inbuf->capacity) { |
431 | | /* the capacity should be less than or equal to 2 times of: size + guarantee */ |
432 | 1 | memmove(inbuf->_buf, inbuf->bytes, inbuf->size); |
433 | 1 | inbuf->bytes = inbuf->_buf; |
434 | 137 | } else { |
435 | 137 | size_t new_capacity = inbuf->capacity; |
436 | 137 | do { |
437 | 137 | new_capacity *= 2; |
438 | 137 | } while (new_capacity - inbuf->size < min_guarantee); |
439 | 137 | if (inbuf->_prototype->mmap_settings != NULL && inbuf->_prototype->mmap_settings->threshold <= new_capacity) { |
440 | 0 | size_t new_allocsize = topagesize(new_capacity); |
441 | 0 | int fd; |
442 | 0 | h2o_buffer_t *newp; |
443 | 0 | if (inbuf->_fd == -1) { |
444 | 0 | if ((fd = h2o_file_mktemp(inbuf->_prototype->mmap_settings->fn_template)) == -1) { |
445 | 0 | h2o_perror("failed to create temporary file"); |
446 | 0 | goto MapError; |
447 | 0 | } |
448 | 0 | } else { |
449 | 0 | fd = inbuf->_fd; |
450 | 0 | } |
451 | 0 | int fallocate_ret; |
452 | 0 | #if USE_POSIX_FALLOCATE |
453 | 0 | fallocate_ret = posix_fallocate(fd, 0, new_allocsize); |
454 | 0 | if (fallocate_ret != EINVAL) { |
455 | 0 | errno = fallocate_ret; |
456 | 0 | } else |
457 | 0 | #endif |
458 | 0 | fallocate_ret = ftruncate(fd, new_allocsize); |
459 | 0 | if (fallocate_ret != 0) { |
460 | 0 | h2o_perror("failed to resize temporary file"); |
461 | 0 | goto MapError; |
462 | 0 | } |
463 | 0 | if ((newp = (void *)mmap(NULL, new_allocsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)) == MAP_FAILED) { |
464 | 0 | h2o_perror("mmap failed"); |
465 | 0 | goto MapError; |
466 | 0 | } |
467 | 0 | if (inbuf->_fd == -1) { |
468 | | /* copy data (moving from malloc to mmap) */ |
469 | 0 | buffer_init(newp, inbuf->size, newp->_buf, new_capacity, inbuf->_prototype, fd); |
470 | 0 | memcpy(newp->_buf, inbuf->bytes, inbuf->size); |
471 | 0 | h2o_buffer__do_free(inbuf); |
472 | 0 | *_inbuf = inbuf = newp; |
473 | 0 | } else { |
474 | | /* munmap */ |
475 | 0 | size_t offset = inbuf->bytes - inbuf->_buf; |
476 | 0 | munmap((void *)inbuf, topagesize(inbuf->capacity)); |
477 | 0 | *_inbuf = inbuf = newp; |
478 | 0 | inbuf->capacity = new_capacity; |
479 | 0 | inbuf->bytes = newp->_buf + offset; |
480 | 0 | } |
481 | 137 | } else { |
482 | 137 | unsigned alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + new_capacity); |
483 | 137 | new_capacity = ((size_t)1 << alloc_power) - offsetof(h2o_buffer_t, _buf); |
484 | 137 | h2o_buffer_t *newp = h2o_mem_alloc_recycle(buffer_get_recycle(alloc_power, 0)); |
485 | 137 | buffer_init(newp, inbuf->size, newp->_buf, new_capacity, inbuf->_prototype, -1); |
486 | 137 | memcpy(newp->_buf, inbuf->bytes, inbuf->size); |
487 | 137 | h2o_buffer__do_free(inbuf); |
488 | 137 | *_inbuf = inbuf = newp; |
489 | 137 | } |
490 | 137 | } |
491 | 38.6k | } |
492 | | |
493 | 60.0k | ret.base = inbuf->bytes + inbuf->size; |
494 | 60.0k | ret.len = inbuf->_buf + inbuf->capacity - ret.base; |
495 | | |
496 | 60.0k | return ret; |
497 | | |
498 | 0 | MapError: |
499 | 0 | __sync_add_and_fetch(&h2o_mmap_errors, 1); |
500 | 0 | ret.base = NULL; |
501 | 0 | ret.len = 0; |
502 | 0 | return ret; |
503 | 60.0k | } |
504 | | |
505 | | void h2o_buffer_consume(h2o_buffer_t **inbuf, size_t delta) |
506 | 60.1k | { |
507 | 60.1k | if (delta != 0) { |
508 | 60.1k | if ((*inbuf)->size == delta) { |
509 | 3.31k | h2o_buffer_consume_all(inbuf, 0); |
510 | 56.8k | } else { |
511 | 56.8k | assert((*inbuf)->bytes != NULL); |
512 | 56.8k | (*inbuf)->size -= delta; |
513 | 56.8k | (*inbuf)->bytes += delta; |
514 | 56.8k | } |
515 | 60.1k | } |
516 | 60.1k | } |
517 | | |
518 | | void h2o_buffer_consume_all(h2o_buffer_t **inbuf, int record_capacity) |
519 | 3.71k | { |
520 | 3.71k | if ((*inbuf)->size != 0) { |
521 | 3.71k | if (record_capacity) { |
522 | 395 | h2o_buffer_t *newp = h2o_mem_alloc_recycle(&buffer_recycle_bins.zero_sized); |
523 | 395 | buffer_init(newp, 0, NULL, (*inbuf)->capacity, (*inbuf)->_prototype, -1); |
524 | 395 | h2o_buffer__do_free(*inbuf); |
525 | 395 | *inbuf = newp; |
526 | 3.31k | } else { |
527 | 3.31k | h2o_buffer_t *prototype_buf = &(*inbuf)->_prototype->_initial_buf; |
528 | 3.31k | h2o_buffer__do_free(*inbuf); |
529 | 3.31k | *inbuf = prototype_buf; |
530 | 3.31k | } |
531 | 3.71k | } |
532 | 3.71k | } |
533 | | |
534 | | void h2o_buffer__dispose_linked(void *p) |
535 | 0 | { |
536 | 0 | h2o_buffer_t **buf = p; |
537 | 0 | h2o_buffer_dispose(buf); |
538 | 0 | } |
539 | | |
540 | | void h2o_vector__expand(h2o_mem_pool_t *pool, h2o_vector_t *vector, size_t alignment, size_t element_size, size_t new_capacity) |
541 | 216k | { |
542 | 216k | void *new_entries; |
543 | 216k | assert(vector->capacity < new_capacity); |
544 | 216k | if (vector->capacity == 0) |
545 | 77.8k | vector->capacity = 4; |
546 | 355k | while (vector->capacity < new_capacity) |
547 | 138k | vector->capacity *= 2; |
548 | 216k | if (pool != NULL) { |
549 | 21.1k | new_entries = h2o_mem_alloc_pool_aligned(pool, alignment, element_size * vector->capacity); |
550 | 21.1k | h2o_memcpy(new_entries, vector->entries, element_size * vector->size); |
551 | 195k | } else { |
552 | 195k | new_entries = h2o_mem_realloc(vector->entries, element_size * vector->capacity); |
553 | 195k | } |
554 | 216k | vector->entries = new_entries; |
555 | 216k | } |
556 | | |
557 | | void h2o_mem_swap(void *_x, void *_y, size_t len) |
558 | 0 | { |
559 | 0 | char *x = _x, *y = _y; |
560 | 0 | char buf[256]; |
561 | |
|
562 | 0 | while (len != 0) { |
563 | 0 | size_t blocksz = len < sizeof(buf) ? len : sizeof(buf); |
564 | 0 | memcpy(buf, x, blocksz); |
565 | 0 | memcpy(x, y, blocksz); |
566 | 0 | memcpy(y, buf, blocksz); |
567 | 0 | len -= blocksz; |
568 | 0 | x += blocksz; |
569 | 0 | y += blocksz; |
570 | 0 | } |
571 | 0 | } |
572 | | |
573 | | void h2o_dump_memory(FILE *fp, const char *buf, size_t len) |
574 | 0 | { |
575 | 0 | size_t i, j; |
576 | |
|
577 | 0 | for (i = 0; i < len; i += 16) { |
578 | 0 | fprintf(fp, "%08zx", i); |
579 | 0 | for (j = 0; j != 16; ++j) { |
580 | 0 | if (i + j < len) |
581 | 0 | fprintf(fp, " %02x", (int)(unsigned char)buf[i + j]); |
582 | 0 | else |
583 | 0 | fprintf(fp, " "); |
584 | 0 | } |
585 | 0 | fprintf(fp, " "); |
586 | 0 | for (j = 0; j != 16 && i + j < len; ++j) { |
587 | 0 | int ch = buf[i + j]; |
588 | 0 | fputc(' ' <= ch && ch < 0x7f ? ch : '.', fp); |
589 | 0 | } |
590 | 0 | fprintf(fp, "\n"); |
591 | 0 | } |
592 | 0 | } |
593 | | |
594 | | void h2o_append_to_null_terminated_list(void ***list, void *element) |
595 | 1 | { |
596 | 1 | size_t cnt; |
597 | | |
598 | 1 | for (cnt = 0; (*list)[cnt] != NULL; ++cnt) |
599 | 0 | ; |
600 | 1 | *list = h2o_mem_realloc(*list, (cnt + 2) * sizeof(void *)); |
601 | 1 | (*list)[cnt++] = element; |
602 | 1 | (*list)[cnt] = NULL; |
603 | 1 | } |
604 | | |
605 | | char *h2o_strerror_r(int err, char *buf, size_t len) |
606 | 0 | { |
607 | | #if !(defined(_GNU_SOURCE) && defined(__gnu_linux__)) |
608 | | strerror_r(err, buf, len); |
609 | | return buf; |
610 | | #else |
611 | | /** |
612 | | * The GNU-specific strerror_r() returns a pointer to a string containing the error message. |
613 | | * This may be either a pointer to a string that the function stores in buf, |
614 | | * or a pointer to some (immutable) static string (in which case buf is unused) |
615 | | */ |
616 | 0 | return strerror_r(err, buf, len); |
617 | 0 | #endif |
618 | 0 | } |
619 | | |
620 | | void h2o_perror(const char *msg) |
621 | 0 | { |
622 | 0 | char buf[128]; |
623 | |
|
624 | 0 | h2o_error_printf("%s: %s\n", msg, h2o_strerror_r(errno, buf, sizeof(buf))); |
625 | 0 | } |