Coverage Report

Created: 2024-02-25 06:15

/src/h2o/lib/common/memory.c
 Line|  Count|Source
    1|       |/*
    2|       | * Copyright (c) 2014 DeNA Co., Ltd.
    3|       | *
    4|       | * Permission is hereby granted, free of charge, to any person obtaining a copy
    5|       | * of this software and associated documentation files (the "Software"), to
    6|       | * deal in the Software without restriction, including without limitation the
    7|       | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    8|       | * sell copies of the Software, and to permit persons to whom the Software is
    9|       | * furnished to do so, subject to the following conditions:
   10|       | *
   11|       | * The above copyright notice and this permission notice shall be included in
   12|       | * all copies or substantial portions of the Software.
   13|       | *
   14|       | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   15|       | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   16|       | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   17|       | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   18|       | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   19|       | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   20|       | * IN THE SOFTWARE.
   21|       | */
   22|       |#include <assert.h>
   23|       |#include <errno.h>
   24|       |#include <fcntl.h>
   25|       |#include <stddef.h>
   26|       |#include <stdio.h>
   27|       |#include <stdint.h>
   28|       |#include <stdlib.h>
   29|       |#include <string.h>
   30|       |#include <stdarg.h>
   31|       |#include <sys/mman.h>
   32|       |#include <unistd.h>
   33|       |#include "h2o/memory.h"
   34|       |#include "h2o/file.h"
   35|       |
   36|       |#if defined(__linux__)
   37|       |#if defined(__ANDROID__) && (__ANDROID_API__ < 21)
   38|       |#define USE_POSIX_FALLOCATE 0
   39|       |#else
   40|       |#define USE_POSIX_FALLOCATE 1
   41|       |#endif
   42|       |#elif __FreeBSD__ >= 9
   43|       |#define USE_POSIX_FALLOCATE 1
   44|       |#elif __NetBSD__ >= 7
   45|       |#define USE_POSIX_FALLOCATE 1
   46|       |#else
   47|       |#define USE_POSIX_FALLOCATE 0
   48|       |#endif
   49|       |
   50|       |#if defined(__clang__)
   51|       |#if __has_feature(address_sanitizer)
   52|       |#define ASAN_IN_USE 1
   53|       |#endif
   54|       |#elif __SANITIZE_ADDRESS__ /* gcc */
   55|       |#define ASAN_IN_USE 1
   56|       |#else
   57|       |#define ASAN_IN_USE 0
   58|       |#endif
   59|       |
   60|       |struct st_h2o_mem_recycle_chunk_t {
   61|       |    struct st_h2o_mem_recycle_chunk_t *next;
   62|       |};
   63|       |
   64|       |union un_h2o_mem_pool_chunk_t {
   65|       |    union un_h2o_mem_pool_chunk_t *next;
   66|       |    char bytes[4096];
   67|       |};
   68|       |
   69|       |struct st_h2o_mem_pool_direct_t {
   70|       |    struct st_h2o_mem_pool_direct_t *next;
   71|       |    size_t _dummy; /* align to 2*sizeof(void*) */
   72|       |    char bytes[1];
   73|       |};
   74|       |
   75|       |struct st_h2o_mem_pool_shared_ref_t {
   76|       |    struct st_h2o_mem_pool_shared_ref_t *next;
   77|       |    struct st_h2o_mem_pool_shared_entry_t *entry;
   78|       |};
   79|       |
   80|       |void *(*volatile h2o_mem__set_secure)(void *, int, size_t) = memset;
   81|       |
   82|       |static const h2o_mem_recycle_conf_t mem_pool_allocator_conf = {.memsize = sizeof(union un_h2o_mem_pool_chunk_t)};
   83|       |__thread h2o_mem_recycle_t h2o_mem_pool_allocator = {&mem_pool_allocator_conf};
   84|       |size_t h2o_mmap_errors = 0;
   85|       |
   86|       |void h2o__fatal(const char *file, int line, const char *msg, ...)
   87|      0|{
   88|      0|    char buf[1024];
   89|      0|    va_list args;
   90|       |
   91|      0|    va_start(args, msg);
   92|      0|    vsnprintf(buf, sizeof(buf), msg, args);
   93|      0|    va_end(args);
   94|       |
   95|      0|    h2o_error_printf("fatal:%s:%d:%s\n", file, line, buf);
   96|       |
   97|      0|    abort();
   98|      0|}
   99|       |
  100|       |void *h2o_mem_alloc_recycle(h2o_mem_recycle_t *allocator)
  101|   115k|{
  102|   115k|    if (allocator->chunks.size == 0)
  103|    278|        return h2o_mem_aligned_alloc(1 << allocator->conf->align_bits, allocator->conf->memsize);
  104|       |
  105|       |    /* detach and return the pooled pointer */
  106|   114k|    void *p = allocator->chunks.entries[--allocator->chunks.size];
  107|       |
  108|       |    /* adjust low watermark */
  109|   114k|    if (allocator->low_watermark > allocator->chunks.size)
  110|      0|        allocator->low_watermark = allocator->chunks.size;
  111|       |
  112|   114k|    return p;
  113|   115k|}
  114|       |
  115|       |void h2o_mem_free_recycle(h2o_mem_recycle_t *allocator, void *p)
  116|   115k|{
  117|   115k|#if !ASAN_IN_USE
  118|       |    /* register the pointer to the pool and return unless the pool is full */
  119|   115k|    h2o_vector_reserve(NULL, &allocator->chunks, allocator->chunks.size + 1);
  120|   115k|    allocator->chunks.entries[allocator->chunks.size++] = p;
  121|       |#else
  122|       |    free(p);
  123|       |#endif
  124|   115k|}
  125|       |
  126|       |void h2o_mem_clear_recycle(h2o_mem_recycle_t *allocator, int full)
  127|      0|{
  128|       |    /* Bail out if the allocator is in the initial (cleared) state. */
  129|      0|    if (allocator->chunks.capacity == 0)
  130|      0|        return;
  131|       |
  132|      0|    if (full) {
  133|      0|        allocator->low_watermark = 0;
  134|      0|    } else {
  135|       |        /* Since the last invocation of `h2o_mem_clear_recycle`, at any given point, there was at least `low_watermark` buffers
  136|       |         * being cached for reuse. Release half of them. Division by 2 is rounded up so that `low_watermark` eventually reaches zero
  137|       |         * (instead of one) when there is no traffic. */
  138|      0|        size_t delta = (allocator->low_watermark + 1) / 2;
  139|      0|        assert(allocator->chunks.size >= delta);
  140|      0|        allocator->low_watermark = allocator->chunks.size - delta;
  141|      0|    }
  142|       |
  143|      0|    while (allocator->chunks.size > allocator->low_watermark)
  144|      0|        free(allocator->chunks.entries[--allocator->chunks.size]);
  145|       |
  146|      0|    if (allocator->chunks.size == 0) {
  147|      0|        free(allocator->chunks.entries);
  148|      0|        memset(&allocator->chunks, 0, sizeof(allocator->chunks));
  149|      0|    }
  150|      0|}
  151|       |
  152|       |void h2o_mem_init_pool(h2o_mem_pool_t *pool)
  153|  68.7k|{
  154|  68.7k|    pool->chunks = NULL;
  155|  68.7k|    pool->chunk_offset = sizeof(pool->chunks->bytes);
  156|  68.7k|    pool->directs = NULL;
  157|  68.7k|    pool->shared_refs = NULL;
  158|  68.7k|}
  159|       |
  160|       |void h2o_mem_clear_pool(h2o_mem_pool_t *pool)
  161|  68.7k|{
  162|       |    /* release the refcounted chunks */
  163|  68.7k|    if (pool->shared_refs != NULL) {
  164|  24.7k|        struct st_h2o_mem_pool_shared_ref_t *ref = pool->shared_refs;
  165|   474k|        do {
  166|   474k|            h2o_mem_release_shared(ref->entry->bytes);
  167|   474k|        } while ((ref = ref->next) != NULL);
  168|  24.7k|        pool->shared_refs = NULL;
  169|  24.7k|    }
  170|       |    /* release the direct chunks */
  171|  68.7k|    if (pool->directs != NULL) {
  172|  6.13k|        struct st_h2o_mem_pool_direct_t *direct = pool->directs, *next;
  173|  25.4k|        do {
  174|  25.4k|            next = direct->next;
  175|  25.4k|            free(direct);
  176|  25.4k|        } while ((direct = next) != NULL);
  177|  6.13k|        pool->directs = NULL;
  178|  6.13k|    }
  179|       |    /* free chunks, and reset the first chunk */
  180|   100k|    while (pool->chunks != NULL) {
  181|  31.3k|        union un_h2o_mem_pool_chunk_t *next = pool->chunks->next;
  182|  31.3k|        h2o_mem_free_recycle(&h2o_mem_pool_allocator, pool->chunks);
  183|  31.3k|        pool->chunks = next;
  184|  31.3k|    }
  185|  68.7k|    pool->chunk_offset = sizeof(pool->chunks->bytes);
  186|  68.7k|}
  187|       |
  188|       |void *h2o_mem__do_alloc_pool_aligned(h2o_mem_pool_t *pool, size_t alignment, size_t sz)
  189|  1.17M|{
  190|  1.18M|#define ALIGN_TO(x, a) (((x) + (a)-1) & ~((a)-1))
  191|  1.17M|    void *ret;
  192|       |
  193|  1.17M|    if (sz >= (sizeof(pool->chunks->bytes) - sizeof(pool->chunks->next)) / 4) {
  194|       |        /* allocate large requests directly */
  195|  25.4k|        struct st_h2o_mem_pool_direct_t *newp = h2o_mem_alloc(offsetof(struct st_h2o_mem_pool_direct_t, bytes) + sz);
  196|  25.4k|        newp->next = pool->directs;
  197|  25.4k|        pool->directs = newp;
  198|  25.4k|        return newp->bytes;
  199|  25.4k|    }
  200|       |
  201|       |    /* return a valid pointer even for 0 sized allocs */
  202|  1.15M|    if (H2O_UNLIKELY(sz == 0))
  203|     92|        sz = 1;
  204|       |
  205|  1.15M|    pool->chunk_offset = ALIGN_TO(pool->chunk_offset, alignment);
  206|  1.15M|    if (sizeof(pool->chunks->bytes) - pool->chunk_offset < sz) {
  207|       |        /* allocate new chunk */
  208|  31.3k|        union un_h2o_mem_pool_chunk_t *newp = h2o_mem_alloc_recycle(&h2o_mem_pool_allocator);
  209|  31.3k|        newp->next = pool->chunks;
  210|  31.3k|        pool->chunks = newp;
  211|  31.3k|        pool->chunk_offset = ALIGN_TO(sizeof(newp->next), alignment);
  212|  31.3k|    }
  213|       |
  214|  1.15M|    ret = pool->chunks->bytes + pool->chunk_offset;
  215|  1.15M|    pool->chunk_offset += sz;
  216|  1.15M|    return ret;
  217|  1.17M|#undef ALIGN_TO
  218|  1.17M|}
  219|       |
  220|       |static void link_shared(h2o_mem_pool_t *pool, struct st_h2o_mem_pool_shared_entry_t *entry)
  221|   474k|{
  222|   474k|    struct st_h2o_mem_pool_shared_ref_t *ref = h2o_mem_alloc_pool(pool, *ref, 1);
  223|   474k|    ref->entry = entry;
  224|   474k|    ref->next = pool->shared_refs;
  225|   474k|    pool->shared_refs = ref;
  226|   474k|}
  227|       |
  228|       |void *h2o_mem_alloc_shared(h2o_mem_pool_t *pool, size_t sz, void (*dispose)(void *))
  229|   146k|{
  230|   146k|    struct st_h2o_mem_pool_shared_entry_t *entry = h2o_mem_alloc(offsetof(struct st_h2o_mem_pool_shared_entry_t, bytes) + sz);
  231|   146k|    entry->refcnt = 1;
  232|   146k|    entry->dispose = dispose;
  233|   146k|    if (pool != NULL)
  234|   138k|        link_shared(pool, entry);
  235|   146k|    return entry->bytes;
  236|   146k|}
  237|       |
  238|       |void h2o_mem_link_shared(h2o_mem_pool_t *pool, void *p)
  239|   335k|{
  240|   335k|    h2o_mem_addref_shared(p);
  241|   335k|    link_shared(pool, H2O_STRUCT_FROM_MEMBER(struct st_h2o_mem_pool_shared_entry_t, bytes, p));
  242|   335k|}
  243|       |
  244|       |static size_t topagesize(size_t capacity)
  245|      0|{
  246|      0|    size_t pagesize = getpagesize();
  247|      0|    return (offsetof(h2o_buffer_t, _buf) + capacity + pagesize - 1) / pagesize * pagesize;
  248|      0|}
  249|       |
  250|       |/**
  251|       | * size of the smallest bin is 4096 bytes (1<<12)
  252|       | */
  253|   325k|#define H2O_BUFFER_MIN_ALLOC_POWER 12
  254|       |
  255|       |static const h2o_mem_recycle_conf_t buffer_recycle_bins_zero_sized_conf = {.memsize = sizeof(h2o_buffer_t)};
  256|       |/**
  257|       | * Retains recycle bins for `h2o_buffer_t`.
  258|       | */
  259|       |static __thread struct {
  260|       |    /**
  261|       |     * Holds recycle bins for `h2o_buffer_t`. Bin for capacity 2^x is located at x - H2O_BUFFER_MIN_ALLOC_POWER.
  262|       |     */
  263|       |    struct buffer_recycle_bin_t {
  264|       |        h2o_mem_recycle_conf_t conf;
  265|       |        h2o_mem_recycle_t recycle;
  266|       |    } *bins;
  267|       |    /**
  268|       |     * Bins for capacities no greater than this value exist.
  269|       |     */
  270|       |    size_t largest_power;
  271|       |    /**
  272|       |     * Bin containing chunks of sizeof(h2o_buffer_t). This is used by emptied buffers to retain the previous capacity.
  273|       |     */
  274|       |    h2o_mem_recycle_t zero_sized;
  275|       |} buffer_recycle_bins = {NULL, H2O_BUFFER_MIN_ALLOC_POWER - 1, {&buffer_recycle_bins_zero_sized_conf}};
  276|       |
  277|       |static unsigned buffer_size_to_power(size_t sz)
  278|   162k|{
  279|   162k|    assert(sz != 0);
  280|       |
  281|      0|    unsigned power = sizeof(unsigned long long) * 8 - __builtin_clzll(sz) - 1;
  282|   162k|    if (power < H2O_BUFFER_MIN_ALLOC_POWER) {
  283|      0|        power = H2O_BUFFER_MIN_ALLOC_POWER;
  284|   162k|    } else if (sz != (1 << power)) {
  285|  81.3k|        ++power;
  286|  81.3k|    }
  287|   162k|    return power;
  288|   162k|}
  289|       |
  290|       |void h2o_buffer_clear_recycle(int full)
  291|      0|{
  292|      0|    for (unsigned i = H2O_BUFFER_MIN_ALLOC_POWER; i <= buffer_recycle_bins.largest_power; ++i)
  293|      0|        h2o_mem_clear_recycle(&buffer_recycle_bins.bins[i - H2O_BUFFER_MIN_ALLOC_POWER].recycle, full);
  294|       |
  295|      0|    if (full) {
  296|      0|        free(buffer_recycle_bins.bins);
  297|      0|        buffer_recycle_bins.bins = NULL;
  298|      0|        buffer_recycle_bins.largest_power = H2O_BUFFER_MIN_ALLOC_POWER - 1;
  299|      0|    }
  300|       |
  301|      0|    h2o_mem_clear_recycle(&buffer_recycle_bins.zero_sized, full);
  302|      0|}
  303|       |
  304|       |int h2o_buffer_recycle_is_empty(void)
  305|      0|{
  306|      0|    for (unsigned i = H2O_BUFFER_MIN_ALLOC_POWER; i <= buffer_recycle_bins.largest_power; ++i) {
  307|      0|        if (!h2o_mem_recycle_is_empty(&buffer_recycle_bins.bins[i - H2O_BUFFER_MIN_ALLOC_POWER].recycle))
  308|      0|            return 0;
  309|      0|    }
  310|      0|    if (!h2o_mem_recycle_is_empty(&buffer_recycle_bins.zero_sized))
  311|      0|        return 0;
  312|      0|    return 1;
  313|      0|}
  314|       |
  315|       |static h2o_mem_recycle_t *buffer_get_recycle(unsigned power, int only_if_exists)
  316|   162k|{
  317|   162k|    if (power > buffer_recycle_bins.largest_power) {
  318|      7|        if (only_if_exists)
  319|      0|            return NULL;
  320|      7|        buffer_recycle_bins.bins =
  321|      7|            h2o_mem_realloc(buffer_recycle_bins.bins, sizeof(*buffer_recycle_bins.bins) * (power - H2O_BUFFER_MIN_ALLOC_POWER + 1));
  322|     16|        for (size_t p = H2O_BUFFER_MIN_ALLOC_POWER; p <= buffer_recycle_bins.largest_power; ++p) {
  323|      9|            struct buffer_recycle_bin_t *bin = buffer_recycle_bins.bins + p - H2O_BUFFER_MIN_ALLOC_POWER;
  324|      9|            bin->recycle.conf = &bin->conf;
  325|      9|        }
  326|     16|        do {
  327|     16|            ++buffer_recycle_bins.largest_power;
  328|     16|            struct buffer_recycle_bin_t *newbin =
  329|     16|                buffer_recycle_bins.bins + buffer_recycle_bins.largest_power - H2O_BUFFER_MIN_ALLOC_POWER;
  330|     16|            newbin->conf = (h2o_mem_recycle_conf_t){.memsize = (size_t)1 << buffer_recycle_bins.largest_power};
  331|     16|            newbin->recycle = (h2o_mem_recycle_t){&newbin->conf};
  332|     16|        } while (buffer_recycle_bins.largest_power < power);
  333|      7|    }
  334|       |
  335|   162k|    return &buffer_recycle_bins.bins[power - H2O_BUFFER_MIN_ALLOC_POWER].recycle;
  336|   162k|}
  337|       |
  338|       |static void buffer_init(h2o_buffer_t *buf, size_t size, char *bytes, size_t capacity, h2o_buffer_prototype_t *prototype, int fd)
  339|  83.7k|{
  340|  83.7k|    buf->size = size;
  341|  83.7k|    buf->bytes = bytes;
  342|  83.7k|    buf->capacity = capacity;
  343|  83.7k|    buf->_prototype = prototype;
  344|  83.7k|    buf->_fd = fd;
  345|  83.7k|}
  346|       |
  347|       |void h2o_buffer__do_free(h2o_buffer_t *buffer)
  348|  83.7k|{
  349|  83.7k|    assert(buffer->_prototype != NULL);
  350|       |
  351|  83.7k|    if (buffer->_fd != -1) {
  352|      0|        close(buffer->_fd);
  353|      0|        munmap((void *)buffer, topagesize(buffer->capacity));
  354|  83.7k|    } else {
  355|  83.7k|        h2o_mem_recycle_t *allocator;
  356|  83.7k|        if (buffer->bytes == NULL) {
  357|  2.39k|            allocator = &buffer_recycle_bins.zero_sized;
  358|  81.3k|        } else {
  359|  81.3k|            unsigned power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + buffer->capacity);
  360|  81.3k|            assert(((size_t)1 << power) == offsetof(h2o_buffer_t, _buf) + buffer->capacity);
  361|      0|            allocator = buffer_get_recycle(power, 0);
  362|  81.3k|            assert(allocator != NULL);
  363|  81.3k|        }
  364|      0|        h2o_mem_free_recycle(allocator, buffer);
  365|  83.7k|    }
  366|  83.7k|}
  367|       |
  368|       |h2o_iovec_t h2o_buffer_reserve(h2o_buffer_t **_inbuf, size_t min_guarantee)
  369|   113k|{
  370|   113k|    h2o_iovec_t reserved = h2o_buffer_try_reserve(_inbuf, min_guarantee);
  371|   113k|    if (reserved.base == NULL) {
  372|      0|        h2o_fatal("failed to reserve buffer; capacity: %zu, min_guarantee: %zu", (*_inbuf)->capacity, min_guarantee);
  373|      0|    }
  374|   113k|    return reserved;
  375|   113k|}
  376|       |
  377|       |static h2o_buffer_t *buffer_allocate(h2o_buffer_prototype_t *prototype, size_t min_capacity, size_t desired_capacity)
  378|  81.0k|{
  379|  81.0k|    h2o_buffer_t *newp;
  380|  81.0k|    unsigned alloc_power;
  381|       |
  382|       |    /* normalize */
  383|  81.0k|    if (min_capacity < prototype->_initial_buf.capacity)
  384|  46.6k|        min_capacity = prototype->_initial_buf.capacity;
  385|       |
  386|       |    /* try to allocate at first using `desired_capacity`, otherwise bail out to AllocNormal */
  387|  81.0k|    if (desired_capacity <= min_capacity)
  388|  81.0k|        goto AllocNormal;
  389|      0|    alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + desired_capacity);
  390|      0|    h2o_mem_recycle_t *allocator = buffer_get_recycle(alloc_power, 1);
  391|      0|    if (allocator == NULL || allocator->chunks.size == 0)
  392|      0|        goto AllocNormal;
  393|      0|    assert(allocator->conf->memsize == (size_t)1 << alloc_power);
  394|      0|    newp = h2o_mem_alloc_recycle(allocator);
  395|      0|    goto AllocDone;
  396|       |
  397|  81.0k|AllocNormal:
  398|       |    /* allocate using `min_capacity` */
  399|  81.0k|    alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + min_capacity);
  400|  81.0k|    newp = h2o_mem_alloc_recycle(buffer_get_recycle(alloc_power, 0));
  401|       |
  402|  81.0k|AllocDone:
  403|  81.0k|    buffer_init(newp, 0, newp->_buf, ((size_t)1 << alloc_power) - offsetof(h2o_buffer_t, _buf), prototype, -1);
  404|  81.0k|    return newp;
  405|  81.0k|}
  406|       |
  407|       |h2o_iovec_t h2o_buffer_try_reserve(h2o_buffer_t **_inbuf, size_t min_guarantee)
  408|   159k|{
  409|   159k|    h2o_buffer_t *inbuf = *_inbuf;
  410|   159k|    h2o_iovec_t ret;
  411|       |
  412|   159k|    if (inbuf->bytes == NULL) {
  413|  81.0k|        h2o_buffer_prototype_t *prototype;
  414|  81.0k|        size_t desired_capacity;
  415|  81.0k|        if (inbuf->_prototype == NULL) {
  416|  81.0k|            prototype = H2O_STRUCT_FROM_MEMBER(h2o_buffer_prototype_t, _initial_buf, inbuf);
  417|  81.0k|            desired_capacity = 0;
  418|  81.0k|        } else {
  419|      0|            prototype = inbuf->_prototype;
  420|      0|            desired_capacity = inbuf->capacity;
  421|      0|            h2o_mem_free_recycle(&buffer_recycle_bins.zero_sized, inbuf);
  422|      0|        }
  423|  81.0k|        inbuf = buffer_allocate(prototype, min_guarantee, desired_capacity);
  424|  81.0k|        *_inbuf = inbuf;
  425|  81.0k|    } else {
  426|  78.5k|        if (min_guarantee <= inbuf->capacity - inbuf->size - (inbuf->bytes - inbuf->_buf)) {
  427|       |            /* ok */
  428|  78.2k|        } else if ((inbuf->size + min_guarantee) * 2 <= inbuf->capacity) {
  429|       |            /* the capacity should be less than or equal to 2 times of: size + guarantee */
  430|      2|            memmove(inbuf->_buf, inbuf->bytes, inbuf->size);
  431|      2|            inbuf->bytes = inbuf->_buf;
  432|    297|        } else {
  433|    297|            size_t new_capacity = inbuf->capacity;
  434|    300|            do {
  435|    300|                new_capacity *= 2;
  436|    300|            } while (new_capacity - inbuf->size < min_guarantee);
  437|    297|            if (inbuf->_prototype->mmap_settings != NULL && inbuf->_prototype->mmap_settings->threshold <= new_capacity) {
  438|      0|                size_t new_allocsize = topagesize(new_capacity);
  439|      0|                int fd;
  440|      0|                h2o_buffer_t *newp;
  441|      0|                if (inbuf->_fd == -1) {
  442|      0|                    if ((fd = h2o_file_mktemp(inbuf->_prototype->mmap_settings->fn_template)) == -1) {
  443|      0|                        h2o_perror("failed to create temporary file");
  444|      0|                        goto MapError;
  445|      0|                    }
  446|      0|                } else {
  447|      0|                    fd = inbuf->_fd;
  448|      0|                }
  449|      0|                int fallocate_ret;
  450|      0|#if USE_POSIX_FALLOCATE
  451|      0|                fallocate_ret = posix_fallocate(fd, 0, new_allocsize);
  452|      0|                if (fallocate_ret != EINVAL) {
  453|      0|                    errno = fallocate_ret;
  454|      0|                } else
  455|      0|#endif
  456|      0|                    fallocate_ret = ftruncate(fd, new_allocsize);
  457|      0|                if (fallocate_ret != 0) {
  458|      0|                    h2o_perror("failed to resize temporary file");
  459|      0|                    goto MapError;
  460|      0|                }
  461|      0|                if ((newp = (void *)mmap(NULL, new_allocsize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0)) == MAP_FAILED) {
  462|      0|                    h2o_perror("mmap failed");
  463|      0|                    goto MapError;
  464|      0|                }
  465|      0|                if (inbuf->_fd == -1) {
  466|       |                    /* copy data (moving from malloc to mmap) */
  467|      0|                    buffer_init(newp, inbuf->size, newp->_buf, new_capacity, inbuf->_prototype, fd);
  468|      0|                    memcpy(newp->_buf, inbuf->bytes, inbuf->size);
  469|      0|                    h2o_buffer__do_free(inbuf);
  470|      0|                    *_inbuf = inbuf = newp;
  471|      0|                } else {
  472|       |                    /* munmap */
  473|      0|                    size_t offset = inbuf->bytes - inbuf->_buf;
  474|      0|                    munmap((void *)inbuf, topagesize(inbuf->capacity));
  475|      0|                    *_inbuf = inbuf = newp;
  476|      0|                    inbuf->capacity = new_capacity;
  477|      0|                    inbuf->bytes = newp->_buf + offset;
  478|      0|                }
  479|    297|            } else {
  480|    297|                unsigned alloc_power = buffer_size_to_power(offsetof(h2o_buffer_t, _buf) + new_capacity);
  481|    297|                new_capacity = ((size_t)1 << alloc_power) - offsetof(h2o_buffer_t, _buf);
  482|    297|                h2o_buffer_t *newp = h2o_mem_alloc_recycle(buffer_get_recycle(alloc_power, 0));
  483|    297|                buffer_init(newp, inbuf->size, newp->_buf, new_capacity, inbuf->_prototype, -1);
  484|    297|                memcpy(newp->_buf, inbuf->bytes, inbuf->size);
  485|    297|                h2o_buffer__do_free(inbuf);
  486|    297|                *_inbuf = inbuf = newp;
  487|    297|            }
  488|    297|        }
  489|  78.5k|    }
  490|       |
  491|   159k|    ret.base = inbuf->bytes + inbuf->size;
  492|   159k|    ret.len = inbuf->_buf + inbuf->capacity - ret.base;
  493|       |
  494|   159k|    return ret;
  495|       |
  496|      0|MapError:
  497|      0|    __sync_add_and_fetch(&h2o_mmap_errors, 1);
  498|      0|    ret.base = NULL;
  499|      0|    ret.len = 0;
  500|      0|    return ret;
  501|   159k|}
  502|       |
  503|       |void h2o_buffer_consume(h2o_buffer_t **inbuf, size_t delta)
  504|   132k|{
  505|   132k|    if (delta != 0) {
  506|   131k|        if ((*inbuf)->size == delta) {
  507|  14.8k|            h2o_buffer_consume_all(inbuf, 0);
  508|   116k|        } else {
  509|   116k|            assert((*inbuf)->bytes != NULL);
  510|      0|            (*inbuf)->size -= delta;
  511|   116k|            (*inbuf)->bytes += delta;
  512|   116k|        }
  513|   131k|    }
  514|   132k|}
  515|       |
  516|       |void h2o_buffer_consume_all(h2o_buffer_t **inbuf, int record_capacity)
  517|  17.1k|{
  518|  17.1k|    if ((*inbuf)->size != 0) {
  519|  17.1k|        if (record_capacity) {
  520|  2.39k|            h2o_buffer_t *newp = h2o_mem_alloc_recycle(&buffer_recycle_bins.zero_sized);
  521|  2.39k|            buffer_init(newp, 0, NULL, (*inbuf)->capacity, (*inbuf)->_prototype, -1);
  522|  2.39k|            h2o_buffer__do_free(*inbuf);
  523|  2.39k|            *inbuf = newp;
  524|  14.8k|        } else {
  525|  14.8k|            h2o_buffer_t *prototype_buf = &(*inbuf)->_prototype->_initial_buf;
  526|  14.8k|            h2o_buffer__do_free(*inbuf);
  527|  14.8k|            *inbuf = prototype_buf;
  528|  14.8k|        }
  529|  17.1k|    }
  530|  17.1k|}
  531|       |
  532|       |void h2o_buffer__dispose_linked(void *p)
  533|      0|{
  534|      0|    h2o_buffer_t **buf = p;
  535|      0|    h2o_buffer_dispose(buf);
  536|      0|}
  537|       |
  538|       |void h2o_vector__expand(h2o_mem_pool_t *pool, h2o_vector_t *vector, size_t alignment, size_t element_size, size_t new_capacity)
  539|   503k|{
  540|   503k|    void *new_entries;
  541|   503k|    assert(vector->capacity < new_capacity);
  542|   503k|    if (vector->capacity == 0)
  543|   253k|        vector->capacity = 4;
  544|   769k|    while (vector->capacity < new_capacity)
  545|   266k|        vector->capacity *= 2;
  546|   503k|    if (pool != NULL) {
  547|   103k|        new_entries = h2o_mem_alloc_pool_aligned(pool, alignment, element_size * vector->capacity);
  548|   103k|        h2o_memcpy(new_entries, vector->entries, element_size * vector->size);
  549|   400k|    } else {
  550|   400k|        new_entries = h2o_mem_realloc(vector->entries, element_size * vector->capacity);
  551|   400k|    }
  552|   503k|    vector->entries = new_entries;
  553|   503k|}
  554|       |
  555|       |void h2o_mem_swap(void *_x, void *_y, size_t len)
  556|      0|{
  557|      0|    char *x = _x, *y = _y;
  558|      0|    char buf[256];
  559|       |
  560|      0|    while (len != 0) {
  561|      0|        size_t blocksz = len < sizeof(buf) ? len : sizeof(buf);
  562|      0|        memcpy(buf, x, blocksz);
  563|      0|        memcpy(x, y, blocksz);
  564|      0|        memcpy(y, buf, blocksz);
  565|      0|        len -= blocksz;
  566|      0|        x += blocksz;
  567|      0|        y += blocksz;
  568|      0|    }
  569|      0|}
  570|       |
  571|       |void h2o_dump_memory(FILE *fp, const char *buf, size_t len)
  572|      0|{
  573|      0|    size_t i, j;
  574|       |
  575|      0|    for (i = 0; i < len; i += 16) {
  576|      0|        fprintf(fp, "%08zx", i);
  577|      0|        for (j = 0; j != 16; ++j) {
  578|      0|            if (i + j < len)
  579|      0|                fprintf(fp, " %02x", (int)(unsigned char)buf[i + j]);
  580|      0|            else
  581|      0|                fprintf(fp, "   ");
  582|      0|        }
  583|      0|        fprintf(fp, " ");
  584|      0|        for (j = 0; j != 16 && i + j < len; ++j) {
  585|      0|            int ch = buf[i + j];
  586|      0|            fputc(' ' <= ch && ch < 0x7f ? ch : '.', fp);
  587|      0|        }
  588|      0|        fprintf(fp, "\n");
  589|      0|    }
  590|      0|}
  591|       |
  592|       |void h2o_append_to_null_terminated_list(void ***list, void *element)
  593|      3|{
  594|      3|    size_t cnt;
  595|       |
  596|      3|    for (cnt = 0; (*list)[cnt] != NULL; ++cnt)
  597|      0|        ;
  598|      3|    *list = h2o_mem_realloc(*list, (cnt + 2) * sizeof(void *));
  599|      3|    (*list)[cnt++] = element;
  600|      3|    (*list)[cnt] = NULL;
  601|      3|}
  602|       |
  603|       |char *h2o_strerror_r(int err, char *buf, size_t len)
  604|      0|{
  605|       |#if !(defined(_GNU_SOURCE) && defined(__gnu_linux__))
  606|       |    strerror_r(err, buf, len);
  607|       |    return buf;
  608|       |#else
  609|       |    /**
  610|       |     * The GNU-specific strerror_r() returns a pointer to a string containing the error message.
  611|       |     * This may be either a pointer to a string that the function stores in  buf,
  612|       |     * or a pointer to some (immutable) static string (in which case buf is unused)
  613|       |     */
  614|      0|    return strerror_r(err, buf, len);
  615|      0|#endif
  616|      0|}
  617|       |
  618|       |void h2o_perror(const char *msg)
  619|      0|{
  620|      0|    char buf[128];
  621|       |
  622|      0|    h2o_error_printf("%s: %s\n", msg, h2o_strerror_r(errno, buf, sizeof(buf)));
  623|      0|}
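
Usage note: the pool and shared-allocation paths that dominate the counts above (h2o_mem_init_pool, h2o_mem_alloc_pool, h2o_mem_alloc_shared, h2o_mem_clear_pool) can be exercised end to end with a few calls. The following is a minimal sketch, not part of the report; it assumes the libh2o headers are on the include path, and the names main and dispose_cb are illustrative.

#include <stdio.h>
#include <string.h>
#include "h2o/memory.h"

/* invoked once, when the last reference to the shared chunk is released */
static void dispose_cb(void *bytes)
{
    printf("disposing \"%s\"\n", (char *)bytes);
}

int main(void)
{
    h2o_mem_pool_t pool;
    h2o_mem_init_pool(&pool);

    /* small allocations are carved out of the 4096-byte chunks recycled via h2o_mem_pool_allocator */
    char *s = h2o_mem_alloc_pool(&pool, char, 16);
    strcpy(s, "hello");

    /* refcounted allocation; the pool holds the initial reference and drops it in h2o_mem_clear_pool */
    char *shared = h2o_mem_alloc_shared(&pool, 16, dispose_cb);
    strcpy(shared, "shared");

    /* frees the chunks behind `s`, releases the reference on `shared`, and runs dispose_cb */
    h2o_mem_clear_pool(&pool);
    return 0;
}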
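
The buffer functions (h2o_buffer_reserve, h2o_buffer_consume, and the per-thread recycle bins behind them) follow a reserve/fill/consume pattern. Below is a sketch under stated assumptions rather than a definitive example: h2o_buffer_init and h2o_buffer_dispose come from "h2o/memory.h", and h2o_socket_buffer_prototype is assumed to be available from "h2o/socket.h" (it is declared outside this file); the stdin read loop is illustrative.

#include <stdio.h>
#include <unistd.h>
#include "h2o/memory.h"
#include "h2o/socket.h" /* assumed source of h2o_socket_buffer_prototype; any h2o_buffer_prototype_t works */

int main(void)
{
    h2o_buffer_t *buf;
    h2o_buffer_init(&buf, &h2o_socket_buffer_prototype);

    /* grow-on-demand read loop: reserve writable space, read into it, then account for the bytes */
    ssize_t rret;
    do {
        h2o_iovec_t iov = h2o_buffer_reserve(&buf, 4096); /* guarantees at least 4096 writable bytes */
        rret = read(0, iov.base, iov.len);
        if (rret > 0)
            buf->size += rret;
    } while (rret > 0);

    printf("read %zu bytes from stdin\n", buf->size);

    h2o_buffer_consume(&buf, buf->size); /* hands the storage back to the recycle bins */
    h2o_buffer_dispose(&buf);
    return 0;
}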