Coverage Report

Created: 2025-02-15 06:25

/src/wireshark/wsutil/wmem/wmem_allocator_block.c
/* wmem_allocator_block.c
 * Wireshark Memory Manager Large-Block Allocator (version 3)
 * Copyright 2013, Evan Huus <eapache@gmail.com>
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <stdio.h>
#include <string.h>

#include <glib.h>

#include "wmem_core.h"
#include "wmem_allocator.h"
#include "wmem_allocator_block.h"

/* This has turned into a very interesting exercise in algorithms and data
 * structures.
 *
 * HISTORY
 *
 * Version 1 of this allocator was embedded in the original emem framework. It
 * didn't have to handle realloc or free, so it was very simple: it just grabbed
 * a block from the OS and served allocations sequentially out of that until it
 * ran out, then allocated a new block. The old block was never revisited, so
 * it generally had a bit of wasted space at the end, but the waste was
 * small enough that it was simply ignored. This allocator provided very fast
 * constant-time allocation for any request that didn't require a new block from
 * the OS, and that cost could be amortized away.
 *
 * Version 2 of this allocator was prompted by the need to support realloc and
 * free in wmem. The original version simply didn't save enough metadata to do
 * this, so I added a layer on top to make it possible. The primary principle
 * was the same (allocate sequentially out of big blocks) with a bit of extra
 * magic. Allocations were still fast constant-time, and frees were as well.
 * Large parts of that design are still present in this one, but for more
 * details see older versions of this file from git or svn.
 *
 * Version 3 of this allocator was written to address some issues that
 * eventually showed up with version 2 under real-world usage. Specifically,
 * version 2 dealt very poorly with memory fragmentation, almost never reusing
 * freed blocks and choosing to just keep allocating from the master block
 * instead. This led to particularly poor behaviour under the tick-tock loads
 * (alloc/free/alloc/free or alloc/alloc/free/alloc/alloc/free or ...) that
 * showed up in a couple of different protocol dissectors (TCP, Kafka).
 *
 * BLOCKS AND CHUNKS
 *
 * As in previous versions, allocations typically happen sequentially out of
 * large OS-level blocks. Each block has a short embedded header used to
 * maintain a doubly-linked list of all blocks (used or not) currently owned by
 * the allocator. Each block is divided into chunks, which represent allocations
 * and free sections (a block is initialized with one large, free, chunk). Each
 * chunk is prefixed with a wmem_block_chunk_t structure, which is a short
 * metadata header (8 bytes, regardless of 32 or 64-bit architecture unless
 * alignment requires it to be padded) that contains the length of the chunk,
 * the length of the previous chunk, a flag marking the chunk as free or used,
 * and a flag marking the last chunk in a block. This serves to implement an
 * inline sequential doubly-linked list of all the chunks in each block. A block
 * with three chunks might look something like this:
 *
 *          0                    _________________________
 *          ^      ___________  /        ______________   \       __________
 * ||---||--|-----/-----------||--------/--------------||--\-----/----------||
 * ||hdr|| prv | len |  body  || prv | len |   body    || prv | len | body  ||
 * ||---||--------------------||--/--------------------||-------------------||
 *        \______________________/
 *
 *
 * When allocating, a free chunk is found (more on that later) and split into
 * two chunks: the first of the requested size and the second containing any
 * remaining free space. The first is marked used and returned to the caller.
 *
 * When freeing, the chunk in question is marked as free. Its neighbouring
 * chunks are then checked; if either of them is free, the consecutive free
 * chunks are merged into a single larger free chunk. Induction can show that
 * applying this operation consistently prevents us from ever having consecutive
 * free chunks.
 *
 * Free chunks (because they are not being used for anything else) each store an
 * additional pair of pointers (see the wmem_block_free_t structure) that form
 * the backbone of the data structures used to track free chunks.
 *
 * MASTER AND RECYCLER
 *
 * The extra pair of pointers in free chunks are used to build two doubly-linked
 * lists: the master and the recycler. The recycler is circular, the master is
 * a stack.
 *
 * The master stack is only populated by chunks from new OS-level blocks,
 * so every chunk in this list is guaranteed to be able to serve any allocation
 * request (the allocator will not serve requests larger than its block size).
 * The chunk at the head of the master list shrinks as it serves requests. When
 * it is too small to serve the current request, it is popped and inserted into
 * the recycler. If the master list is empty, a new OS-level block is allocated,
 * and its chunk is pushed onto the master stack.
 *
 * The recycler is populated by 'leftovers' from the master, as well as any
 * chunks that were returned to the allocator via a call to free(). Although the
 * recycler is circular, we will refer to the element referenced from the
 * allocator as the 'head' of the list for convenience. The primary operation on
 * the recycler is called cycling it. In this operation, the head is compared
 * with its clockwise neighbour. If the neighbour is as large or larger, it
 * becomes the head (the list rotates counter-clockwise). If the neighbour is
 * smaller, then it is removed from its location and inserted as the counter-
 * clockwise neighbour of the head (the list still rotates counter-clockwise,
 * but the head element is held fixed while the rest of the list spins). This
 * operation has the following properties:
 *  - fast constant time
 *  - once the largest chunk is at the head, it remains at the head
 *  - more iterations increase the probability that the largest chunk will be
 *    the head (for a list with n items, n iterations guarantee that the
 *    largest chunk will be the head).
 *
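 * As a small worked example: suppose the recycler holds chunks of sizes
 * 4, 9 and 2 (clockwise), with the 4 at the head. The first cycle sees
 * that the neighbour (9) is at least as large, so the list rotates and
 * the 9 becomes the head. The next cycle sees that the neighbour (2) is
 * smaller, so the 2 is re-inserted as the head's counter-clockwise
 * neighbour and the 9 stays put; from then on the largest chunk remains
 * at the head.
 *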
 * ALLOCATING
 *
 * When an allocation request is received, the allocator first attempts to
 * satisfy it with the chunk at the head of the recycler. If that does not
 * succeed, the request is satisfied by the master list instead. Regardless of
 * which chunk satisfied the request, the recycler is always cycled.
 */

/* https://mail.gnome.org/archives/gtk-devel-list/2004-December/msg00091.html
 * The 2*sizeof(size_t) alignment here is borrowed from GNU libc, so it should
 * be good most everywhere. It is more conservative than is needed on some
 * 64-bit platforms, but ia64 does require a 16-byte alignment. The SIMD
 * extensions for x86 and ppc32 would want a larger alignment than this, but
 * we don't need to do better than malloc.
 */
#define WMEM_ALIGN_AMOUNT (2 * sizeof (size_t))
#define WMEM_ALIGN_SIZE(SIZE) ((~(WMEM_ALIGN_AMOUNT-1)) & \
        ((SIZE) + (WMEM_ALIGN_AMOUNT-1)))
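
/* A worked example of the alignment arithmetic (illustrative; assumes an
 * LP64 platform where sizeof(size_t) == 8, so WMEM_ALIGN_AMOUNT == 16):
 *
 *     WMEM_ALIGN_SIZE(1)  == ((1  + 15) & ~15) == 16
 *     WMEM_ALIGN_SIZE(16) == ((16 + 15) & ~15) == 16
 *     WMEM_ALIGN_SIZE(17) == ((17 + 15) & ~15) == 32
 *
 * i.e. sizes are rounded up to the next multiple of 16. */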

/* When required, allocate more memory from the OS in chunks of this size.
 * 8MB is a pretty arbitrary value - it's big enough that it should last a while
 * and small enough that a mostly-unused one doesn't waste *too* much. It's
 * also a nice power of two, of course. */
#define WMEM_BLOCK_SIZE (8 * 1024 * 1024)

/* The header for an entire OS-level 'block' of memory */
typedef struct _wmem_block_hdr_t {
    struct _wmem_block_hdr_t *prev, *next;
} wmem_block_hdr_t;

/* The header for a single 'chunk' of memory as returned from alloc/realloc.
 * The 'jumbo' flag indicates an allocation larger than a normal-sized block
 * would be capable of serving. If this is set, it is the only chunk in the
 * block and the other chunk header fields are irrelevant.
 */
typedef struct _wmem_block_chunk_t {
    uint32_t prev;

    /* flags */
    uint32_t last:1;
    uint32_t used:1;
    uint32_t jumbo:1;

    uint32_t len:29;
} wmem_block_chunk_t;

/* Handy macros for navigating the chunks in a block as if they were a
 * doubly-linked list. */
#define WMEM_CHUNK_PREV(CHUNK) ((CHUNK)->prev \
        ? ((wmem_block_chunk_t*)(((uint8_t*)(CHUNK)) - (CHUNK)->prev)) \
        : NULL)

#define WMEM_CHUNK_NEXT(CHUNK) ((CHUNK)->last \
        ? NULL \
        : ((wmem_block_chunk_t*)(((uint8_t*)(CHUNK)) + (CHUNK)->len)))

#define WMEM_CHUNK_HEADER_SIZE WMEM_ALIGN_SIZE(sizeof(wmem_block_chunk_t))

#define WMEM_BLOCK_MAX_ALLOC_SIZE (WMEM_BLOCK_SIZE - \
        (WMEM_BLOCK_HEADER_SIZE + WMEM_CHUNK_HEADER_SIZE))
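
/* Worked numbers, again assuming LP64: sizeof(wmem_block_hdr_t) == 16 and
 * sizeof(wmem_block_chunk_t) == 8, so both WMEM_BLOCK_HEADER_SIZE and
 * WMEM_CHUNK_HEADER_SIZE align up to 16 bytes, and
 * WMEM_BLOCK_MAX_ALLOC_SIZE == 8MB - 32 == 8388576 bytes. The 29-bit len
 * field can count up to 2^29 - 1 (~512MB), so a chunk spanning a whole
 * block always fits. */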

/* other handy chunk macros */
#define WMEM_CHUNK_TO_DATA(CHUNK) ((void*)((uint8_t*)(CHUNK) + WMEM_CHUNK_HEADER_SIZE))
#define WMEM_DATA_TO_CHUNK(DATA) ((wmem_block_chunk_t*)((uint8_t*)(DATA) - WMEM_CHUNK_HEADER_SIZE))
#define WMEM_CHUNK_DATA_LEN(CHUNK) ((CHUNK)->len - WMEM_CHUNK_HEADER_SIZE)

/* some handy block macros */
#define WMEM_BLOCK_HEADER_SIZE WMEM_ALIGN_SIZE(sizeof(wmem_block_hdr_t))
#define WMEM_BLOCK_TO_CHUNK(BLOCK) ((wmem_block_chunk_t*)((uint8_t*)(BLOCK) + WMEM_BLOCK_HEADER_SIZE))
#define WMEM_CHUNK_TO_BLOCK(CHUNK) ((wmem_block_hdr_t*)((uint8_t*)(CHUNK) - WMEM_BLOCK_HEADER_SIZE))

/* This is what the 'data' section of a chunk contains if it is free. */
typedef struct _wmem_block_free_t {
    wmem_block_chunk_t *prev, *next;
} wmem_block_free_t;

/* Handy macro for accessing the free-header of a chunk */
#define WMEM_GET_FREE(CHUNK) ((wmem_block_free_t*)WMEM_CHUNK_TO_DATA(CHUNK))
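
/* Note that the free-header lives in the chunk's own data area, overlaying
 * its first sizeof(wmem_block_free_t) bytes (two pointers; 16 bytes on
 * LP64). This is why the code below skips free-list bookkeeping for chunks
 * whose data area is too small to hold it: there is nowhere to put the
 * pointers. */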

typedef struct _wmem_block_allocator_t {
    wmem_block_hdr_t   *block_list;
    wmem_block_chunk_t *master_head;
    wmem_block_chunk_t *recycler_head;
} wmem_block_allocator_t;

/* DEBUG AND TEST */
static int
wmem_block_verify_block(wmem_block_hdr_t *block)
{
    int                 total_free_space = 0;
    uint32_t            total_len;
    wmem_block_chunk_t *chunk;

    chunk     = WMEM_BLOCK_TO_CHUNK(block);
    total_len = WMEM_BLOCK_HEADER_SIZE;

    if (chunk->jumbo) {
        /* We can tell nothing else about jumbo chunks except that they are
         * always used. */
        return 0;
    }

    g_assert_true(chunk->prev == 0);

    do {
        total_len += chunk->len;

        g_assert_true(chunk->len >= WMEM_CHUNK_HEADER_SIZE);
        g_assert_true(!chunk->jumbo);

        if (WMEM_CHUNK_NEXT(chunk)) {
            g_assert_true(chunk->len == WMEM_CHUNK_NEXT(chunk)->prev);
        }

        if (!chunk->used &&
                WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {

            total_free_space += chunk->len;

            if (!chunk->last) {
                g_assert_true(WMEM_GET_FREE(chunk)->next);
                g_assert_true(WMEM_GET_FREE(chunk)->prev);
            }
        }

        chunk = WMEM_CHUNK_NEXT(chunk);
    } while (chunk);

    g_assert_true(total_len == WMEM_BLOCK_SIZE);

    return total_free_space;
}

static int
wmem_block_verify_master_list(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->master_head;
    if (!cur) {
        return 0;
    }

    g_assert_true(WMEM_GET_FREE(cur)->prev == NULL);

    while (cur) {
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert_true(! cur->used);

        if (cur_free->next) {
            g_assert_true(WMEM_GET_FREE(cur_free->next)->prev == cur);
        }

        if (cur != allocator->master_head) {
            g_assert_true(cur->len == WMEM_BLOCK_SIZE);
        }

        cur = cur_free->next;
    }

    return free_space;
}

static int
wmem_block_verify_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *cur;
    wmem_block_free_t  *cur_free;
    int                 free_space = 0;

    cur = allocator->recycler_head;
    if (!cur) {
        return 0;
    }

    do {
        free_space += cur->len;

        cur_free = WMEM_GET_FREE(cur);

        g_assert_true(! cur->used);

        g_assert_true(cur_free->prev);
        g_assert_true(cur_free->next);

        g_assert_true(WMEM_GET_FREE(cur_free->prev)->next == cur);
        g_assert_true(WMEM_GET_FREE(cur_free->next)->prev == cur);

        cur = cur_free->next;
    } while (cur != allocator->recycler_head);

    return free_space;
}

void
wmem_block_verify(wmem_allocator_t *allocator)
{
    wmem_block_hdr_t       *cur;
    wmem_block_allocator_t *private_allocator;
    int                     master_free, recycler_free, chunk_free = 0;

    /* Normally it would be bad for an allocator helper function to depend
     * on receiving the right type of allocator, but this is for testing only
     * and is not part of any real API. */
    g_assert_true(allocator->type == WMEM_ALLOCATOR_BLOCK);

    private_allocator = (wmem_block_allocator_t*) allocator->private_data;

    if (private_allocator->block_list == NULL) {
        g_assert_true(! private_allocator->master_head);
        g_assert_true(! private_allocator->recycler_head);
        return;
    }

    master_free   = wmem_block_verify_master_list(private_allocator);
    recycler_free = wmem_block_verify_recycler(private_allocator);

    cur = private_allocator->block_list;
    g_assert_true(cur->prev == NULL);
    while (cur) {
        if (cur->next) {
            g_assert_true(cur->next->prev == cur);
        }
        chunk_free += wmem_block_verify_block(cur);
        cur = cur->next;
    }

    g_assert_true(chunk_free == master_free + recycler_free);
}

/* MASTER/RECYCLER HELPERS */

/* Cycles the recycler. See the design notes at the top of this file for more
 * details. */
static void
wmem_block_cycle_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->recycler_head;

    if (chunk == NULL) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->next->len < chunk->len) {
        /* Hold the current head fixed during rotation. */
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;

        free_chunk->prev = free_chunk->next;
        free_chunk->next = WMEM_GET_FREE(free_chunk->next)->next;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;
    }
    else {
        /* Just rotate everything. */
        allocator->recycler_head = free_chunk->next;
    }
}

/* Adds a chunk to the recycler. */
static void
wmem_block_add_to_recycler(wmem_block_allocator_t *allocator,
                           wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    if (WMEM_CHUNK_DATA_LEN(chunk) < sizeof(wmem_block_free_t)) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (! allocator->recycler_head) {
        /* First one */
        free_chunk->next         = chunk;
        free_chunk->prev         = chunk;
        allocator->recycler_head = chunk;
    }
    else {
        free_chunk->next = allocator->recycler_head;
        free_chunk->prev = WMEM_GET_FREE(allocator->recycler_head)->prev;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;

        if (chunk->len > allocator->recycler_head->len) {
            allocator->recycler_head = chunk;
        }
    }
}

/* Removes a chunk from the recycler. */
static void
wmem_block_remove_from_recycler(wmem_block_allocator_t *allocator,
                                wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->prev == chunk && free_chunk->next == chunk) {
        /* Only one item in recycler, just empty it. */
        allocator->recycler_head = NULL;
    }
    else {
        /* Two or more items, usual doubly-linked-list removal. It's circular
         * so we don't need to worry about null-checking anything, which is
         * nice. */
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = free_chunk->next;
        }
    }
}

/* Pushes a chunk onto the master stack. */
static void
wmem_block_push_master(wmem_block_allocator_t *allocator,
                       wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    free_chunk = WMEM_GET_FREE(chunk);
    free_chunk->prev = NULL;
    free_chunk->next = allocator->master_head;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
    }
    allocator->master_head = chunk;
}

/* Removes the top chunk from the master stack. */
static void
wmem_block_pop_master(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->master_head;

    free_chunk = WMEM_GET_FREE(chunk);

    allocator->master_head = free_chunk->next;
    if (free_chunk->next) {
        WMEM_GET_FREE(free_chunk->next)->prev = NULL;
    }
}

/* CHUNK HELPERS */

/* Takes a free chunk and checks the chunks to its immediate right and left in
 * the block. If they are also free, the contiguous free chunks are merged into
 * a single free chunk. The resulting chunk ends up in either the master list or
 * the recycler, depending on where the merged chunks were originally.
 */
static void
wmem_block_merge_free(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_chunk_t *tmp;
    wmem_block_chunk_t *left_free  = NULL;
    wmem_block_chunk_t *right_free = NULL;

    /* Check the chunk to our right. If it is free, merge it into our current
     * chunk. If it is big enough to hold a free-header, save it for later (we
     * need to know about the left chunk before we decide what goes where). */
    tmp = WMEM_CHUNK_NEXT(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            right_free = tmp;
        }
        chunk->len += tmp->len;
        chunk->last = tmp->last;
    }

    /* Check the chunk to our left. If it is free, merge our current chunk into
     * it (thus chunk = tmp). As before, save it if it has enough space to
     * hold a free-header. */
    tmp = WMEM_CHUNK_PREV(chunk);
    if (tmp && !tmp->used) {
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            left_free = tmp;
        }
        tmp->len += chunk->len;
        tmp->last = chunk->last;
        chunk = tmp;
    }

    /* The length of our chunk may have changed. If we have a chunk following,
     * update its 'prev' count. */
    if (!chunk->last) {
        WMEM_CHUNK_NEXT(chunk)->prev = chunk->len;
    }

    /* Now that the chunk headers are merged and consistent, we need to figure
     * out what goes where in which free list. */
    if (right_free && right_free == allocator->master_head) {
        /* If we merged right, and that chunk was the head of the master list,
         * then we leave the resulting chunk at the head of the master list. */
        wmem_block_free_t *moved;
        if (left_free) {
            wmem_block_remove_from_recycler(allocator, left_free);
        }
        moved = WMEM_GET_FREE(chunk);
        moved->prev = NULL;
        moved->next = WMEM_GET_FREE(right_free)->next;
        allocator->master_head = chunk;
        if (moved->next) {
            WMEM_GET_FREE(moved->next)->prev = chunk;
        }
    }
    else {
        /* Otherwise, we remove the right-merged chunk (if there was one) from
         * the recycler. Then, if we merged left we have nothing to do, since
         * that recycler entry is still valid. If not, we add the chunk. */
        if (right_free) {
            wmem_block_remove_from_recycler(allocator, right_free);
        }
        if (!left_free) {
            wmem_block_add_to_recycler(allocator, chunk);
        }
    }
}

/* Takes an unused chunk and a size, and splits it into two chunks if possible.
 * The first chunk (at the same address as the input chunk) is guaranteed to
 * hold at least `size` bytes of data, and to not be in either the master or
 * recycler lists.
 *
 * The second chunk gets whatever data is left over. It is marked unused and
 * replaces the input chunk in whichever list it originally inhabited. */
static void
wmem_block_split_free_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    wmem_block_free_t  *old_blk, *new_blk;
    size_t aligned_size, available;
    bool last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size + sizeof(wmem_block_free_t)) {
        /* If the available space is not enough to store all of
         * (hdr + requested size + alignment padding + hdr + free-header) then
         * just remove the current chunk from the free list and return, since we
         * can't usefully split it. */
        if (chunk == allocator->master_head) {
            wmem_block_pop_master(allocator);
        }
        else if (WMEM_CHUNK_DATA_LEN(chunk) >= sizeof(wmem_block_free_t)) {
            wmem_block_remove_from_recycler(allocator, chunk);
        }
        return;
    }

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (uint32_t) aligned_size;
    chunk->last = false;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* Now we move the free chunk's address without changing its location
     * in whichever list it is in.
     *
     * Note that the new chunk header 'extra' may overlap the old free header,
     * so we have to copy the free header before we write anything to extra.
     */
    old_blk = WMEM_GET_FREE(chunk);
    new_blk = WMEM_GET_FREE(extra);

    if (allocator->master_head == chunk) {
        new_blk->prev = old_blk->prev;
        new_blk->next = old_blk->next;

        if (old_blk->next) {
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        allocator->master_head = extra;
    }
    else {
        if (old_blk->prev == chunk) {
            new_blk->prev = extra;
            new_blk->next = extra;
        }
        else {
            new_blk->prev = old_blk->prev;
            new_blk->next = old_blk->next;

            WMEM_GET_FREE(old_blk->prev)->next = extra;
            WMEM_GET_FREE(old_blk->next)->prev = extra;
        }

        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = extra;
        }
    }

    /* Now that we've copied over the free-list stuff (which may have overlapped
     * with our new chunk header) we can safely write our new chunk header. */
    extra->len   = (uint32_t) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = false;
    extra->jumbo = false;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }
}
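
/* An illustrative walk-through (assuming 16-byte LP64 headers): splitting a
 * 96-byte free chunk for a 20-byte request gives aligned_size == 16 + 32 ==
 * 48. The chunk's 80 bytes of data can hold that plus another header and
 * free-header, so it becomes a 48-byte chunk (16-byte header + 32 bytes of
 * data) handed back towards the caller, followed by a 48-byte free chunk
 * that takes over the original's place in its free list. */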

/* Takes a used chunk and a size, and splits it into two chunks if possible.
 * The first chunk can hold at least `size` bytes of data, while the second gets
 * whatever's left over. The second is marked as unused and is added to the
 * recycler. */
static void
wmem_block_split_used_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    size_t aligned_size, available;
    bool last;

    aligned_size = WMEM_ALIGN_SIZE(size) + WMEM_CHUNK_HEADER_SIZE;

    if (aligned_size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* in this case we don't have enough space to really split it, so
         * it's basically a no-op */
        return;
    }
    /* otherwise, we have room to split it, though the remaining free chunk
     * may still not be usefully large */

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (uint32_t) aligned_size;
    chunk->last = false;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* set the new values for the chunk */
    extra->len   = (uint32_t) available;
    extra->last  = last;
    extra->prev  = chunk->len;
    extra->used  = false;
    extra->jumbo = false;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }

    /* Merge it to its right if possible (it can't be merged left, obviously).
     * This also adds it to the recycler. */
    wmem_block_merge_free(allocator, extra);
}

/* BLOCK HELPERS */

/* Add a block to the allocator's embedded doubly-linked list of OS-level blocks
 * that it owns. */
static void
wmem_block_add_to_block_list(wmem_block_allocator_t *allocator,
                             wmem_block_hdr_t *block)
{
    block->prev = NULL;
    block->next = allocator->block_list;
    if (block->next) {
        block->next->prev = block;
    }
    allocator->block_list = block;
}

/* Remove a block from the allocator's embedded doubly-linked list of OS-level
 * blocks that it owns. */
static void
wmem_block_remove_from_block_list(wmem_block_allocator_t *allocator,
                                  wmem_block_hdr_t *block)
{
    if (block->prev) {
        block->prev->next = block->next;
    }
    else {
        allocator->block_list = block->next;
    }

    if (block->next) {
        block->next->prev = block->prev;
    }
}

/* Initializes a single unused chunk at the beginning of the block, and
 * adds that chunk to the free list. */
static void
wmem_block_init_block(wmem_block_allocator_t *allocator,
                      wmem_block_hdr_t *block)
{
    wmem_block_chunk_t *chunk;

    /* a new block contains one chunk, right at the beginning */
    chunk = WMEM_BLOCK_TO_CHUNK(block);

    chunk->used  = false;
    chunk->jumbo = false;
    chunk->last  = true;
    chunk->prev  = 0;
    chunk->len   = WMEM_BLOCK_SIZE - WMEM_BLOCK_HEADER_SIZE;

    /* now push that chunk onto the master list */
    wmem_block_push_master(allocator, chunk);
}

/* Creates a new block, and initializes it. */
static void
wmem_block_new_block(wmem_block_allocator_t *allocator)
{
    wmem_block_hdr_t *block;

    /* allocate the new block and add it to the block list */
    block = (wmem_block_hdr_t *)wmem_alloc(NULL, WMEM_BLOCK_SIZE);
    wmem_block_add_to_block_list(allocator, block);

    /* initialize it */
    wmem_block_init_block(allocator, block);
}

/* JUMBO ALLOCATIONS */

/* Allocates special 'jumbo' blocks for sizes that won't fit normally. */
static void *
wmem_block_alloc_jumbo(wmem_block_allocator_t *allocator, const size_t size)
{
    wmem_block_hdr_t   *block;
    wmem_block_chunk_t *chunk;

    /* allocate a new block of exactly the right size */
    block = (wmem_block_hdr_t *) wmem_alloc(NULL, size
            + WMEM_BLOCK_HEADER_SIZE
            + WMEM_CHUNK_HEADER_SIZE);

    /* add it to the block list */
    wmem_block_add_to_block_list(allocator, block);

    /* the new block contains a single jumbo chunk */
    chunk = WMEM_BLOCK_TO_CHUNK(block);
    chunk->last  = true;
    chunk->used  = true;
    chunk->jumbo = true;
    chunk->len   = 0;
    chunk->prev  = 0;

    /* and return the data pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}
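
/* Note that a jumbo chunk records len == 0 and last == true, so the
 * chunk-walking macros never step past it; the real allocation size is
 * tracked only by the underlying allocator that wmem_alloc(NULL, ...)
 * wraps. */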

/* Frees special 'jumbo' blocks of sizes that won't fit normally. */
static void
wmem_block_free_jumbo(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_hdr_t *block;

    block = WMEM_CHUNK_TO_BLOCK(chunk);

    wmem_block_remove_from_block_list(allocator, block);

    wmem_free(NULL, block);
}

/* Reallocs special 'jumbo' blocks of sizes that won't fit normally. */
static void *
wmem_block_realloc_jumbo(wmem_block_allocator_t *allocator,
                         wmem_block_chunk_t *chunk,
                         const size_t size)
{
    wmem_block_hdr_t *block;

    block = WMEM_CHUNK_TO_BLOCK(chunk);

    block = (wmem_block_hdr_t *) wmem_realloc(NULL, block, size
            + WMEM_BLOCK_HEADER_SIZE
            + WMEM_CHUNK_HEADER_SIZE);

    if (block->next) {
        block->next->prev = block;
    }

    if (block->prev) {
        block->prev->next = block;
    }
    else {
        allocator->block_list = block;
    }

    return WMEM_CHUNK_TO_DATA(WMEM_BLOCK_TO_CHUNK(block));
}

/* API */

static void *
wmem_block_alloc(void *private_data, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    if (size > WMEM_BLOCK_MAX_ALLOC_SIZE) {
        return wmem_block_alloc_jumbo(allocator, size);
    }

    if (allocator->recycler_head &&
            WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) {

        /* If we can serve it from the recycler, do so. */
        chunk = allocator->recycler_head;
    }
    else {
        if (allocator->master_head &&
                WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) {

            /* Recycle the head of the master list if necessary. */
            chunk = allocator->master_head;
            wmem_block_pop_master(allocator);
            wmem_block_add_to_recycler(allocator, chunk);
        }

        if (!allocator->master_head) {
            /* Allocate a new block if necessary. */
            wmem_block_new_block(allocator);
        }

        chunk = allocator->master_head;
    }

    /* Split our chunk into two to preserve any trailing free space */
    wmem_block_split_free_chunk(allocator, chunk, size);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);

    /* mark it as used */
    chunk->used = true;

    /* and return the user's pointer */
    return WMEM_CHUNK_TO_DATA(chunk);
}

static void
wmem_block_free(void *private_data, void *ptr)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        wmem_block_free_jumbo(allocator, chunk);
        return;
    }

    /* mark it as unused */
    chunk->used = false;

    /* merge it with any other free chunks adjacent to it, so that contiguous
     * free space doesn't get fragmented */
    wmem_block_merge_free(allocator, chunk);

    /* Now cycle the recycler */
    wmem_block_cycle_recycler(allocator);
}

static void *
wmem_block_realloc(void *private_data, void *ptr, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t     *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    if (chunk->jumbo) {
        return wmem_block_realloc_jumbo(allocator, chunk, size);
    }

    if (size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* grow */
        wmem_block_chunk_t *tmp;

        tmp = WMEM_CHUNK_NEXT(chunk);

        if (tmp && (!tmp->used) &&
            (size < WMEM_CHUNK_DATA_LEN(chunk) + tmp->len)) {
            /* the next chunk is free and has enough extra, so just grab
             * from that */
            size_t split_size;

            /* we ask for the next chunk to be split, but we don't end up
             * using the split chunk header (it just gets merged into this one),
             * so we want the split to be of (size - curdatalen - header_size).
             * However, this can underflow by header_size, so we do a quick
             * check here and floor the value to 0. */
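            /* For example (illustrative, assuming 16-byte headers): with
             * WMEM_CHUNK_DATA_LEN(chunk) == 32, size == 40 and a free
             * neighbour of len 64, split_size is 40 - 32 == 8, which is
             * below the header size and so floored to 0. The split then
             * carves a minimal 16-byte chunk off the neighbour, and the
             * merge below grows this chunk's data area to 48 bytes, which
             * covers the request. */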
            split_size = size - WMEM_CHUNK_DATA_LEN(chunk);

            if (split_size < WMEM_CHUNK_HEADER_SIZE) {
                split_size = 0;
            }
            else {
                split_size -= WMEM_CHUNK_HEADER_SIZE;
            }

            wmem_block_split_free_chunk(allocator, tmp, split_size);

            /* Now do a 'quickie' merge between the current block and the left-
             * hand side of the split. Simply calling wmem_block_merge_free
             * might confuse things, since we may temporarily have two blocks
             * to our right that are both free (and it isn't guaranteed to
             * handle that case). Update our 'len' count and last flag, and
             * our (new) successor's 'prev' count */
            chunk->len += tmp->len;
            chunk->last = tmp->last;
            tmp = WMEM_CHUNK_NEXT(chunk);
            if (tmp) {
                tmp->prev = chunk->len;
            }

            /* Now cycle the recycler */
            wmem_block_cycle_recycler(allocator);

            /* And return the same old pointer */
            return ptr;
        }
        else {
            /* no room to grow, need to alloc, copy, free */
            void *newptr;

            newptr = wmem_block_alloc(private_data, size);
            memcpy(newptr, ptr, WMEM_CHUNK_DATA_LEN(chunk));
            wmem_block_free(private_data, ptr);

            /* No need to cycle the recycler, alloc and free both did that
             * already */

            return newptr;
        }
    }
    else if (size < WMEM_CHUNK_DATA_LEN(chunk)) {
        /* shrink */
        wmem_block_split_used_chunk(allocator, chunk, size);

        /* Now cycle the recycler */
        wmem_block_cycle_recycler(allocator);

        return ptr;
    }

    /* no-op */
    return ptr;
}

static void
wmem_block_free_all(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_hdr_t       *cur;
    wmem_block_chunk_t     *chunk;

    /* the existing free lists are entirely irrelevant */
    allocator->master_head   = NULL;
    allocator->recycler_head = NULL;

    /* iterate through the blocks, reinitializing each one */
    cur = allocator->block_list;

    while (cur) {
        chunk = WMEM_BLOCK_TO_CHUNK(cur);
        if (chunk->jumbo) {
            wmem_block_remove_from_block_list(allocator, cur);
            cur = cur->next;
            wmem_free(NULL, WMEM_CHUNK_TO_BLOCK(chunk));
        }
        else {
            wmem_block_init_block(allocator, cur);
            cur = cur->next;
        }
    }
}

static void
wmem_block_gc(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_hdr_t   *cur, *next;
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    /* Walk through the blocks, adding used blocks to the new list and
     * completely destroying unused blocks. */
    cur = allocator->block_list;
    allocator->block_list = NULL;

    while (cur) {
        chunk = WMEM_BLOCK_TO_CHUNK(cur);
        next  = cur->next;

        if (!chunk->jumbo && !chunk->used && chunk->last) {
            /* If the first chunk is also the last, and is unused, then
             * the block as a whole is entirely unused, so return it to
             * the OS and remove it from whatever lists it is in. */
            free_chunk = WMEM_GET_FREE(chunk);
            if (free_chunk->next) {
                WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
            }
            if (free_chunk->prev) {
                WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
            }
            if (allocator->recycler_head == chunk) {
                if (free_chunk->next == chunk) {
                    allocator->recycler_head = NULL;
                }
                else {
                    allocator->recycler_head = free_chunk->next;
                }
            }
            else if (allocator->master_head == chunk) {
                allocator->master_head = free_chunk->next;
            }
            wmem_free(NULL, cur);
        }
        else {
            /* part of this block is used, so add it to the new block list */
            wmem_block_add_to_block_list(allocator, cur);
        }

        cur = next;
    }
}

static void
wmem_block_allocator_cleanup(void *private_data)
{
    /* wmem guarantees that free_all() is called directly before this, so
     * calling gc will return all our blocks to the OS automatically */
    wmem_block_gc(private_data);

    /* then just free the allocator structs */
    wmem_free(NULL, private_data);
}

void
wmem_block_allocator_init(wmem_allocator_t *allocator)
{
    wmem_block_allocator_t *block_allocator;

    block_allocator = wmem_new(NULL, wmem_block_allocator_t);

    allocator->walloc   = &wmem_block_alloc;
    allocator->wrealloc = &wmem_block_realloc;
    allocator->wfree    = &wmem_block_free;

    allocator->free_all = &wmem_block_free_all;
    allocator->gc       = &wmem_block_gc;
    allocator->cleanup  = &wmem_block_allocator_cleanup;

    allocator->private_data = (void*) block_allocator;

    block_allocator->block_list    = NULL;
    block_allocator->master_head   = NULL;
    block_allocator->recycler_head = NULL;
}
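
/* An illustrative usage sketch (relies on the public wmem_core.h entry
 * points rather than anything defined in this file):
 *
 *     wmem_allocator_t *pool = wmem_allocator_new(WMEM_ALLOCATOR_BLOCK);
 *     void *buf = wmem_alloc(pool, 1024);   // carved from an 8MB block
 *     buf = wmem_realloc(pool, buf, 4096);  // may grow in place
 *     wmem_free(pool, buf);                 // chunk returns to the recycler
 *     wmem_free_all(pool);                  // reset all blocks at once
 *     wmem_destroy_allocator(pool);         // blocks go back to the OS
 */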

/*
 * Editor modelines  -  https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */