Coverage Report

Created: 2025-11-11 06:57

/src/unit/src/nxt_mp.c
(The per-line numbers and execution counts of the coverage table have been omitted from the listing below; every execution count recorded for this file is 0.)
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * A memory pool allocates memory in clusters of a specified size, aligned
 * to page_alignment.  A cluster is divided into pages of a specified size.
 * Page size must be a power of 2.  A page can be used entirely or can be
 * divided into chunks of equal size.  Chunk size must be a power of 2.
 * Non-freeable memory is also allocated from pages.  A cluster can contain
 * a mix of pages with different chunk sizes and non-freeable pages.
 * Cluster size must be a multiple of page size and need not be a power
 * of 2.  Allocations larger than a page are made outside clusters.  Start
 * addresses and sizes of the clusters and large allocations are stored in
 * an rbtree of blocks so that they can be found on free operations.  The
 * rbtree nodes are sorted by start addresses.  The rbtree is also used to
 * destroy the memory pool.
 */
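For orientation, a minimal usage sketch of the public API defined in this file; the sizes passed to nxt_mp_create() are illustrative assumptions, not the values Unit actually uses.

static void
example_mp_usage(void)
{
    void      *p;
    nxt_mp_t  *mp;

    /* Hypothetical geometry: 1K clusters, 256-byte pages, 32-byte chunks. */
    mp = nxt_mp_create(1024, 128, 256, 32);
    if (mp == NULL) {
        return;
    }

    p = nxt_mp_alloc(mp, 100);          /* freeable allocation */
    if (p != NULL) {
        nxt_mp_free(mp, p);
    }

    p = nxt_mp_get(mp, 32);             /* non-freeable, lives until destroy */
    (void) p;

    nxt_mp_destroy(mp);                 /* releases clusters and large blocks */
}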


typedef struct {
    /*
     * Used to link
     *  *) pages with free chunks in pool chunk pages lists,
     *  *) pages with free space for non-freeable allocations,
     *  *) free pages in clusters.
     */
    nxt_queue_link_t     link;

    union {
        /* Chunk bitmap.  There can be no more than 32 chunks in a page. */
        uint32_t         map;

        /* Size of taken non-freeable space. */
        uint32_t         taken;
    } u;

    /*
     * Size of chunks or page shifted by pool->chunk_size_shift.  Zero means
     * that page is free, 0xFF means page with non-freeable allocations.
     */
    uint8_t              size;

    /* Number of free chunks of a chunked page. */
    uint8_t              chunks;

    /*
     * Number of allocation fails due to free space insufficiency
     * in non-freeable page.
     */
    uint8_t              fails;

    /*
     * Page number in page cluster.
     * There can be no more than 256 pages in a cluster.
     */
    uint8_t              number;
} nxt_mp_page_t;


/*
 * Some malloc implementations (e.g. jemalloc) allocate large blocks
 * (e.g. larger than 4K) with 4K alignment.  If a block descriptor were
 * allocated together with such a block, it would waste an extra 4K of
 * memory, so it is better to allocate the block descriptor separately.
 */

typedef enum {
    /* Block of cluster.  The block is allocated separately from the cluster. */
    NXT_MP_CLUSTER_BLOCK = 0,
    /*
     * Block of large allocation.
     * The block is allocated separately from the allocation.
     */
    NXT_MP_DISCRETE_BLOCK,
    /*
     * Block of large allocation.
     * The block is allocated just after the allocation.
     */
    NXT_MP_EMBEDDED_BLOCK,
} nxt_mp_block_type_t;


typedef struct {
    NXT_RBTREE_NODE      (node);
    nxt_mp_block_type_t  type:8;
    uint8_t              freeable;

    /* Block size must be less than 4G. */
    uint32_t             size;

    u_char               *start;
    nxt_mp_page_t        pages[];
} nxt_mp_block_t;


struct nxt_mp_s {
    /* rbtree of nxt_mp_block_t. */
    nxt_rbtree_t         blocks;

    uint8_t              chunk_size_shift;
    uint8_t              page_size_shift;
    uint32_t             page_size;
    uint32_t             page_alignment;
    uint32_t             cluster_size;
    uint32_t             retain;

#if (NXT_DEBUG)
    nxt_pid_t            pid;
    nxt_tid_t            tid;
#endif

    nxt_work_t           *cleanup;

    /* Lists of nxt_mp_page_t. */
    nxt_queue_t          free_pages;
    nxt_queue_t          nget_pages;
    nxt_queue_t          get_pages;
    nxt_queue_t          chunk_pages[];
};


#define nxt_mp_chunk_get_free(map)                                            \
    (__builtin_ffs(map) - 1)


#define nxt_mp_chunk_is_free(map, chunk)                                      \
    ((map & (1 << chunk)) != 0)


#define nxt_mp_chunk_set_busy(map, chunk)                                     \
    map &= ~(1 << chunk)


#define nxt_mp_chunk_set_free(map, chunk)                                     \
    map |= (1 << chunk)


#define nxt_mp_free_junk(p, size)                                             \
    memset((p), 0x5A, size)
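A small sketch of the bitmap convention behind these macros: a set bit marks a free chunk, so __builtin_ffs() locates the lowest free chunk and the -1 turns its 1-based result into a chunk index. The map value below mirrors the 0xFFFFFFFE used later for a freshly chunked page; the example itself is not part of the original file.

#include <assert.h>

static void
example_chunk_bitmap(void)
{
    uint32_t  map;

    map = 0xFFFFFFFE;                         /* chunk 0 busy, chunks 1..31 free */

    assert(!nxt_mp_chunk_is_free(map, 0));
    assert(nxt_mp_chunk_get_free(map) == 1);  /* lowest free chunk */

    nxt_mp_chunk_set_busy(map, 1);
    nxt_mp_chunk_set_free(map, 0);

    assert(nxt_mp_chunk_get_free(map) == 0);
}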


#if !(NXT_DEBUG_MEMORY)
static void *nxt_mp_alloc_small(nxt_mp_t *mp, size_t size);
static void *nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size);
static nxt_mp_page_t *nxt_mp_alloc_page(nxt_mp_t *mp);
static nxt_mp_block_t *nxt_mp_alloc_cluster(nxt_mp_t *mp);
#endif
static void *nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size,
    nxt_bool_t freeable);
static intptr_t nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1,
    nxt_rbtree_node_t *node2);
static nxt_mp_block_t *nxt_mp_find_block(nxt_rbtree_t *tree, const u_char *p);
static const char *nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster,
    u_char *p);


#if (NXT_HAVE_BUILTIN_CLZ)

#define nxt_lg2(value)                                                        \
    (31 - __builtin_clz(value))

#else

static const int nxt_lg2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2,
    60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20,
    55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41,
    50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12,
    44, 24, 15,  8, 23,  7,  6,  5
};

static const uint64_t nxt_lg2_magic = 0x07EDD5E59A4E28C2ULL;

static int
nxt_lg2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    return nxt_lg2_tab64[ ((v - (v >> 1)) * nxt_lg2_magic) >> 58 ];
}

#endif
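A quick sanity check of the logarithm helper (a sketch assuming <assert.h>): nxt_lg2() returns the floor of the base-2 logarithm, which nxt_mp_create() uses below to derive chunk_size_shift and page_size_shift.

#include <assert.h>

static void
example_lg2_check(void)
{
    /* 31 - __builtin_clz(4096) == 31 - 19 == 12, i.e. floor(log2(4096)). */
    assert(nxt_lg2(4096) == 12);
    assert(nxt_lg2(64) == 6);

    /* Non-powers of two round down: floor(log2(100)) == 6. */
    assert(nxt_lg2(100) == 6);
}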


#if (NXT_DEBUG)

nxt_inline void
nxt_mp_thread_assert(nxt_mp_t *mp)
{
    nxt_tid_t     tid;
    nxt_thread_t  *thread;

    thread = nxt_thread();
    tid = nxt_thread_tid(thread);

    if (nxt_fast_path(mp->tid == tid)) {
        return;
    }

    if (nxt_slow_path(nxt_pid != mp->pid)) {
        mp->pid = nxt_pid;
        mp->tid = tid;

        return;
    }

    nxt_log_alert(thread->log, "mem_pool locked by thread %PT", mp->tid);
    nxt_abort();
}

#else

#define nxt_mp_thread_assert(mp)

#endif


void
nxt_mp_thread_adopt(nxt_mp_t *mp)
{
#if (NXT_DEBUG)
    mp->pid = nxt_pid;
    mp->tid = nxt_thread_tid(nxt_thread());
#endif
}


nxt_mp_t *
nxt_mp_create(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_mp_t     *mp;
    uint32_t     pages, chunk_size_shift, page_size_shift;
    nxt_queue_t  *chunk_pages;

    chunk_size_shift = nxt_lg2(min_chunk_size);
    page_size_shift = nxt_lg2(page_size);

    pages = page_size_shift - chunk_size_shift;

    mp = nxt_zalloc(sizeof(nxt_mp_t) + pages * sizeof(nxt_queue_t));

    if (nxt_fast_path(mp != NULL)) {
        mp->retain = 1;
        mp->chunk_size_shift = chunk_size_shift;
        mp->page_size_shift = page_size_shift;
        mp->page_size = page_size;
        mp->page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);
        mp->cluster_size = cluster_size;

        chunk_pages = mp->chunk_pages;

        while (pages != 0) {
            nxt_queue_init(chunk_pages);
            chunk_pages++;
            pages--;
        }

        nxt_queue_init(&mp->free_pages);
        nxt_queue_init(&mp->nget_pages);
        nxt_queue_init(&mp->get_pages);

        nxt_rbtree_init(&mp->blocks, nxt_mp_rbtree_compare);
    }

    nxt_debug_alloc("mp %p create(%uz, %uz, %uz, %uz)", mp, cluster_size,
                    page_alignment, page_size, min_chunk_size);

    return mp;
}
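A worked example of the geometry nxt_mp_create() derives, with assumed sizes rather than Unit's real defaults:

/*
 * nxt_mp_create(1024, 128, 256, 32) gives:
 *
 *   chunk_size_shift = nxt_lg2(32)  = 5
 *   page_size_shift  = nxt_lg2(256) = 8
 *   pages            = 8 - 5        = 3
 *
 * so the pool carries three chunk_pages queues for 32-, 64- and 128-byte
 * chunks; small requests above half a page (128 bytes) take a whole page,
 * and requests above the page size go to nxt_mp_alloc_large().
 */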


void
nxt_mp_retain(nxt_mp_t *mp)
{
    mp->retain++;

    nxt_thread_log_debug("mp %p retain: %uD", mp, mp->retain);
}


void
nxt_mp_release(nxt_mp_t *mp)
{
    mp->retain--;

    nxt_thread_log_debug("mp %p release: %uD", mp, mp->retain);

    if (mp->retain == 0) {
        nxt_mp_destroy(mp);
    }
}


void
nxt_mp_destroy(nxt_mp_t *mp)
{
    void               *p;
    nxt_work_t         *work, *next_work;
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *next;

    nxt_debug_alloc("mp %p destroy", mp);

    nxt_mp_thread_assert(mp);

    while (mp->cleanup != NULL) {
        work = mp->cleanup;
        next_work = work->next;

        work->handler(work->task, work->obj, work->data);

        mp->cleanup = next_work;
    }

    next = nxt_rbtree_root(&mp->blocks);

    while (next != nxt_rbtree_sentinel(&mp->blocks)) {

        node = nxt_rbtree_destroy_next(&mp->blocks, &next);
        block = (nxt_mp_block_t *) node;

        p = block->start;

        if (block->type != NXT_MP_EMBEDDED_BLOCK) {
            nxt_free(block);
        }

        nxt_free(p);
    }

    nxt_free(mp);
}


nxt_bool_t
nxt_mp_test_sizes(size_t cluster_size, size_t page_alignment, size_t page_size,
    size_t min_chunk_size)
{
    nxt_bool_t  valid;

    /* Alignment and sizes must be a power of 2. */

    valid = nxt_expect(1, (nxt_is_power_of_two(page_alignment)
                           && nxt_is_power_of_two(page_size)
                           && nxt_is_power_of_two(min_chunk_size)));
    if (!valid) {
        return 0;
    }

    page_alignment = nxt_max(page_alignment, NXT_MAX_ALIGNMENT);

    valid = nxt_expect(1, (page_size >= 64
                           && page_size >= page_alignment
                           && page_size >= min_chunk_size
                           && min_chunk_size * 32 >= page_size
                           && cluster_size >= page_size
                           && cluster_size / page_size <= 256
                           && cluster_size % page_size == 0));
    if (!valid) {
        return 0;
    }

    return 1;
}
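Two illustrative calls (assumed sizes, not Unit's defaults) showing how the checks above play out:

#include <assert.h>

static void
example_test_sizes(void)
{
    /* Passes: every constraint above holds for these sizes. */
    assert(nxt_mp_test_sizes(1024, 128, 256, 32));

    /* Fails: min_chunk_size * 32 == 512 is smaller than the 4096-byte page. */
    assert(!nxt_mp_test_sizes(4096, 128, 4096, 16));
}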


nxt_bool_t
nxt_mp_is_empty(nxt_mp_t *mp)
{
    return (nxt_rbtree_is_empty(&mp->blocks)
            && nxt_queue_is_empty(&mp->free_pages));
}


void *
nxt_mp_alloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        p = nxt_mp_alloc_small(mp, size);

    } else {
        p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 1);
    }

#else

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 1);

#endif

    nxt_debug_alloc("mp %p alloc(%uz): %p", mp, size, p);

    return p;
}


void *
nxt_mp_zalloc(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_alloc(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


void *
nxt_mp_align(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void    *p;

    /* Alignment must be a power of 2. */

    if (nxt_fast_path(nxt_is_power_of_two(alignment))) {

#if !(NXT_DEBUG_MEMORY)

        size_t  aligned_size;

        aligned_size = nxt_max(size, alignment);

        if (aligned_size <= mp->page_size && alignment <= mp->page_alignment) {
            p = nxt_mp_alloc_small(mp, aligned_size);

        } else {
            p = nxt_mp_alloc_large(mp, alignment, size, 1);
        }

#else

        p = nxt_mp_alloc_large(mp, alignment, size, 1);

#endif

    } else {
        p = NULL;
    }

    nxt_debug_alloc("mp %p align(@%uz:%uz): %p", mp, alignment, size, p);

    return p;
}


void *
nxt_mp_zalign(nxt_mp_t *mp, size_t alignment, size_t size)
{
    void  *p;

    p = nxt_mp_align(mp, alignment, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}


nxt_inline nxt_uint_t
nxt_mp_chunk_pages_index(nxt_mp_t *mp, size_t size)
{
    nxt_int_t  n, index;

    index = 0;

    if (size > 1) {
        n = nxt_lg2(size - 1) + 1 - mp->chunk_size_shift;

        if (n > 0) {
            index = n;
        }
    }

    return index;
}
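A worked example of the mapping, assuming chunk_size_shift is 5 (32-byte minimum chunks):

/*
 * nxt_mp_chunk_pages_index() rounds the request up to the next chunk size:
 *
 *   size  1 ..  32  ->  index 0  (32-byte chunks)
 *   size 33 ..  64  ->  index 1  (64-byte chunks)
 *   size 65 .. 128  ->  index 2  (128-byte chunks)
 *
 * e.g. for size 100: nxt_lg2(99) + 1 - 5 == 6 + 1 - 5 == 2.
 */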


#if !(NXT_DEBUG_MEMORY)

nxt_inline u_char *
nxt_mp_page_addr(nxt_mp_t *mp, nxt_mp_page_t *page)
{
    size_t          page_offset;
    nxt_mp_block_t  *block;

    page_offset = page->number * sizeof(nxt_mp_page_t)
                  + offsetof(nxt_mp_block_t, pages);

    block = (nxt_mp_block_t *) ((u_char *) page - page_offset);

    return block->start + (page->number << mp->page_size_shift);
}


static void *
nxt_mp_alloc_small(nxt_mp_t *mp, size_t size)
{
    u_char            *p;
    nxt_uint_t        n, index;
    nxt_queue_t       *chunk_pages;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link;

    nxt_mp_thread_assert(mp);

    p = NULL;

    if (size <= mp->page_size / 2) {

        index = nxt_mp_chunk_pages_index(mp, size);
        chunk_pages = &mp->chunk_pages[index];

        if (nxt_fast_path(!nxt_queue_is_empty(chunk_pages))) {

            link = nxt_queue_first(chunk_pages);
            page = nxt_queue_link_data(link, nxt_mp_page_t, link);

            p = nxt_mp_page_addr(mp, page);

            n = nxt_mp_chunk_get_free(page->u.map);
            nxt_mp_chunk_set_busy(page->u.map, n);

            p += ((n << index) << mp->chunk_size_shift);

            page->chunks--;

            if (page->chunks == 0) {
                /*
                 * Remove full page from the pool chunk pages list
                 * of pages with free chunks.
                 */
                nxt_queue_remove(&page->link);
            }

        } else {
            page = nxt_mp_alloc_page(mp);

            if (nxt_fast_path(page != NULL)) {
                page->size = (1 << index);

                n = mp->page_size_shift - (index + mp->chunk_size_shift);
                page->chunks = (1 << n) - 1;

                nxt_queue_insert_head(chunk_pages, &page->link);

                /* Mark the first chunk as busy. */
                page->u.map = 0xFFFFFFFE;

                p = nxt_mp_page_addr(mp, page);
            }
        }

    } else {
        page = nxt_mp_alloc_page(mp);

        if (nxt_fast_path(page != NULL)) {
            page->size = mp->page_size >> mp->chunk_size_shift;

            p = nxt_mp_page_addr(mp, page);
        }
    }

    nxt_debug_alloc("mp %p chunk:%uz alloc: %p", mp,
                    page->size << mp->chunk_size_shift, p);

    return p;
}
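To make the chunk arithmetic concrete, a sketch of one small allocation, again assuming 256-byte pages and 32-byte minimum chunks (chunk_size_shift 5):

/*
 * nxt_mp_alloc(mp, 100) under these assumptions:
 *
 *   - 100 <= 256 / 2, so the request is served from a chunked page;
 *   - nxt_mp_chunk_pages_index(mp, 100) == 2, i.e. 128-byte chunks;
 *   - a fresh page gets page->size = 1 << 2 = 4 (128 >> 5),
 *     page->chunks = (1 << 1) - 1 = 1, and page->u.map = 0xFFFFFFFE,
 *     marking chunk 0 busy and leaving one more 128-byte chunk free.
 */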


static void *
nxt_mp_get_small(nxt_mp_t *mp, nxt_queue_t *pages, size_t size)
{
    u_char            *p;
    uint32_t          available;
    nxt_mp_page_t     *page;
    nxt_queue_link_t  *link, *next;

    nxt_mp_thread_assert(mp);

    for (link = nxt_queue_first(pages);
         link != nxt_queue_tail(pages);
         link = next)
    {
        next = nxt_queue_next(link);
        page = nxt_queue_link_data(link, nxt_mp_page_t, link);

        available = mp->page_size - page->u.taken;

        if (size <= available) {
            goto found;
        }

        if (available == 0 || page->fails++ > 100) {
            nxt_queue_remove(link);
        }
    }

    page = nxt_mp_alloc_page(mp);

    if (nxt_slow_path(page == NULL)) {
        return page;
    }

    nxt_queue_insert_head(pages, &page->link);

    page->size = 0xFF;
    page->u.taken = 0;

found:

    p = nxt_mp_page_addr(mp, page);

    p += page->u.taken;
    page->u.taken += size;

    return p;
}


static nxt_mp_page_t *
nxt_mp_alloc_page(nxt_mp_t *mp)
{
    nxt_mp_page_t     *page;
    nxt_mp_block_t    *cluster;
    nxt_queue_link_t  *link;

    if (nxt_queue_is_empty(&mp->free_pages)) {
        cluster = nxt_mp_alloc_cluster(mp);
        if (nxt_slow_path(cluster == NULL)) {
            return NULL;
        }
    }

    link = nxt_queue_first(&mp->free_pages);
    nxt_queue_remove(link);

    page = nxt_queue_link_data(link, nxt_mp_page_t, link);

    return page;
}


static nxt_mp_block_t *
nxt_mp_alloc_cluster(nxt_mp_t *mp)
{
    nxt_uint_t      n;
    nxt_mp_block_t  *cluster;

    n = mp->cluster_size >> mp->page_size_shift;

    cluster = nxt_zalloc(sizeof(nxt_mp_block_t) + n * sizeof(nxt_mp_page_t));

    if (nxt_slow_path(cluster == NULL)) {
        return NULL;
    }

    /* NXT_MP_CLUSTER_BLOCK type is zero. */

    cluster->size = mp->cluster_size;

    cluster->start = nxt_memalign(mp->page_alignment, mp->cluster_size);
    if (nxt_slow_path(cluster->start == NULL)) {
        nxt_free(cluster);
        return NULL;
    }

    n--;
    cluster->pages[n].number = n;
    nxt_queue_insert_head(&mp->free_pages, &cluster->pages[n].link);

    while (n != 0) {
        n--;
        cluster->pages[n].number = n;
        nxt_queue_insert_before(&cluster->pages[n + 1].link,
                                &cluster->pages[n].link);
    }

    nxt_rbtree_insert(&mp->blocks, &cluster->node);

    return cluster;
}

#endif


static void *
nxt_mp_alloc_large(nxt_mp_t *mp, size_t alignment, size_t size,
    nxt_bool_t freeable)
{
    u_char          *p;
    size_t          aligned_size;
    uint8_t         type;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    /* Allocation must be less than 4G. */
    if (nxt_slow_path(size >= 0xFFFFFFFF)) {
        return NULL;
    }

    if (nxt_is_power_of_two(size)) {
        block = nxt_malloc(sizeof(nxt_mp_block_t));
        if (nxt_slow_path(block == NULL)) {
            return NULL;
        }

        p = nxt_memalign(alignment, size);
        if (nxt_slow_path(p == NULL)) {
            nxt_free(block);
            return NULL;
        }

        type = NXT_MP_DISCRETE_BLOCK;

    } else {
        aligned_size = nxt_align_size(size, sizeof(uintptr_t));

        p = nxt_memalign(alignment, aligned_size + sizeof(nxt_mp_block_t));
        if (nxt_slow_path(p == NULL)) {
            return NULL;
        }

        block = (nxt_mp_block_t *) (p + aligned_size);
        type = NXT_MP_EMBEDDED_BLOCK;
    }

    block->type = type;
    block->freeable = freeable;
    block->size = size;
    block->start = p;

    nxt_rbtree_insert(&mp->blocks, &block->node);

    return p;
}
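The power-of-two test decides where the block descriptor lives; an illustration with arbitrary sizes, assuming the page size is below 8K so both requests reach this function:

/*
 * nxt_mp_alloc(mp, 8192):  8192 is a power of 2, so the descriptor is a
 * separate nxt_mp_block_t (NXT_MP_DISCRETE_BLOCK); appending it could push
 * a size-classed allocator past the 8K class, the jemalloc concern noted
 * earlier in this file.
 *
 * nxt_mp_alloc(mp, 5000):  not a power of 2, so the size is rounded up to
 * pointer alignment and the descriptor is embedded right after the data
 * (NXT_MP_EMBEDDED_BLOCK), saving a second malloc() call.
 */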


static intptr_t
nxt_mp_rbtree_compare(nxt_rbtree_node_t *node1, nxt_rbtree_node_t *node2)
{
    nxt_mp_block_t  *block1, *block2;

    block1 = (nxt_mp_block_t *) node1;
    block2 = (nxt_mp_block_t *) node2;

    /*
     * Shifting is necessary to prevent overflow of intptr_t when block1->start
     * is much greater than block2->start or vice versa.
     *
     * It is safe to drop one bit since there cannot be adjacent addresses
     * because of alignments and allocation sizes.  Effectively this reduces
     * the absolute values to fit into the magnitude of intptr_t.
     */
    return ((uintptr_t) block1->start >> 1) - ((uintptr_t) block2->start >> 1);
}


void
nxt_mp_free(nxt_mp_t *mp, void *p)
{
    const char      *err;
    nxt_mp_block_t  *block;

    nxt_mp_thread_assert(mp);

    nxt_debug_alloc("mp %p free(%p)", mp, p);

    block = nxt_mp_find_block(&mp->blocks, p);

    if (nxt_fast_path(block != NULL)) {

        if (block->type == NXT_MP_CLUSTER_BLOCK) {
            err = nxt_mp_chunk_free(mp, block, p);

            if (nxt_fast_path(err == NULL)) {
                return;
            }

        } else if (nxt_fast_path(p == block->start)) {

            if (block->freeable) {
                nxt_rbtree_delete(&mp->blocks, &block->node);

                if (block->type == NXT_MP_DISCRETE_BLOCK) {
                    nxt_free(block);
                }

                nxt_free(p);

                return;
            }

            err = "freed pointer points to non-freeable block: %p";

        } else {
            err = "freed pointer points to middle of block: %p";
        }

    } else {
        err = "freed pointer is out of pool: %p";
    }

    nxt_thread_log_alert(err, p);
}


static nxt_mp_block_t *
nxt_mp_find_block(nxt_rbtree_t *tree, const u_char *p)
{
    nxt_mp_block_t     *block;
    nxt_rbtree_node_t  *node, *sentinel;

    node = nxt_rbtree_root(tree);
    sentinel = nxt_rbtree_sentinel(tree);

    while (node != sentinel) {

        block = (nxt_mp_block_t *) node;

        if (p < block->start) {
            node = node->left;

        } else if (p >= block->start + block->size) {
            node = node->right;

        } else {
            return block;
        }
    }

    return NULL;
}


static const char *
nxt_mp_chunk_free(nxt_mp_t *mp, nxt_mp_block_t *cluster, u_char *p)
{
    u_char         *start;
    uintptr_t      offset;
    nxt_uint_t     n, size, chunk;
    nxt_queue_t    *chunk_pages;
    nxt_mp_page_t  *page;

    n = (p - cluster->start) >> mp->page_size_shift;
    start = cluster->start + (n << mp->page_size_shift);

    page = &cluster->pages[n];

    if (nxt_slow_path(page->size == 0)) {
        return "freed pointer points to already free page: %p";
    }

    if (nxt_slow_path(page->size == 0xFF)) {
        return "freed pointer points to non-freeable page: %p";
    }

    size = page->size << mp->chunk_size_shift;

    if (size != mp->page_size) {

        offset = (uintptr_t) (p - start) & (mp->page_size - 1);
        chunk = offset / size;

        if (nxt_slow_path(offset != chunk * size)) {
            return "freed pointer points to wrong chunk: %p";
        }

        if (nxt_slow_path(nxt_mp_chunk_is_free(page->u.map, chunk))) {
            return "freed pointer points to already free chunk: %p";
        }

        nxt_mp_chunk_set_free(page->u.map, chunk);

        if (page->u.map != 0xFFFFFFFF) {
            page->chunks++;

            if (page->chunks == 1) {
                /*
                 * Add the page to the head of pool chunk pages list
                 * of pages with free chunks.
                 */
                n = nxt_mp_chunk_pages_index(mp, size);
                chunk_pages = &mp->chunk_pages[n];

                nxt_queue_insert_head(chunk_pages, &page->link);
            }

            nxt_mp_free_junk(p, size);

            return NULL;

        } else {
            /*
             * All chunks are free, remove the page from pool
             * chunk pages list of pages with free chunks.
             */
            nxt_queue_remove(&page->link);
        }

    } else if (nxt_slow_path(p != start)) {
        return "invalid pointer to chunk: %p";
    }

    /* Add the free page to the pool's free pages list. */

    page->size = 0;
    nxt_queue_insert_head(&mp->free_pages, &page->link);

    nxt_mp_free_junk(p, size);

    /* Test if all pages in the cluster are free. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        if (page->size != 0) {
            return NULL;
        }

        page++;
        n--;
    } while (n != 0);

    /* Free cluster. */

    n = mp->cluster_size >> mp->page_size_shift;
    page = cluster->pages;

    do {
        nxt_queue_remove(&page->link);
        page++;
        n--;
    } while (n != 0);

    nxt_rbtree_delete(&mp->blocks, &cluster->node);

    p = cluster->start;

    nxt_free(cluster);
    nxt_free(p);

    return NULL;
}


void *
nxt_mp_nget(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        p = nxt_mp_get_small(mp, &mp->nget_pages, size);

    } else {
        p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);
    }

#else

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);

#endif

    nxt_debug_alloc("mp %p nget(%uz): %p", mp, size, p);

    return p;
}


void *
nxt_mp_get(nxt_mp_t *mp, size_t size)
{
    void  *p;

#if !(NXT_DEBUG_MEMORY)

    if (size <= mp->page_size) {
        size = nxt_max(size, NXT_MAX_ALIGNMENT);
        p = nxt_mp_get_small(mp, &mp->get_pages, size);

    } else {
        p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);
    }

#else

    p = nxt_mp_alloc_large(mp, NXT_MAX_ALIGNMENT, size, 0);

#endif

    nxt_debug_alloc("mp %p get(%uz): %p", mp, size, p);

    return p;
}


void *
nxt_mp_zget(nxt_mp_t *mp, size_t size)
{
    void  *p;

    p = nxt_mp_get(mp, size);

    if (nxt_fast_path(p != NULL)) {
        memset(p, 0, size);
    }

    return p;
}
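A short sketch contrasting the get and alloc families (mp is assumed to be a valid pool): get/nget/zget memory is carved from bump-pointer pages (page->size == 0xFF) or from blocks with freeable == 0, and is only reclaimed when the whole pool is destroyed; passing such a pointer to nxt_mp_free() is reported as an error rather than released.

static void
example_get_vs_alloc(nxt_mp_t *mp)
{
    void  *tmp, *buf;

    tmp = nxt_mp_get(mp, 64);      /* non-freeable: lives until nxt_mp_destroy() */
    buf = nxt_mp_alloc(mp, 64);    /* freeable: may be returned early */

    nxt_mp_free(mp, buf);
    (void) tmp;
}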


nxt_int_t
nxt_mp_cleanup(nxt_mp_t *mp, nxt_work_handler_t handler,
    nxt_task_t *task, void *obj, void *data)
{
    nxt_work_t  *work;

    work = nxt_mp_get(mp, sizeof(nxt_work_t));

    if (nxt_slow_path(work == NULL)) {
        return NXT_ERROR;
    }

    work->next = mp->cleanup;
    work->handler = handler;
    work->task = task;
    work->obj = obj;
    work->data = data;

    mp->cleanup = work;

    return NXT_OK;
}
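A sketch of registering a cleanup handler; the handler's parameter list follows the way nxt_mp_destroy() invokes work->handler above (task, obj, data), and example_close_fd and example_register_cleanup are hypothetical names, not part of Unit.

#include <unistd.h>

/* Hypothetical handler: close a file descriptor owned by the pool. */
static void
example_close_fd(nxt_task_t *task, void *obj, void *data)
{
    (void) task;
    (void) data;

    close((int) (intptr_t) obj);
}

static nxt_int_t
example_register_cleanup(nxt_task_t *task, nxt_mp_t *mp, int fd)
{
    /* The handler runs when nxt_mp_destroy(mp) drains mp->cleanup. */
    return nxt_mp_cleanup(mp, example_close_fd, task, (void *) (intptr_t) fd,
                          NULL);
}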


void *
nxt_mp_lvlhsh_alloc(void *pool, size_t size)
{
    return nxt_mp_align(pool, size, size);
}


void
nxt_mp_lvlhsh_free(void *pool, void *p)
{
    nxt_mp_free(pool, p);
}