Coverage Report

Created: 2025-09-05 06:52

/src/serenity/AK/BumpAllocator.h
Line |  Count | Source
   1 |        | /*
   2 |        |  * Copyright (c) 2021, Ali Mohammad Pur <mpfard@serenityos.org>
   3 |        |  *
   4 |        |  * SPDX-License-Identifier: BSD-2-Clause
   5 |        |  */
   6 |        |
   7 |        | #pragma once
   8 |        |
   9 |        | #include <AK/Atomic.h>
  10 |        | #include <AK/StdLibExtras.h>
  11 |        | #include <AK/Types.h>
  12 |        | #include <AK/kmalloc.h>
  13 |        | #include <sys/mman.h>
  14 |        |
  15 |        | namespace AK {
  16 |        |
  17 |        | template<bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
  18 |        | class BumpAllocator {
  19 |        | public:
  20 |        |     BumpAllocator()
  21 |  5.33M |     {
  22 |        |         if constexpr (use_mmap)
  23 |  5.33M |             m_chunk_size = chunk_size;
  24 |        |         else
  25 |        |             m_chunk_size = kmalloc_good_size(chunk_size);
  26 |  5.33M |     }
  27 |        |
  28 |        |     ~BumpAllocator()
  29 |  5.33M |     {
  30 |  5.33M |         deallocate_all();
  31 |  5.33M |     }
  32 |        |
  33 |        |     void* allocate(size_t size, size_t align)
  34 |  56.5M |     {
  35 |  56.5M |         VERIFY(size < m_chunk_size - sizeof(ChunkHeader));
  36 |  56.5M |         if (!m_current_chunk) {
  37 |  5.33M |             if (!allocate_a_chunk())
  38 |      0 |                 return nullptr;
  39 |  5.33M |         }
  40 |        |
  41 |  56.5M |     allocate_again:;
  42 |  56.5M |         VERIFY(m_current_chunk != 0);
  43 |        |
  44 |  56.5M |         auto aligned_ptr = align_up_to(m_byte_offset_into_current_chunk + m_current_chunk, align);
  45 |  56.5M |         auto next_offset = aligned_ptr + size - m_current_chunk;
  46 |  56.5M |         if (next_offset > m_chunk_size) {
  47 |  13.5k |             if (!allocate_a_chunk())
  48 |      0 |                 return nullptr;
  49 |  13.5k |             goto allocate_again;
  50 |  13.5k |         }
  51 |  56.5M |         m_byte_offset_into_current_chunk = next_offset;
  52 |  56.5M |         return (void*)aligned_ptr;
  53 |  56.5M |     }
  54 |        |
  55 |        |     void deallocate_all()
  56 |  5.33M |     {
  57 |  5.33M |         if (!m_head_chunk)
  58 |      0 |             return;
  59 |        |         // Note that 'cache_filled' is just an educated guess, and we don't rely on it.
  60 |        |         // If we determine 'cache_filled=true' and the cache becomes empty in the meantime,
  61 |        |         // then we haven't lost much; it was a close call anyway.
  62 |        |         // If we determine 'cache_filled=false' and the cache becomes full in the meantime,
  63 |        |         // then we'll end up with a different chunk to munmap(), no big difference.
  64 |  5.33M |         bool cache_filled = s_unused_allocation_cache.load(MemoryOrder::memory_order_relaxed);
  65 |  5.34M |         for_each_chunk([&](auto chunk) {
  66 |  5.34M |             if (!cache_filled) {
  67 |  5.33M |                 cache_filled = true;
  68 |  5.33M |                 (reinterpret_cast<ChunkHeader*>(chunk))->next_chunk = 0;
  69 |  5.33M |                 chunk = s_unused_allocation_cache.exchange(chunk);
  70 |  5.33M |                 if (!chunk)
  71 |  5.33M |                     return;
  72 |        |                 // The cache got filled in the meantime. Oh well, we have to call munmap() anyway.
  73 |  5.33M |             }
  74 |        |
  75 |  13.5k |             if constexpr (use_mmap) {
  76 |  13.5k |                 munmap((void*)chunk, m_chunk_size);
  77 |        |             } else {
  78 |        |                 kfree_sized((void*)chunk, m_chunk_size);
  79 |        |             }
  80 |  13.5k |         });
  81 |  5.33M |     }
  82 |        |
  83 |        | protected:
  84 |        |     template<typename TFn>
  85 |        |     void for_each_chunk(TFn&& fn)
  86 |  10.6M |     {
  87 |  10.6M |         auto head_chunk = m_head_chunk;
  88 |  21.3M |         while (head_chunk) {
  89 |  10.6M |             auto& chunk_header = *reinterpret_cast<ChunkHeader const*>(head_chunk);
  90 |  10.6M |             VERIFY(chunk_header.magic == chunk_magic);
  91 |  10.6M |             if (head_chunk == m_current_chunk)
  92 |  10.6M |                 VERIFY(chunk_header.next_chunk == 0);
  93 |  10.6M |             auto next_chunk = chunk_header.next_chunk;
  94 |  10.6M |             fn(head_chunk);
  95 |  10.6M |             head_chunk = next_chunk;
  96 |  10.6M |         }
  97 |  10.6M |     }
void AK::BumpAllocator<true, 2097152ul>::for_each_chunk<AK::UniformBumpAllocator<regex::BumpAllocatedLinkedList<regex::MatchState>::Node, true, 2097152ul>::destroy_all()::{lambda(auto:1)#1}>(AK::UniformBumpAllocator<regex::BumpAllocatedLinkedList<regex::MatchState>::Node, true, 2097152ul>::destroy_all()::{lambda(auto:1)#1}&&)
Line |  Count | Source
  86 |  5.33M |     {
  87 |  5.33M |         auto head_chunk = m_head_chunk;
  88 |  10.6M |         while (head_chunk) {
  89 |  5.34M |             auto& chunk_header = *reinterpret_cast<ChunkHeader const*>(head_chunk);
  90 |  5.34M |             VERIFY(chunk_header.magic == chunk_magic);
  91 |  5.34M |             if (head_chunk == m_current_chunk)
  92 |  5.33M |                 VERIFY(chunk_header.next_chunk == 0);
  93 |  5.34M |             auto next_chunk = chunk_header.next_chunk;
  94 |  5.34M |             fn(head_chunk);
  95 |  5.34M |             head_chunk = next_chunk;
  96 |  5.34M |         }
  97 |  5.33M |     }
void AK::BumpAllocator<true, 2097152ul>::for_each_chunk<AK::BumpAllocator<true, 2097152ul>::deallocate_all()::{lambda(auto:1)#1}>(AK::BumpAllocator<true, 2097152ul>::deallocate_all()::{lambda(auto:1)#1}&&)
Line |  Count | Source
  86 |  5.33M |     {
  87 |  5.33M |         auto head_chunk = m_head_chunk;
  88 |  10.6M |         while (head_chunk) {
  89 |  5.34M |             auto& chunk_header = *reinterpret_cast<ChunkHeader const*>(head_chunk);
  90 |  5.34M |             VERIFY(chunk_header.magic == chunk_magic);
  91 |  5.34M |             if (head_chunk == m_current_chunk)
  92 |  5.33M |                 VERIFY(chunk_header.next_chunk == 0);
  93 |  5.34M |             auto next_chunk = chunk_header.next_chunk;
  94 |  5.34M |             fn(head_chunk);
  95 |  5.34M |             head_chunk = next_chunk;
  96 |  5.34M |         }
  97 |  5.33M |     }
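Note on the chunk cache: deallocate_all() above parks at most one retired chunk in the static s_unused_allocation_cache via exchange(), and allocate_a_chunk() in the continuation below claims it back with exchange(0) before falling back to mmap()/kmalloc(). The following is a minimal standalone sketch of that single-slot handoff, not the AK code itself: std::atomic and malloc()/free() stand in for Atomic<FlatPtr> and the real chunk allocation paths, and the function names are hypothetical.

#include <atomic>
#include <cstdint>
#include <cstdlib>

static std::atomic<std::uintptr_t> s_cached_chunk { 0 };

// Producer side (cf. deallocate_all above): try to park a retired chunk in the
// shared slot instead of freeing it.
void cache_or_free_chunk(void* chunk)
{
    // exchange() publishes our chunk and hands back whatever occupied the slot before.
    auto displaced = s_cached_chunk.exchange(reinterpret_cast<std::uintptr_t>(chunk));
    if (displaced != 0) {
        // The slot was already full; free the chunk we displaced.
        std::free(reinterpret_cast<void*>(displaced));
    }
}

// Consumer side (cf. allocate_a_chunk below): claim the cached chunk, or fall
// back to a fresh allocation on a cache miss.
void* take_chunk(std::size_t chunk_size)
{
    if (auto cached = s_cached_chunk.exchange(0))
        return reinterpret_cast<void*>(cached);
    return std::malloc(chunk_size);
}

int main()
{
    void* chunk = take_chunk(4096); // cache is empty: falls back to malloc()
    cache_or_free_chunk(chunk);     // park the chunk for the next allocator
    void* again = take_chunk(4096); // cache hit: the same chunk comes back
    std::free(again);
}

The 'cache_filled' relaxed load in the listing is only an optimization on top of this handoff: it lets the destructor skip the exchange() when the slot already looks occupied, and, as the source comments note, the code tolerates that guess being stale.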
/src/serenity/AK/BumpAllocator.h (continued)
Line |  Count | Source
  98 |        |
  99 |        |     bool allocate_a_chunk()
 100 |  5.34M |     {
 101 |        |         // dbgln("Allocated {} entries in previous chunk and have {} unusable bytes", m_allocations_in_previous_chunk, m_chunk_size - m_byte_offset_into_current_chunk);
 102 |        |         // m_allocations_in_previous_chunk = 0;
 103 |  5.34M |         void* new_chunk = reinterpret_cast<void*>(s_unused_allocation_cache.exchange(0));
 104 |  5.34M |         if (!new_chunk) {
 105 |  13.5k |             if constexpr (use_mmap) {
 106 |        | #ifdef AK_OS_SERENITY
 107 |        |                 new_chunk = serenity_mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, m_chunk_size, "BumpAllocator Chunk");
 108 |        | #else
 109 |  13.5k |                 new_chunk = mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 110 |  13.5k | #endif
 111 |  13.5k |                 if (new_chunk == MAP_FAILED)
 112 |      0 |                     return false;
 113 |        |             } else {
 114 |        |                 new_chunk = kmalloc(m_chunk_size);
 115 |        |                 if (!new_chunk)
 116 |        |                     return false;
 117 |        |             }
 118 |  13.5k |         }
 119 |        |
 120 |  13.5k |         auto& new_header = *reinterpret_cast<ChunkHeader*>(new_chunk);
 121 |  5.34M |         new_header.magic = chunk_magic;
 122 |  5.34M |         new_header.next_chunk = 0;
 123 |  5.34M |         m_byte_offset_into_current_chunk = sizeof(ChunkHeader);
 124 |        |
 125 |  5.34M |         if (!m_head_chunk) {
 126 |  5.33M |             VERIFY(!m_current_chunk);
 127 |  5.33M |             m_head_chunk = reinterpret_cast<FlatPtr>(new_chunk);
 128 |  5.33M |             m_current_chunk = reinterpret_cast<FlatPtr>(new_chunk);
 129 |  5.33M |             return true;
 130 |  5.33M |         }
 131 |        |
 132 |  13.5k |         VERIFY(m_current_chunk);
 133 |  13.5k |         auto& old_header = *reinterpret_cast<ChunkHeader*>(m_current_chunk);
 134 |  13.5k |         VERIFY(old_header.magic == chunk_magic);
 135 |  13.5k |         VERIFY(old_header.next_chunk == 0);
 136 |  13.5k |         old_header.next_chunk = reinterpret_cast<FlatPtr>(new_chunk);
 137 |  13.5k |         m_current_chunk = reinterpret_cast<FlatPtr>(new_chunk);
 138 |  13.5k |         return true;
 139 |  13.5k |     }
 140 |        |
 141 |        |     constexpr static FlatPtr chunk_magic = explode_byte(0xdf);
 142 |        |     struct ChunkHeader {
 143 |        |         FlatPtr magic;
 144 |        |         FlatPtr next_chunk;
 145 |        |     };
 146 |        |     FlatPtr m_head_chunk { 0 };
 147 |        |     FlatPtr m_current_chunk { 0 };
 148 |        |     size_t m_byte_offset_into_current_chunk { 0 };
 149 |        |     size_t m_chunk_size { 0 };
 150 |        |     static Atomic<FlatPtr> s_unused_allocation_cache;
 151 |        | };
 152 |        |
 153 |        | template<typename T, bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
 154 |        | class UniformBumpAllocator : protected BumpAllocator<use_mmap, chunk_size> {
 155 |        |     using Allocator = BumpAllocator<use_mmap, chunk_size>;
 156 |        |
 157 |        | public:
 158 |  5.33M |     UniformBumpAllocator() = default;
 159 |        |     ~UniformBumpAllocator()
 160 |  5.33M |     {
 161 |  5.33M |         destroy_all();
 162 |  5.33M |     }
 163 |        |
 164 |        |     template<typename... Args>
 165 |        |     T* allocate(Args&&... args)
 166 |  56.5M |     {
 167 |  56.5M |         auto ptr = (T*)Allocator::allocate(sizeof(T), alignof(T));
 168 |  56.5M |         if (!ptr)
 169 |      0 |             return nullptr;
 170 |  56.5M |         return new (ptr) T { forward<Args>(args)... };
 171 |  56.5M |     }
 172 |        |
 173 |        |     void deallocate_all()
 174 |        |     {
 175 |        |         destroy_all();
 176 |        |         Allocator::deallocate_all();
 177 |        |     }
 178 |        |
 179 |        |     void destroy_all()
 180 |  5.33M |     {
 181 |  5.34M |         this->for_each_chunk([&](auto chunk) {
 182 |  5.34M |             auto base_ptr = align_up_to(chunk + sizeof(typename Allocator::ChunkHeader), alignof(T));
 183 |        |             // Compute the offset of the first byte *after* this chunk:
 184 |  5.34M |             FlatPtr end_offset = base_ptr + this->m_chunk_size - chunk - sizeof(typename Allocator::ChunkHeader);
 185 |  5.34M |             if (chunk == this->m_current_chunk)
 186 |  5.33M |                 end_offset = this->m_byte_offset_into_current_chunk;
 187 |        |             // Compute the offset of the first byte *after* the last valid object, in case the end of the chunk does not align with the end of an object:
 188 |  5.34M |             end_offset = (end_offset / sizeof(T)) * sizeof(T);
 189 |  61.9M |             for (; base_ptr - chunk < end_offset; base_ptr += sizeof(T))
 190 |  56.5M |                 reinterpret_cast<T*>(base_ptr)->~T();
 191 |  5.34M |         });
 192 |  5.33M |     }
 193 |        | };
 194 |        |
 195 |        | template<bool use_mmap, size_t size>
 196 |        | inline Atomic<FlatPtr> BumpAllocator<use_mmap, size>::s_unused_allocation_cache { 0 };
 197 |        |
 198 |        | }
 199 |        |
 200 |        | #if USING_AK_GLOBALLY
 201 |        | using AK::BumpAllocator;
 202 |        | using AK::UniformBumpAllocator;
 203 |        | #endif
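
Usage sketch (illustrative, not part of the coverage data): how a caller might use the UniformBumpAllocator covered above. Only the allocator API comes from the listing; the Node type, build_list(), and the loop are assumptions added for context.

#include <AK/BumpAllocator.h>

struct Node {
    int value;
    Node* next;
};

void build_list()
{
    // Default parameters: kmalloc-backed 4 KiB chunks. The regex instantiations in
    // this report use <Node, true, 2097152ul>, i.e. mmap-backed 2 MiB chunks.
    AK::UniformBumpAllocator<Node> allocator;

    Node* head = nullptr;
    for (int i = 0; i < 1000; ++i)
        head = allocator.allocate(i, head); // placement-new of Node { i, head } in the current chunk

    // No per-node free: ~UniformBumpAllocator() runs destroy_all(), and the base
    // BumpAllocator destructor calls deallocate_all() to release or cache the chunks.
}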