/src/serenity/Userland/Libraries/LibJS/Heap/Heap.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org> |
3 | | * Copyright (c) 2023, Aliaksandr Kalenik <kalenik.aliaksandr@gmail.com> |
4 | | * |
5 | | * SPDX-License-Identifier: BSD-2-Clause |
6 | | */ |
7 | | |
8 | | #include <AK/Badge.h> |
9 | | #include <AK/Debug.h> |
10 | | #include <AK/HashTable.h> |
11 | | #include <AK/JsonArray.h> |
12 | | #include <AK/JsonObject.h> |
13 | | #include <AK/Platform.h> |
14 | | #include <AK/StackInfo.h> |
15 | | #include <AK/TemporaryChange.h> |
16 | | #include <LibCore/ElapsedTimer.h> |
17 | | #include <LibJS/Bytecode/Interpreter.h> |
18 | | #include <LibJS/Heap/CellAllocator.h> |
19 | | #include <LibJS/Heap/Handle.h> |
20 | | #include <LibJS/Heap/Heap.h> |
21 | | #include <LibJS/Heap/HeapBlock.h> |
22 | | #include <LibJS/Runtime/Object.h> |
23 | | #include <LibJS/Runtime/WeakContainer.h> |
24 | | #include <LibJS/SafeFunction.h> |
25 | | #include <setjmp.h> |
26 | | |
27 | | #ifdef AK_OS_SERENITY |
28 | | # include <serenity.h> |
29 | | #endif |
30 | | |
31 | | #ifdef HAS_ADDRESS_SANITIZER |
32 | | # include <sanitizer/asan_interface.h> |
33 | | #endif |
34 | | |
35 | | namespace JS { |
36 | | |
37 | | #ifdef AK_OS_SERENITY |
38 | | static int gc_perf_string_id; |
39 | | #endif |
40 | | |
41 | | // NOTE: We keep a per-thread list of custom ranges. This hinges on the assumption that there is one JS VM per thread. |
42 | | static __thread HashMap<FlatPtr*, size_t>* s_custom_ranges_for_conservative_scan = nullptr; |
43 | | static __thread HashMap<FlatPtr*, SourceLocation*>* s_safe_function_locations = nullptr; |
44 | | |
45 | | Heap::Heap(VM& vm) |
46 | 61 | : HeapBase(vm) |
47 | 61 | { |
48 | | #ifdef AK_OS_SERENITY |
49 | | auto gc_signpost_string = "Garbage collection"sv; |
50 | | gc_perf_string_id = perf_register_string(gc_signpost_string.characters_without_null_termination(), gc_signpost_string.length()); |
51 | | #endif |
52 | | |
53 | 61 | static_assert(HeapBlock::min_possible_cell_size <= 32, "Heap Cell tracking uses too much data!"); |
54 | 61 | m_size_based_cell_allocators.append(make<CellAllocator>(64)); |
55 | 61 | m_size_based_cell_allocators.append(make<CellAllocator>(96)); |
56 | 61 | m_size_based_cell_allocators.append(make<CellAllocator>(128)); |
57 | 61 | m_size_based_cell_allocators.append(make<CellAllocator>(256)); |
58 | 61 | m_size_based_cell_allocators.append(make<CellAllocator>(512)); |
59 | 61 | m_size_based_cell_allocators.append(make<CellAllocator>(1024)); |
60 | 61 | m_size_based_cell_allocators.append(make<CellAllocator>(3072)); |
61 | 61 | } |
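The constructor seeds size-based cell allocators for 64-, 96-, 128-, 256-, 512-, 1024- and 3072-byte cells; allocation then picks the smallest size class that fits the requested cell. A minimal sketch of that selection (illustrative name and an assumed CellAllocator::cell_size() accessor; the real lookup lives in Heap.h, outside this file):

// Sketch only: choose the smallest size class whose cells can hold the request.
// pick_cell_allocator and cell_size() are illustrative assumptions.
static CellAllocator& pick_cell_allocator(Vector<NonnullOwnPtr<CellAllocator>>& allocators, size_t cell_size)
{
    for (auto& allocator : allocators) {
        if (allocator->cell_size() >= cell_size)
            return *allocator;
    }
    VERIFY_NOT_REACHED(); // anything larger than 3072 bytes would need its own allocator
}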
62 | | |
63 | | Heap::~Heap() |
64 | 61 | { |
65 | 61 | vm().string_cache().clear(); |
66 | 61 | vm().byte_string_cache().clear(); |
67 | 61 | collect_garbage(CollectionType::CollectEverything); |
68 | 61 | } |
69 | | |
70 | | void Heap::will_allocate(size_t size) |
71 | 79.2k | { |
72 | 79.2k | if (should_collect_on_every_allocation()) { |
73 | 0 | m_allocated_bytes_since_last_gc = 0; |
74 | 0 | collect_garbage(); |
75 | 79.2k | } else if (m_allocated_bytes_since_last_gc + size > m_gc_bytes_threshold) { |
76 | 0 | m_allocated_bytes_since_last_gc = 0; |
77 | 0 | collect_garbage(); |
78 | 0 | } |
79 | | |
80 | 79.2k | m_allocated_bytes_since_last_gc += size; |
81 | 79.2k | } |
82 | | |
83 | | static void add_possible_value(HashMap<FlatPtr, HeapRoot>& possible_pointers, FlatPtr data, HeapRoot origin, FlatPtr min_block_address, FlatPtr max_block_address) |
84 | 0 | { |
85 | 0 | if constexpr (sizeof(FlatPtr*) == sizeof(Value)) { |
86 | | // Because Value stores pointers in non-canonical form, we have to check whether the top bytes
87 | | // match any pointer-backed tag; in that case we have to extract the pointer to its
88 | | // canonical form and add that as a possible pointer.
89 | 0 | FlatPtr possible_pointer; |
90 | 0 | if ((data & SHIFTED_IS_CELL_PATTERN) == SHIFTED_IS_CELL_PATTERN) |
91 | 0 | possible_pointer = Value::extract_pointer_bits(data); |
92 | 0 | else |
93 | 0 | possible_pointer = data; |
94 | 0 | if (possible_pointer < min_block_address || possible_pointer > max_block_address) |
95 | 0 | return; |
96 | 0 | possible_pointers.set(possible_pointer, move(origin)); |
97 | | } else { |
98 | | static_assert((sizeof(Value) % sizeof(FlatPtr*)) == 0); |
99 | | if (data < min_block_address || data > max_block_address) |
100 | | return; |
101 | | // In the 32-bit case we look at the top and bottom halves of the Value separately, so we just
102 | | // add both the upper and lower words as possible pointers. |
103 | | possible_pointers.set(data, move(origin)); |
104 | | } |
105 | 0 | } |
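On 64-bit targets, add_possible_value() has to recognize Values whose top bytes carry a pointer-backed tag and restore the pointer to canonical form before treating it as a candidate. A minimal sketch of that kind of decoding, with purely illustrative constants (TAG_MASK and CELL_TAG are assumptions, not the actual LibJS encoding behind SHIFTED_IS_CELL_PATTERN and Value::extract_pointer_bits):

#include <cstdint>

// Illustrative constants only -- not the real LibJS Value tag layout.
constexpr uint64_t TAG_MASK = 0xFFFF'0000'0000'0000ULL; // assume the tag lives in the top 16 bits
constexpr uint64_t CELL_TAG = 0x7FFC'0000'0000'0000ULL; // assumed pointer-backed tag value

// Does this pointer-sized word carry the (assumed) cell tag in its top bytes?
static bool looks_like_cell_pointer(uint64_t data)
{
    return (data & TAG_MASK) == CELL_TAG;
}

// Strip the tag and sign-extend bit 47 so the result is a canonical 64-bit address.
static uint64_t canonicalize(uint64_t data)
{
    uint64_t address = data & ~TAG_MASK;
    if (address & (1ULL << 47))
        address |= TAG_MASK;
    return address;
}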
106 | | |
107 | | void Heap::find_min_and_max_block_addresses(FlatPtr& min_address, FlatPtr& max_address) |
108 | 0 | { |
109 | 0 | min_address = explode_byte(0xff); |
110 | 0 | max_address = 0; |
111 | 0 | for (auto& allocator : m_all_cell_allocators) { |
112 | 0 | min_address = min(min_address, allocator.min_block_address()); |
113 | 0 | max_address = max(max_address, allocator.max_block_address() + HeapBlockBase::block_size); |
114 | 0 | } |
115 | 0 | } |
116 | | |
117 | | template<typename Callback> |
118 | | static void for_each_cell_among_possible_pointers(HashTable<HeapBlock*> const& all_live_heap_blocks, HashMap<FlatPtr, HeapRoot>& possible_pointers, Callback callback) |
119 | 0 | { |
120 | 0 | for (auto possible_pointer : possible_pointers.keys()) { |
121 | 0 | if (!possible_pointer) |
122 | 0 | continue; |
123 | 0 | auto* possible_heap_block = HeapBlock::from_cell(reinterpret_cast<Cell const*>(possible_pointer)); |
124 | 0 | if (!all_live_heap_blocks.contains(possible_heap_block)) |
125 | 0 | continue; |
126 | 0 | if (auto* cell = possible_heap_block->cell_from_possible_pointer(possible_pointer)) { |
127 | 0 | callback(cell, possible_pointer); |
128 | 0 | } |
129 | 0 | } |
130 | 0 | } |
Unexecuted instantiation: Heap.cpp:void JS::for_each_cell_among_possible_pointers<JS::GraphConstructorVisitor::visit_possible_values(AK::Span<unsigned char const>)::{lambda(JS::Cell*, unsigned long)#1}>(AK::HashTable<JS::HeapBlock*, AK::Traits<JS::HeapBlock*>, false> const&, AK::HashMap<unsigned long, JS::HeapRoot, AK::Traits<unsigned long>, AK::Traits<JS::HeapRoot>, false>&, JS::GraphConstructorVisitor::visit_possible_values(AK::Span<unsigned char const>)::{lambda(JS::Cell*, unsigned long)#1})
Unexecuted instantiation: Heap.cpp:void JS::for_each_cell_among_possible_pointers<JS::MarkingVisitor::visit_possible_values(AK::Span<unsigned char const>)::{lambda(JS::Cell*, unsigned long)#1}>(AK::HashTable<JS::HeapBlock*, AK::Traits<JS::HeapBlock*>, false> const&, AK::HashMap<unsigned long, JS::HeapRoot, AK::Traits<unsigned long>, AK::Traits<JS::HeapRoot>, false>&, JS::MarkingVisitor::visit_possible_values(AK::Span<unsigned char const>)::{lambda(JS::Cell*, unsigned long)#1})
Unexecuted instantiation: Heap.cpp:void JS::for_each_cell_among_possible_pointers<JS::Heap::gather_conservative_roots(AK::HashMap<JS::Cell*, JS::HeapRoot, AK::Traits<JS::Cell*>, AK::Traits<JS::HeapRoot>, false>&)::$_1>(AK::HashTable<JS::HeapBlock*, AK::Traits<JS::HeapBlock*>, false> const&, AK::HashMap<unsigned long, JS::HeapRoot, AK::Traits<unsigned long>, AK::Traits<JS::HeapRoot>, false>&, JS::Heap::gather_conservative_roots(AK::HashMap<JS::Cell*, JS::HeapRoot, AK::Traits<JS::Cell*>, AK::Traits<JS::HeapRoot>, false>&)::$_1)
131 | | |
132 | | class GraphConstructorVisitor final : public Cell::Visitor { |
133 | | public: |
134 | | explicit GraphConstructorVisitor(Heap& heap, HashMap<Cell*, HeapRoot> const& roots) |
135 | 0 | : m_heap(heap) |
136 | 0 | { |
137 | 0 | m_heap.find_min_and_max_block_addresses(m_min_block_address, m_max_block_address); |
138 | 0 | m_heap.for_each_block([&](auto& block) { |
139 | 0 | m_all_live_heap_blocks.set(&block); |
140 | 0 | return IterationDecision::Continue; |
141 | 0 | }); |
142 | 0 | m_work_queue.ensure_capacity(roots.size()); |
143 | |
144 | 0 | for (auto& [root, root_origin] : roots) { |
145 | 0 | auto& graph_node = m_graph.ensure(bit_cast<FlatPtr>(root)); |
146 | 0 | graph_node.class_name = root->class_name(); |
147 | 0 | graph_node.root_origin = root_origin; |
148 | |
149 | 0 | m_work_queue.append(*root); |
150 | 0 | } |
151 | 0 | } |
152 | | |
153 | | virtual void visit_impl(Cell& cell) override |
154 | 0 | { |
155 | 0 | if (m_node_being_visited) |
156 | 0 | m_node_being_visited->edges.set(reinterpret_cast<FlatPtr>(&cell)); |
157 | |
158 | 0 | if (m_graph.get(reinterpret_cast<FlatPtr>(&cell)).has_value()) |
159 | 0 | return; |
160 | | |
161 | 0 | m_work_queue.append(cell); |
162 | 0 | } |
163 | | |
164 | | virtual void visit_possible_values(ReadonlyBytes bytes) override |
165 | 0 | { |
166 | 0 | HashMap<FlatPtr, HeapRoot> possible_pointers; |
167 | |
168 | 0 | auto* raw_pointer_sized_values = reinterpret_cast<FlatPtr const*>(bytes.data()); |
169 | 0 | for (size_t i = 0; i < (bytes.size() / sizeof(FlatPtr)); ++i) |
170 | 0 | add_possible_value(possible_pointers, raw_pointer_sized_values[i], HeapRoot { .type = HeapRoot::Type::HeapFunctionCapturedPointer }, m_min_block_address, m_max_block_address); |
171 | |
172 | 0 | for_each_cell_among_possible_pointers(m_all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr) { |
173 | 0 | if (m_node_being_visited) |
174 | 0 | m_node_being_visited->edges.set(reinterpret_cast<FlatPtr>(cell)); |
175 | |
176 | 0 | if (m_graph.get(reinterpret_cast<FlatPtr>(cell)).has_value()) |
177 | 0 | return; |
178 | 0 | m_work_queue.append(*cell); |
179 | 0 | }); |
180 | 0 | } |
181 | | |
182 | | void visit_all_cells() |
183 | 0 | { |
184 | 0 | while (!m_work_queue.is_empty()) { |
185 | 0 | auto cell = m_work_queue.take_last(); |
186 | 0 | m_node_being_visited = &m_graph.ensure(bit_cast<FlatPtr>(cell.ptr())); |
187 | 0 | m_node_being_visited->class_name = cell->class_name(); |
188 | 0 | cell->visit_edges(*this); |
189 | 0 | m_node_being_visited = nullptr; |
190 | 0 | } |
191 | 0 | } |
192 | | |
193 | | AK::JsonObject dump() |
194 | 0 | { |
195 | 0 | auto graph = AK::JsonObject(); |
196 | 0 | for (auto& it : m_graph) { |
197 | 0 | AK::JsonArray edges; |
198 | 0 | for (auto const& value : it.value.edges) { |
199 | 0 | edges.must_append(ByteString::formatted("{}", value)); |
200 | 0 | } |
201 | |
202 | 0 | auto node = AK::JsonObject(); |
203 | 0 | if (it.value.root_origin.has_value()) { |
204 | 0 | auto type = it.value.root_origin->type; |
205 | 0 | auto location = it.value.root_origin->location; |
206 | 0 | switch (type) { |
207 | 0 | case HeapRoot::Type::Handle: |
208 | 0 | node.set("root"sv, ByteString::formatted("Handle {} {}:{}", location->function_name(), location->filename(), location->line_number())); |
209 | 0 | break; |
210 | 0 | case HeapRoot::Type::MarkedVector: |
211 | 0 | node.set("root"sv, "MarkedVector"); |
212 | 0 | break; |
213 | 0 | case HeapRoot::Type::RegisterPointer: |
214 | 0 | node.set("root"sv, "RegisterPointer"); |
215 | 0 | break; |
216 | 0 | case HeapRoot::Type::StackPointer: |
217 | 0 | node.set("root"sv, "StackPointer"); |
218 | 0 | break; |
219 | 0 | case HeapRoot::Type::VM: |
220 | 0 | node.set("root"sv, "VM"); |
221 | 0 | break; |
222 | 0 | case HeapRoot::Type::SafeFunction: |
223 | 0 | node.set("root"sv, ByteString::formatted("SafeFunction {} {}:{}", location->function_name(), location->filename(), location->line_number())); |
224 | 0 | break; |
225 | 0 | default: |
226 | 0 | VERIFY_NOT_REACHED(); |
227 | 0 | } |
228 | 0 | } |
229 | 0 | node.set("class_name"sv, it.value.class_name); |
230 | 0 | node.set("edges"sv, edges); |
231 | 0 | graph.set(ByteString::number(it.key), node); |
232 | 0 | } |
233 | | |
234 | 0 | return graph; |
235 | 0 | } |
236 | | |
237 | | private: |
238 | | struct GraphNode { |
239 | | Optional<HeapRoot> root_origin; |
240 | | StringView class_name; |
241 | | HashTable<FlatPtr> edges {}; |
242 | | }; |
243 | | |
244 | | GraphNode* m_node_being_visited { nullptr }; |
245 | | Vector<NonnullGCPtr<Cell>> m_work_queue; |
246 | | HashMap<FlatPtr, GraphNode> m_graph; |
247 | | |
248 | | Heap& m_heap; |
249 | | HashTable<HeapBlock*> m_all_live_heap_blocks; |
250 | | FlatPtr m_min_block_address; |
251 | | FlatPtr m_max_block_address; |
252 | | }; |
253 | | |
254 | | AK::JsonObject Heap::dump_graph() |
255 | 0 | { |
256 | 0 | HashMap<Cell*, HeapRoot> roots; |
257 | 0 | gather_roots(roots); |
258 | 0 | GraphConstructorVisitor visitor(*this, roots); |
259 | 0 | visitor.visit_all_cells(); |
260 | 0 | return visitor.dump(); |
261 | 0 | } |
262 | | |
263 | | void Heap::collect_garbage(CollectionType collection_type, bool print_report) |
264 | 61 | { |
265 | 61 | VERIFY(!m_collecting_garbage); |
266 | 61 | TemporaryChange change(m_collecting_garbage, true); |
267 | | |
268 | | #ifdef AK_OS_SERENITY |
269 | | static size_t global_gc_counter = 0; |
270 | | perf_event(PERF_EVENT_SIGNPOST, gc_perf_string_id, global_gc_counter++); |
271 | | #endif |
272 | | |
273 | 61 | Core::ElapsedTimer collection_measurement_timer; |
274 | 61 | if (print_report) |
275 | 0 | collection_measurement_timer.start(); |
276 | | |
277 | 61 | if (collection_type == CollectionType::CollectGarbage) { |
278 | 0 | if (m_gc_deferrals) { |
279 | 0 | m_should_gc_when_deferral_ends = true; |
280 | 0 | return; |
281 | 0 | } |
282 | 0 | HashMap<Cell*, HeapRoot> roots; |
283 | 0 | gather_roots(roots); |
284 | 0 | mark_live_cells(roots); |
285 | 0 | } |
286 | 61 | finalize_unmarked_cells(); |
287 | 61 | sweep_dead_cells(print_report, collection_measurement_timer); |
288 | 61 | } |
289 | | |
290 | | void Heap::gather_roots(HashMap<Cell*, HeapRoot>& roots) |
291 | 0 | { |
292 | 0 | vm().gather_roots(roots); |
293 | 0 | gather_conservative_roots(roots); |
294 | |
295 | 0 | for (auto& handle : m_handles) |
296 | 0 | roots.set(handle.cell(), HeapRoot { .type = HeapRoot::Type::Handle, .location = &handle.source_location() }); |
297 | |
298 | 0 | for (auto& vector : m_marked_vectors) |
299 | 0 | vector.gather_roots(roots); |
300 | |
301 | | if constexpr (HEAP_DEBUG) { |
302 | | dbgln("gather_roots:"); |
303 | | for (auto* root : roots.keys()) |
304 | | dbgln(" + {}", root); |
305 | | } |
306 | 0 | } |
307 | | |
308 | | #ifdef HAS_ADDRESS_SANITIZER |
309 | | NO_SANITIZE_ADDRESS void Heap::gather_asan_fake_stack_roots(HashMap<FlatPtr, HeapRoot>& possible_pointers, FlatPtr addr, FlatPtr min_block_address, FlatPtr max_block_address) |
310 | | { |
311 | | void* begin = nullptr; |
312 | | void* end = nullptr; |
313 | | void* real_stack = __asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(), reinterpret_cast<void*>(addr), &begin, &end); |
314 | | |
315 | | if (real_stack != nullptr) { |
316 | | for (auto* real_stack_addr = reinterpret_cast<void const* const*>(begin); real_stack_addr < end; ++real_stack_addr) { |
317 | | void const* real_address = *real_stack_addr; |
318 | | if (real_address == nullptr) |
319 | | continue; |
320 | | add_possible_value(possible_pointers, reinterpret_cast<FlatPtr>(real_address), HeapRoot { .type = HeapRoot::Type::StackPointer }, min_block_address, max_block_address); |
321 | | } |
322 | | } |
323 | | } |
324 | | #else |
325 | | void Heap::gather_asan_fake_stack_roots(HashMap<FlatPtr, HeapRoot>&, FlatPtr, FlatPtr, FlatPtr) |
326 | 0 | { |
327 | 0 | } |
328 | | #endif |
329 | | |
330 | | NO_SANITIZE_ADDRESS void Heap::gather_conservative_roots(HashMap<Cell*, HeapRoot>& roots) |
331 | 0 | { |
332 | 0 | FlatPtr dummy; |
333 | |
334 | 0 | dbgln_if(HEAP_DEBUG, "gather_conservative_roots:"); |
335 | |
336 | 0 | jmp_buf buf {}; |
337 | 0 | setjmp(buf); |
338 | |
339 | 0 | HashMap<FlatPtr, HeapRoot> possible_pointers; |
340 | |
341 | 0 | auto* raw_jmp_buf = reinterpret_cast<FlatPtr const*>(buf); |
342 | |
343 | 0 | FlatPtr min_block_address, max_block_address; |
344 | 0 | find_min_and_max_block_addresses(min_block_address, max_block_address); |
345 | |
346 | 0 | for (size_t i = 0; i < ((size_t)sizeof(buf)) / sizeof(FlatPtr); ++i) |
347 | 0 | add_possible_value(possible_pointers, raw_jmp_buf[i], HeapRoot { .type = HeapRoot::Type::RegisterPointer }, min_block_address, max_block_address); |
348 | |
349 | 0 | auto stack_reference = bit_cast<FlatPtr>(&dummy); |
350 | 0 | auto& stack_info = m_vm.stack_info(); |
351 | |
352 | 0 | for (FlatPtr stack_address = stack_reference; stack_address < stack_info.top(); stack_address += sizeof(FlatPtr)) { |
353 | 0 | auto data = *reinterpret_cast<FlatPtr*>(stack_address); |
354 | 0 | add_possible_value(possible_pointers, data, HeapRoot { .type = HeapRoot::Type::StackPointer }, min_block_address, max_block_address); |
355 | 0 | gather_asan_fake_stack_roots(possible_pointers, data, min_block_address, max_block_address); |
356 | 0 | } |
357 | | |
358 | | // NOTE: If we have any custom ranges registered, scan those as well. |
359 | | // This is where JS::SafeFunction closures get marked. |
360 | 0 | if (s_custom_ranges_for_conservative_scan) { |
361 | 0 | for (auto& custom_range : *s_custom_ranges_for_conservative_scan) { |
362 | 0 | for (size_t i = 0; i < (custom_range.value / sizeof(FlatPtr)); ++i) { |
363 | 0 | auto safe_function_location = s_safe_function_locations->get(custom_range.key); |
364 | 0 | add_possible_value(possible_pointers, custom_range.key[i], HeapRoot { .type = HeapRoot::Type::SafeFunction, .location = *safe_function_location }, min_block_address, max_block_address); |
365 | 0 | } |
366 | 0 | } |
367 | 0 | } |
368 | |
369 | 0 | for (auto& vector : m_conservative_vectors) { |
370 | 0 | for (auto possible_value : vector.possible_values()) { |
371 | 0 | add_possible_value(possible_pointers, possible_value, HeapRoot { .type = HeapRoot::Type::ConservativeVector }, min_block_address, max_block_address); |
372 | 0 | } |
373 | 0 | } |
374 | |
375 | 0 | HashTable<HeapBlock*> all_live_heap_blocks; |
376 | 0 | for_each_block([&](auto& block) { |
377 | 0 | all_live_heap_blocks.set(&block); |
378 | 0 | return IterationDecision::Continue; |
379 | 0 | }); |
380 | |
381 | 0 | for_each_cell_among_possible_pointers(all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr possible_pointer) { |
382 | 0 | if (cell->state() == Cell::State::Live) { |
383 | 0 | dbgln_if(HEAP_DEBUG, " ?-> {}", (void const*)cell); |
384 | 0 | roots.set(cell, *possible_pointers.get(possible_pointer)); |
385 | 0 | } else { |
386 | 0 | dbgln_if(HEAP_DEBUG, " #-> {}", (void const*)cell); |
387 | 0 | } |
388 | 0 | }); |
389 | 0 | } |
390 | | |
391 | | class MarkingVisitor final : public Cell::Visitor { |
392 | | public: |
393 | | explicit MarkingVisitor(Heap& heap, HashMap<Cell*, HeapRoot> const& roots) |
394 | 0 | : m_heap(heap) |
395 | 0 | { |
396 | 0 | m_heap.find_min_and_max_block_addresses(m_min_block_address, m_max_block_address); |
397 | 0 | m_heap.for_each_block([&](auto& block) { |
398 | 0 | m_all_live_heap_blocks.set(&block); |
399 | 0 | return IterationDecision::Continue; |
400 | 0 | }); |
401 | |
402 | 0 | for (auto* root : roots.keys()) { |
403 | 0 | visit(root); |
404 | 0 | } |
405 | 0 | } |
406 | | |
407 | | virtual void visit_impl(Cell& cell) override |
408 | 0 | { |
409 | 0 | if (cell.is_marked()) |
410 | 0 | return; |
411 | 0 | dbgln_if(HEAP_DEBUG, " ! {}", &cell); |
412 | |
413 | 0 | cell.set_marked(true); |
414 | 0 | m_work_queue.append(cell); |
415 | 0 | } |
416 | | |
417 | | virtual void visit_possible_values(ReadonlyBytes bytes) override |
418 | 0 | { |
419 | 0 | HashMap<FlatPtr, HeapRoot> possible_pointers; |
420 | |
421 | 0 | auto* raw_pointer_sized_values = reinterpret_cast<FlatPtr const*>(bytes.data()); |
422 | 0 | for (size_t i = 0; i < (bytes.size() / sizeof(FlatPtr)); ++i) |
423 | 0 | add_possible_value(possible_pointers, raw_pointer_sized_values[i], HeapRoot { .type = HeapRoot::Type::HeapFunctionCapturedPointer }, m_min_block_address, m_max_block_address); |
424 | |
425 | 0 | for_each_cell_among_possible_pointers(m_all_live_heap_blocks, possible_pointers, [&](Cell* cell, FlatPtr) { |
426 | 0 | if (cell->is_marked()) |
427 | 0 | return; |
428 | 0 | if (cell->state() != Cell::State::Live) |
429 | 0 | return; |
430 | 0 | cell->set_marked(true); |
431 | 0 | m_work_queue.append(*cell); |
432 | 0 | }); |
433 | 0 | } |
434 | | |
435 | | void mark_all_live_cells() |
436 | 0 | { |
437 | 0 | while (!m_work_queue.is_empty()) { |
438 | 0 | m_work_queue.take_last()->visit_edges(*this); |
439 | 0 | } |
440 | 0 | } |
441 | | |
442 | | private: |
443 | | Heap& m_heap; |
444 | | Vector<NonnullGCPtr<Cell>> m_work_queue; |
445 | | HashTable<HeapBlock*> m_all_live_heap_blocks; |
446 | | FlatPtr m_min_block_address; |
447 | | FlatPtr m_max_block_address; |
448 | | }; |
449 | | |
450 | | void Heap::mark_live_cells(HashMap<Cell*, HeapRoot> const& roots) |
451 | 0 | { |
452 | 0 | dbgln_if(HEAP_DEBUG, "mark_live_cells:"); |
453 | |
454 | 0 | MarkingVisitor visitor(*this, roots); |
455 | |
456 | 0 | visitor.mark_all_live_cells(); |
457 | |
458 | 0 | for (auto& inverse_root : m_uprooted_cells) |
459 | 0 | inverse_root->set_marked(false); |
460 | |
|
461 | 0 | m_uprooted_cells.clear(); |
462 | 0 | } |
463 | | |
464 | | bool Heap::cell_must_survive_garbage_collection(Cell const& cell) |
465 | 158k | { |
466 | 158k | if (!cell.overrides_must_survive_garbage_collection({})) |
467 | 158k | return false; |
468 | 0 | return cell.must_survive_garbage_collection(); |
469 | 158k | } |
470 | | |
471 | | void Heap::finalize_unmarked_cells() |
472 | 61 | { |
473 | 4.89k | for_each_block([&](auto& block) { |
474 | 79.2k | block.template for_each_cell_in_state<Cell::State::Live>([](Cell* cell) { |
475 | 79.2k | if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell)) |
476 | 79.2k | cell->finalize(); |
477 | 79.2k | }); |
478 | 4.89k | return IterationDecision::Continue; |
479 | 4.89k | }); |
480 | 61 | } |
481 | | |
482 | | void Heap::sweep_dead_cells(bool print_report, Core::ElapsedTimer const& measurement_timer) |
483 | 61 | { |
484 | 61 | dbgln_if(HEAP_DEBUG, "sweep_dead_cells:"); |
485 | 61 | Vector<HeapBlock*, 32> empty_blocks; |
486 | 61 | Vector<HeapBlock*, 32> full_blocks_that_became_usable; |
487 | | |
488 | 61 | size_t collected_cells = 0; |
489 | 61 | size_t live_cells = 0; |
490 | 61 | size_t collected_cell_bytes = 0; |
491 | 61 | size_t live_cell_bytes = 0; |
492 | | |
493 | 4.89k | for_each_block([&](auto& block) { |
494 | 4.89k | bool block_has_live_cells = false; |
495 | 4.89k | bool block_was_full = block.is_full(); |
496 | 79.2k | block.template for_each_cell_in_state<Cell::State::Live>([&](Cell* cell) { |
497 | 79.2k | if (!cell->is_marked() && !cell_must_survive_garbage_collection(*cell)) { |
498 | 79.2k | dbgln_if(HEAP_DEBUG, " ~ {}", cell); |
499 | 79.2k | block.deallocate(cell); |
500 | 79.2k | ++collected_cells; |
501 | 79.2k | collected_cell_bytes += block.cell_size(); |
502 | 79.2k | } else { |
503 | 0 | cell->set_marked(false); |
504 | 0 | block_has_live_cells = true; |
505 | 0 | ++live_cells; |
506 | 0 | live_cell_bytes += block.cell_size(); |
507 | 0 | } |
508 | 79.2k | }); |
509 | 4.89k | if (!block_has_live_cells) |
510 | 4.89k | empty_blocks.append(&block); |
511 | 0 | else if (block_was_full != block.is_full()) |
512 | 0 | full_blocks_that_became_usable.append(&block); |
513 | 4.89k | return IterationDecision::Continue; |
514 | 4.89k | }); |
515 | | |
516 | 61 | for (auto& weak_container : m_weak_containers) |
517 | 0 | weak_container.remove_dead_cells({}); |
518 | | |
519 | 4.89k | for (auto* block : empty_blocks) { |
520 | 4.89k | dbgln_if(HEAP_DEBUG, " - HeapBlock empty @ {}: cell_size={}", block, block->cell_size()); |
521 | 4.89k | block->cell_allocator().block_did_become_empty({}, *block); |
522 | 4.89k | } |
523 | | |
524 | 61 | for (auto* block : full_blocks_that_became_usable) { |
525 | 0 | dbgln_if(HEAP_DEBUG, " - HeapBlock usable again @ {}: cell_size={}", block, block->cell_size()); |
526 | 0 | block->cell_allocator().block_did_become_usable({}, *block); |
527 | 0 | } |
528 | | |
529 | | if constexpr (HEAP_DEBUG) { |
530 | | for_each_block([&](auto& block) { |
531 | | dbgln(" > Live HeapBlock @ {}: cell_size={}", &block, block.cell_size()); |
532 | | return IterationDecision::Continue; |
533 | | }); |
534 | | } |
535 | | |
536 | 61 | m_gc_bytes_threshold = live_cell_bytes > GC_MIN_BYTES_THRESHOLD ? live_cell_bytes : GC_MIN_BYTES_THRESHOLD; |
537 | | |
538 | 61 | if (print_report) { |
539 | 0 | Duration const time_spent = measurement_timer.elapsed_time(); |
540 | 0 | size_t live_block_count = 0; |
541 | 0 | for_each_block([&](auto&) { |
542 | 0 | ++live_block_count; |
543 | 0 | return IterationDecision::Continue; |
544 | 0 | }); |
545 | |
546 | 0 | dbgln("Garbage collection report"); |
547 | 0 | dbgln("============================================="); |
548 | 0 | dbgln(" Time spent: {} ms", time_spent.to_milliseconds()); |
549 | 0 | dbgln(" Live cells: {} ({} bytes)", live_cells, live_cell_bytes); |
550 | 0 | dbgln("Collected cells: {} ({} bytes)", collected_cells, collected_cell_bytes); |
551 | 0 | dbgln(" Live blocks: {} ({} bytes)", live_block_count, live_block_count * HeapBlock::block_size); |
552 | 0 | dbgln(" Freed blocks: {} ({} bytes)", empty_blocks.size(), empty_blocks.size() * HeapBlock::block_size); |
553 | 0 | dbgln("============================================="); |
554 | 0 | } |
555 | 61 | } |
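The threshold update at the end of the sweep drives the trigger in will_allocate(): the next collection is scheduled once the bytes allocated since this sweep exceed the live bytes it observed. For example, if a sweep leaves 4 MiB of live cells (and that is above the floor), m_gc_bytes_threshold becomes 4 MiB, so roughly another 4 MiB of allocations will pass before collect_garbage() runs again; if live bytes fall below GC_MIN_BYTES_THRESHOLD, that floor is used instead.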
556 | | |
557 | | void Heap::defer_gc() |
558 | 79.2k | { |
559 | 79.2k | ++m_gc_deferrals; |
560 | 79.2k | } |
561 | | |
562 | | void Heap::undefer_gc() |
563 | 79.2k | { |
564 | 79.2k | VERIFY(m_gc_deferrals > 0); |
565 | 79.2k | --m_gc_deferrals; |
566 | | |
567 | 79.2k | if (!m_gc_deferrals) { |
568 | 9.45k | if (m_should_gc_when_deferral_ends) |
569 | 0 | collect_garbage(); |
570 | 9.45k | m_should_gc_when_deferral_ends = false; |
571 | 9.45k | } |
572 | 79.2k | } |
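defer_gc() and undefer_gc() are meant to be balanced: while at least one deferral is active, a requested collection is only recorded, and it runs as soon as the last deferral ends. Callers typically pair the two through an RAII guard; LibJS provides one as DeferGC, and the sketch below is an illustrative reconstruction of that pattern (it assumes the guard is permitted to call defer_gc()/undefer_gc(), which in the real code may require friendship):

// Sketch of an RAII deferral guard: garbage collection is deferred for the
// guard's lifetime and re-enabled (possibly triggering a pending GC) on destruction.
class GCDeferralGuard {
public:
    explicit GCDeferralGuard(Heap& heap)
        : m_heap(heap)
    {
        m_heap.defer_gc();
    }
    ~GCDeferralGuard() { m_heap.undefer_gc(); }

private:
    Heap& m_heap;
};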
573 | | |
574 | | void Heap::uproot_cell(Cell* cell) |
575 | 0 | { |
576 | 0 | m_uprooted_cells.append(cell); |
577 | 0 | } |
578 | | |
579 | | void register_safe_function_closure(void* base, size_t size, SourceLocation* location) |
580 | 0 | { |
581 | 0 | if (!s_custom_ranges_for_conservative_scan) { |
582 | | // FIXME: This per-thread HashMap is currently leaked on thread exit. |
583 | 0 | s_custom_ranges_for_conservative_scan = new HashMap<FlatPtr*, size_t>; |
584 | 0 | } |
585 | 0 | if (!s_safe_function_locations) { |
586 | 0 | s_safe_function_locations = new HashMap<FlatPtr*, SourceLocation*>; |
587 | 0 | } |
588 | 0 | auto result = s_custom_ranges_for_conservative_scan->set(reinterpret_cast<FlatPtr*>(base), size); |
589 | 0 | VERIFY(result == AK::HashSetResult::InsertedNewEntry); |
590 | 0 | result = s_safe_function_locations->set(reinterpret_cast<FlatPtr*>(base), location); |
591 | 0 | VERIFY(result == AK::HashSetResult::InsertedNewEntry); |
592 | 0 | } |
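register_safe_function_closure() and its counterpart below bracket the lifetime of a closure's capture storage so that the conservative scan in gather_conservative_roots() treats any GC pointers captured there as roots. A minimal sketch of a caller, with purely illustrative names (the real JS::SafeFunction wires these calls into its own capture-buffer management):

// Sketch only: keep a buffer registered for conservative scanning while it is alive.
// ScannedCaptureBuffer is an illustrative name, not a LibJS type.
struct ScannedCaptureBuffer {
    alignas(FlatPtr) u8 storage[64] {};
    SourceLocation location { SourceLocation::current() };

    ScannedCaptureBuffer()
    {
        register_safe_function_closure(storage, sizeof(storage), &location);
    }
    ~ScannedCaptureBuffer()
    {
        unregister_safe_function_closure(storage, sizeof(storage), &location);
    }
};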
593 | | |
594 | | void unregister_safe_function_closure(void* base, size_t, SourceLocation*) |
595 | 0 | { |
596 | 0 | VERIFY(s_custom_ranges_for_conservative_scan); |
597 | 0 | bool did_remove_range = s_custom_ranges_for_conservative_scan->remove(reinterpret_cast<FlatPtr*>(base)); |
598 | 0 | VERIFY(did_remove_range); |
599 | 0 | bool did_remove_location = s_safe_function_locations->remove(reinterpret_cast<FlatPtr*>(base)); |
600 | 0 | VERIFY(did_remove_location); |
601 | 0 | } |
602 | | |
603 | | } |