// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

namespace {

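// A page is "aborted" when the collector picked it as an evacuation candidate
// but could not allocate space for the migrated copies; its objects then stay
// in place and the page has to be rolled back into a clean, sweepable state.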
void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared
  // 2) The page is not marked as evacuation candidate anymore
  // 3) The page is not marked as aborted compaction anymore.
  CHECK(page->heap()
            ->mark_compact_collector()
            ->non_atomic_marking_state()
            ->bitmap(page)
            ->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}

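// Asserts that every FixedArray in {handles} still resides on {page}, i.e.,
// that none of the objects was migrated during evacuation.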
void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
                           Page* page) {
  for (Handle<FixedArray> fixed_array : handles) {
    CHECK(Page::FromHeapObject(*fixed_array) == page);
  }
}

}  // namespace

HEAP_TEST(CompactionFullAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.
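  // The OOM is simulated via heap->set_force_oom(true) below, which makes the
  // allocation of evacuation targets fail, so the collector has to abort
  // compaction of the candidate page.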

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a fully aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          AllocationType::kOld);
      Page* to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromHeapObject(*object));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.
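  // "Partially aborted" means that evacuation already copied some objects off
  // the page before the simulated OOM hit; the remaining objects stay behind
  // on the aborted page.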

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size =
      Min(kMaxRegularHeapObjectSize,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
              objects_per_page);
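  // Note: the division aims for roughly {objects_per_page} objects per page;
  // the cap at kMaxRegularHeapObjectSize keeps each allocation a regular-sized
  // object rather than spilling into large-object space.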

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          AllocationType::kOld, object_size);
      Page* to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles =
            heap::CreatePadding(heap, object_size * num_objects,
                                AllocationType::kOld, object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

        heap->set_force_oom(true);
        CcTest::CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromHeapObject(*object) == to_be_aborted_page));
          if (Page::FromHeapObject(*object) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromHeapObject(*object), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted page pointers
  // get properly updated.
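  // After the abort, a slot on the aborted page must point to the new location
  // if its target was migrated before the OOM, and must keep pointing into the
  // aborted page if its target stayed in place.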

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size =
      Min(kMaxRegularHeapObjectSize,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
              objects_per_page);

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, AllocationType::kOld);

    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          heap::CreatePadding(
              heap,
              static_cast<int>(
                  MemoryChunkLayout::AllocatableMemoryInDataPage()),
              AllocationType::kOld, object_size);
      to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
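      // Link the arrays into a chain through slot 0: each handle points to the
      // previously created one, and root_array points at the head (the last
      // handle), so the chain can be walked after the GC below.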
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }
    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, AllocationType::kOld,
                              object_size);
      Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());

      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(current->IsFixedArray());
        if (Page::FromHeapObject(*current) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromHeapObject(*current) == to_be_aborted_page;
        bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space where their
  // payload looks like a valid new space pointer.
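  // The old-to-new pointer written into slot 1 below is what creates the store
  // buffer (old-to-new remembered set) entry for a slot on the page that is
  // about to be aborted.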

  // Disable concurrent sweeping to ensure memory is in an expected state,
  // i.e., we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size =
      Min(kMaxRegularHeapObjectSize,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
              objects_per_page);

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, AllocationType::kOld);
    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          AllocationType::kOld, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, AllocationType::kYoung);
      CHECK(Heap::InYoungGeneration(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, AllocationType::kOld,
                              object_size);
      Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());

      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(!Heap::InYoungGeneration(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromHeapObject(*current) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromHeapObject(*current) == to_be_aborted_page;
        bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, AllocationType::kYoung);
      // Create a broken address that looks like a tagged pointer to a new
      // space object.
      Address broken_address = holder->address() + 2 * kTaggedSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), kTaggedSize);
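      // The string's payload is the raw bytes of {broken_address}, so the heap
      // word holding those characters reads back as a value with the low tag
      // bit set, i.e., as a seemingly valid tagged pointer into new space.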

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed size array which corresponds to the
        // first word in the string. Since the first object definitely
        // migrated we can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address,
                                            AllocationType::kOld)
                     .ToHandleChecked();
      } while (Page::FromHeapObject(*string) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space and
      // the following scavenge will crash.
      CcTest::CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8