// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/factory.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

namespace {

19 24 : void CheckInvariantsOfAbortedPage(Page* page) {
20 : // Check invariants:
21 : // 1) Markbits are cleared
22 : // 2) The page is not marked as evacuation candidate anymore
23 : // 3) The page is not marked as aborted compaction anymore.
24 24 : CHECK(page->heap()
25 : ->mark_compact_collector()
26 : ->non_atomic_marking_state()
27 : ->bitmap(page)
28 : ->IsClean());
29 24 : CHECK(!page->IsEvacuationCandidate());
30 24 : CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
31 24 : }
33 24 : void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
34 : Page* page) {
35 240 : for (auto& fixed_array : handles) {
36 384 : CHECK(Page::FromAddress(fixed_array->address()) == page);
37 : }
38 24 : }

}  // namespace

42 23724 : HEAP_TEST(CompactionFullAbortedPage) {
43 12 : if (FLAG_never_compact) return;
44 : // Test the scenario where we reach OOM during compaction and the whole page
45 : // is aborted.
46 :
47 : // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
48 : // we can reach the state of a half aborted page.
49 6 : FLAG_concurrent_sweeping = false;
50 6 : FLAG_concurrent_marking = false;
51 6 : FLAG_stress_incremental_marking = false;
52 6 : FLAG_manual_evacuation_candidates_selection = true;
53 6 : CcTest::InitializeVM();
54 : Isolate* isolate = CcTest::i_isolate();
55 18 : Heap* heap = isolate->heap();
56 : {
57 : HandleScope scope1(isolate);
58 :
59 6 : heap::SealCurrentObjects(heap);
60 :
61 : {
62 : HandleScope scope2(isolate);
63 6 : CHECK(heap->old_space()->Expand());
64 : auto compaction_page_handles =
65 6 : heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
66 : Page* to_be_aborted_page =
67 6 : Page::FromAddress(compaction_page_handles.front()->address());
68 : to_be_aborted_page->SetFlag(
69 : MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
70 6 : CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
71 :
72 : heap->set_force_oom(true);
73 6 : CcTest::CollectAllGarbage();
74 6 : heap->mark_compact_collector()->EnsureSweepingCompleted();
75 :
76 : // Check that all handles still point to the same page, i.e., compaction
77 : // has been aborted on the page.
78 24 : for (Handle<FixedArray> object : compaction_page_handles) {
79 24 : CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
80 : }
81 6 : CheckInvariantsOfAbortedPage(to_be_aborted_page);
82 : }
83 : }
84 : }


87 23724 : HEAP_TEST(CompactionPartiallyAbortedPage) {
88 12 : if (FLAG_never_compact) return;
89 : // Test the scenario where we reach OOM during compaction and parts of the
90 : // page have already been migrated to a new one.
91 :
92 : // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
93 : // we can reach the state of a half aborted page.
94 6 : FLAG_concurrent_sweeping = false;
95 6 : FLAG_concurrent_marking = false;
96 6 : FLAG_stress_incremental_marking = false;
97 6 : FLAG_manual_evacuation_candidates_selection = true;
98 :
99 : const int objects_per_page = 10;
100 : const int object_size = Page::kAllocatableMemory / objects_per_page;
101 :
102 6 : CcTest::InitializeVM();
103 : Isolate* isolate = CcTest::i_isolate();
104 24 : Heap* heap = isolate->heap();
105 : {
106 : HandleScope scope1(isolate);
107 :
108 6 : heap::SealCurrentObjects(heap);
109 :
110 : {
111 : HandleScope scope2(isolate);
112 : // Fill another page with objects of size {object_size} (last one is
113 : // properly adjusted).
114 6 : CHECK(heap->old_space()->Expand());
115 : auto compaction_page_handles = heap::CreatePadding(
116 6 : heap, Page::kAllocatableMemory, TENURED, object_size);
117 : Page* to_be_aborted_page =
118 6 : Page::FromAddress(compaction_page_handles.front()->address());
119 : to_be_aborted_page->SetFlag(
120 : MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
121 6 : CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
122 :
123 : {
124 : // Add another page that is filled with {num_objects} objects of size
125 : // {object_size}.
126 : HandleScope scope3(isolate);
127 6 : CHECK(heap->old_space()->Expand());
128 : const int num_objects = 3;
129 : std::vector<Handle<FixedArray>> page_to_fill_handles =
130 : heap::CreatePadding(heap, object_size * num_objects, TENURED,
131 6 : object_size);
132 : Page* page_to_fill =
133 6 : Page::FromAddress(page_to_fill_handles.front()->address());
134 :
135 : heap->set_force_oom(true);
136 6 : CcTest::CollectAllGarbage();
137 6 : heap->mark_compact_collector()->EnsureSweepingCompleted();
138 :
139 : bool migration_aborted = false;
140 72 : for (Handle<FixedArray> object : compaction_page_handles) {
141 : // Once compaction has been aborted, all following objects still have
142 : // to be on the initial page.
143 84 : CHECK(!migration_aborted ||
144 : (Page::FromAddress(object->address()) == to_be_aborted_page));
145 120 : if (Page::FromAddress(object->address()) == to_be_aborted_page) {
146 : // This object has not been migrated.
147 : migration_aborted = true;
148 : } else {
149 42 : CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
150 : }
151 : }
152 : // Check that we actually created a scenario with a partially aborted
153 : // page.
154 6 : CHECK(migration_aborted);
155 6 : CheckInvariantsOfAbortedPage(to_be_aborted_page);
156 : }
157 : }
158 : }
159 : }


162 23724 : HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
163 12 : if (FLAG_never_compact) return;
164 : // Test the scenario where we reach OOM during compaction and parts of the
165 : // page have already been migrated to a new one. Objects on the aborted page
166 : // are linked together. This test makes sure that intra-aborted page pointers
167 : // get properly updated.
168 :
169 : // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
170 : // we can reach the state of a half aborted page.
171 6 : FLAG_concurrent_sweeping = false;
172 6 : FLAG_concurrent_marking = false;
173 6 : FLAG_stress_incremental_marking = false;
174 6 : FLAG_manual_evacuation_candidates_selection = true;
175 :
176 : const int objects_per_page = 10;
177 : const int object_size = Page::kAllocatableMemory / objects_per_page;
178 :
179 6 : CcTest::InitializeVM();
180 : Isolate* isolate = CcTest::i_isolate();
181 90 : Heap* heap = isolate->heap();
182 : {
183 : HandleScope scope1(isolate);
184 : Handle<FixedArray> root_array =
185 6 : isolate->factory()->NewFixedArray(10, TENURED);
186 :
187 6 : heap::SealCurrentObjects(heap);
188 :
189 : Page* to_be_aborted_page = nullptr;
190 : {
191 : HandleScope temporary_scope(isolate);
192 : // Fill a fresh page with objects of size {object_size} (last one is
193 : // properly adjusted).
194 6 : CHECK(heap->old_space()->Expand());
195 : std::vector<Handle<FixedArray>> compaction_page_handles =
196 : heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
197 6 : object_size);
198 : to_be_aborted_page =
199 6 : Page::FromAddress(compaction_page_handles.front()->address());
200 : to_be_aborted_page->SetFlag(
201 : MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
202 72 : for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
203 108 : compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
204 : }
205 6 : root_array->set(0, *compaction_page_handles.back());
206 6 : CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
207 : }
208 : {
209 : // Add another page that is filled with {num_objects} objects of size
210 : // {object_size}.
211 : HandleScope scope3(isolate);
212 6 : CHECK(heap->old_space()->Expand());
213 : const int num_objects = 2;
214 : int used_memory = object_size * num_objects;
215 : std::vector<Handle<FixedArray>> page_to_fill_handles =
216 6 : heap::CreatePadding(heap, used_memory, TENURED, object_size);
217 : Page* page_to_fill =
218 6 : Page::FromAddress(page_to_fill_handles.front()->address());
219 :
220 : heap->set_force_oom(true);
221 6 : CcTest::CollectAllGarbage();
222 6 : heap->mark_compact_collector()->EnsureSweepingCompleted();
223 :
224 : // The following check makes sure that we compacted "some" objects, while
225 : // leaving others in place.
226 : bool in_place = true;
227 : Handle<FixedArray> current = root_array;
228 72 : while (current->get(0) != heap->undefined_value()) {
229 : current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
230 60 : CHECK(current->IsFixedArray());
231 120 : if (Page::FromAddress(current->address()) != to_be_aborted_page) {
232 : in_place = false;
233 : }
234 : bool on_aborted_page =
235 : Page::FromAddress(current->address()) == to_be_aborted_page;
236 : bool on_fill_page =
237 : Page::FromAddress(current->address()) == page_to_fill;
238 60 : CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
239 : }
240 : // Check that we at least migrated one object, as otherwise the test would
241 : // not trigger.
242 6 : CHECK(!in_place);
243 6 : CheckInvariantsOfAbortedPage(to_be_aborted_page);
244 : }
245 : }
246 : }


249 23724 : HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
250 12 : if (FLAG_never_compact) return;
251 : // Test the scenario where we reach OOM during compaction and parts of the
252 : // page have already been migrated to a new one. Objects on the aborted page
253 : // are linked together and the very first object on the aborted page points
254 : // into new space. The test verifies that the store buffer entries are
255 : // properly cleared and rebuilt after aborting a page. Failing to do so can
256 : // result in other objects being allocated in the free space where their
257 : // payload looks like a valid new space pointer.
258 :
259 : // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
260 : // we can reach the state of a half aborted page.
261 6 : FLAG_concurrent_sweeping = false;
262 6 : FLAG_concurrent_marking = false;
263 6 : FLAG_stress_incremental_marking = false;
264 6 : FLAG_manual_evacuation_candidates_selection = true;
265 :
266 : const int objects_per_page = 10;
267 : const int object_size = Page::kAllocatableMemory / objects_per_page;
268 :
269 6 : CcTest::InitializeVM();
270 : Isolate* isolate = CcTest::i_isolate();
271 90 : Heap* heap = isolate->heap();
272 : {
273 : HandleScope scope1(isolate);
274 : Handle<FixedArray> root_array =
275 6 : isolate->factory()->NewFixedArray(10, TENURED);
276 6 : heap::SealCurrentObjects(heap);
277 :
278 : Page* to_be_aborted_page = nullptr;
279 : {
280 : HandleScope temporary_scope(isolate);
281 : // Fill another page with objects of size {object_size} (last one is
282 : // properly adjusted).
283 6 : CHECK(heap->old_space()->Expand());
284 : auto compaction_page_handles = heap::CreatePadding(
285 6 : heap, Page::kAllocatableMemory, TENURED, object_size);
286 : // Sanity check that we have enough space for linking up arrays.
287 6 : CHECK_GE(compaction_page_handles.front()->length(), 2);
288 : to_be_aborted_page =
289 6 : Page::FromAddress(compaction_page_handles.front()->address());
290 : to_be_aborted_page->SetFlag(
291 : MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
292 :
293 72 : for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
294 108 : compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
295 : }
296 6 : root_array->set(0, *compaction_page_handles.back());
297 : Handle<FixedArray> new_space_array =
298 6 : isolate->factory()->NewFixedArray(1, NOT_TENURED);
299 6 : CHECK(heap->InNewSpace(*new_space_array));
300 6 : compaction_page_handles.front()->set(1, *new_space_array);
301 6 : CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
302 : }
303 :
304 : {
305 : // Add another page that is filled with {num_objects} objects of size
306 : // {object_size}.
307 : HandleScope scope3(isolate);
308 6 : CHECK(heap->old_space()->Expand());
309 : const int num_objects = 2;
310 : int used_memory = object_size * num_objects;
311 : std::vector<Handle<FixedArray>> page_to_fill_handles =
312 6 : heap::CreatePadding(heap, used_memory, TENURED, object_size);
313 : Page* page_to_fill =
314 6 : Page::FromAddress(page_to_fill_handles.front()->address());
315 :
316 : heap->set_force_oom(true);
317 6 : CcTest::CollectAllGarbage();
318 6 : heap->mark_compact_collector()->EnsureSweepingCompleted();
319 :
320 : // The following check makes sure that we compacted "some" objects, while
321 : // leaving others in place.
322 : bool in_place = true;
323 : Handle<FixedArray> current = root_array;
324 72 : while (current->get(0) != heap->undefined_value()) {
325 : current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
326 60 : CHECK(!heap->InNewSpace(*current));
327 60 : CHECK(current->IsFixedArray());
328 120 : if (Page::FromAddress(current->address()) != to_be_aborted_page) {
329 : in_place = false;
330 : }
331 : bool on_aborted_page =
332 : Page::FromAddress(current->address()) == to_be_aborted_page;
333 : bool on_fill_page =
334 : Page::FromAddress(current->address()) == page_to_fill;
335 60 : CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
336 : }
337 : // Check that we at least migrated one object, as otherwise the test would
338 : // not trigger.
339 6 : CHECK(!in_place);
340 6 : CheckInvariantsOfAbortedPage(to_be_aborted_page);
341 :
342 : // Allocate a new object in new space.
343 : Handle<FixedArray> holder =
344 6 : isolate->factory()->NewFixedArray(10, NOT_TENURED);
345 : // Create a broken address that looks like a tagged pointer to a new space
346 : // object.
347 6 : Address broken_address = holder->address() + 2 * kPointerSize + 1;
348 : // Convert it to a vector to create a string from it.
349 : Vector<const uint8_t> string_to_broken_addresss(
350 : reinterpret_cast<const uint8_t*>(&broken_address), kPointerSize);
351 :
352 : Handle<String> string;
353 6 : do {
354 : // We know that the interesting slot will be on the aborted page and
355 : // hence we allocate until we get our string on the aborted page.
356 : // We used slot 1 in the fixed size array which corresponds to the
357 : // the first word in the string. Since the first object definitely
358 : // migrated we can just allocate until we hit the aborted page.
359 : string = isolate->factory()
360 : ->NewStringFromOneByte(string_to_broken_addresss, TENURED)
361 12 : .ToHandleChecked();
362 6 : } while (Page::FromAddress(string->address()) != to_be_aborted_page);
363 :
364 : // If store buffer entries are not properly filtered/reset for aborted
365 : // pages we have now a broken address at an object slot in old space and
366 : // the following scavenge will crash.
367 6 : CcTest::CollectGarbage(NEW_SPACE);
368 : }
369 : }
370 : }

}  // namespace heap
}  // namespace internal
}  // namespace v8