// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/factory.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

namespace {

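// Asserts that, after an aborted compaction, {page} looks like a regular,
// swept old-space page again.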
void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared.
  // 2) The page is no longer marked as an evacuation candidate.
  // 3) The page is no longer marked as having an aborted compaction.
  CHECK(page->heap()
            ->mark_compact_collector()
            ->non_atomic_marking_state()
            ->bitmap(page)
            ->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}

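// Verifies that every array in {handles} still resides on {page}, i.e. that
// none of them was migrated away.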
void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
                           Page* page) {
  for (Handle<FixedArray> fixed_array : handles) {
    CHECK(Page::FromHeapObject(*fixed_array) == page);
  }
}

}  // namespace

HEAP_TEST(CompactionFullAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a fully aborted page.
  ManualGCScope manual_gc_scope;
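  // Select evacuation candidates manually, via the
  // FORCE_EVACUATION_CANDIDATE_FOR_TESTING page flag below, instead of
  // relying on the collector's heuristics.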
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

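    // Seal the current heap state so that none of the pre-existing pages is
    // chosen as an evacuation candidate and new allocations go to fresh pages.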
    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      CHECK(heap->old_space()->Expand());
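      // Completely fill the newly added page, so that aborting its evacuation
      // means aborting the whole page.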
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          TENURED);
      Page* to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

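      // Simulate an out-of-memory situation: with no space to evacuate to,
      // compaction of the candidate page must be aborted in full.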
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromHeapObject(*object));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size =
      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
      objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          TENURED, object_size);
      Page* to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles =
            heap::CreatePadding(heap, object_size * num_objects, TENURED,
                                object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

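        // Force OOM so that evacuation can only use the space remaining on
        // {page_to_fill}; once that is exhausted, the rest of the candidate
        // page is aborted part-way through.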
        heap->set_force_oom(true);
        CcTest::CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromHeapObject(*object) == to_be_aborted_page));
          if (Page::FromHeapObject(*object) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromHeapObject(*object), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted-page
  // pointers get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size =
      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
      objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);

    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          heap::CreatePadding(
              heap,
              static_cast<int>(
                  MemoryChunkLayout::AllocatableMemoryInDataPage()),
              TENURED, object_size);
      to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
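      // Link the arrays into a chain through slot 0, last to first. This
      // creates pointers within the soon-to-be-aborted page that have to be
      // updated correctly for targets that do get migrated.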
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }
    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());

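      // As before, force OOM so that evacuation stops after filling
      // {page_to_fill}, leaving the candidate page partially aborted.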
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(current->IsFixedArray());
        if (Page::FromHeapObject(*current) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromHeapObject(*current) == to_be_aborted_page;
        bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we migrated at least one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space, where their
  // payload looks like a valid new-space pointer.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size =
      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()) /
      objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);
    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          TENURED, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
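      // Storing a new-space pointer into slot 1 of the first array creates an
      // old-to-new slot that the store buffer has to track, and that has to
      // be cleared and rebuilt when compaction of the page is aborted.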
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, NOT_TENURED);
      CHECK(Heap::InNewSpace(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());

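      // Force OOM once more so that the candidate page is only partially
      // evacuated.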
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(!Heap::InNewSpace(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromHeapObject(*current) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromHeapObject(*current) == to_be_aborted_page;
        bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we migrated at least one object, as otherwise the test
      // would not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, NOT_TENURED);
      // Create a broken address that looks like a tagged pointer to a
      // new-space object.
      Address broken_address = holder->address() + 2 * kTaggedSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), kTaggedSize);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed-size array, which corresponds to the
        // first word in the string. Since the first object definitely
        // migrated, we can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
                     .ToHandleChecked();
      } while (Page::FromHeapObject(*string) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space and
      // the following scavenge will crash.
      CcTest::CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8
