// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <functional>
#include <map>

#include "src/base/region-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
#include "src/ostreams.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

// This is a v8::PageAllocator implementation that decorates the provided
// page allocator object with page-tracking functionality.
class TrackingPageAllocator : public ::v8::PageAllocator {
 public:
  explicit TrackingPageAllocator(v8::PageAllocator* page_allocator)
      : page_allocator_(page_allocator),
        allocate_page_size_(page_allocator_->AllocatePageSize()),
        commit_page_size_(page_allocator_->CommitPageSize()),
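        // Track the whole address space: size_t{0} - commit_page_size_ wraps
        // around to the largest commit-page-aligned region size, so every
        // possible allocation falls inside the tracked region.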
        region_allocator_(kNullAddress, size_t{0} - commit_page_size_,
                          commit_page_size_) {
    CHECK_NOT_NULL(page_allocator);
    CHECK(IsAligned(allocate_page_size_, commit_page_size_));
  }
  ~TrackingPageAllocator() override = default;

  size_t AllocatePageSize() override { return allocate_page_size_; }

  size_t CommitPageSize() override { return commit_page_size_; }

  void SetRandomMmapSeed(int64_t seed) override {
    return page_allocator_->SetRandomMmapSeed(seed);
  }

  void* GetRandomMmapAddr() override {
    return page_allocator_->GetRandomMmapAddr();
  }

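  // Forwards the request to the wrapped allocator and, on success, records
  // the requested permission for every commit-size page of the new region.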
  void* AllocatePages(void* address, size_t size, size_t alignment,
                      PageAllocator::Permission access) override {
    void* result =
        page_allocator_->AllocatePages(address, size, alignment, access);
    if (result) {
      // Mark pages as used.
      Address current_page = reinterpret_cast<Address>(result);
      CHECK(IsAligned(current_page, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      CHECK(region_allocator_.AllocateRegionAt(current_page, size));
      Address end = current_page + size;
      while (current_page < end) {
        page_permissions_.insert({current_page, access});
        current_page += commit_page_size_;
      }
    }
    return result;
  }

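  // Forwards the request to the wrapped allocator and, on success, releases
  // the tracked region together with its per-page permission entries.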
  bool FreePages(void* address, size_t size) override {
    bool result = page_allocator_->FreePages(address, size);
    if (result) {
      // Mark pages as free.
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      size_t freed_size = region_allocator_.FreeRegion(start);
      CHECK(IsAligned(freed_size, commit_page_size_));
      CHECK_EQ(RoundUp(freed_size, allocate_page_size_), size);
      auto start_iter = page_permissions_.find(start);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

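  // Trims the region at |address| from |size| down to |new_size| and drops
  // the permission entries of the released tail pages.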
  bool ReleasePages(void* address, size_t size, size_t new_size) override {
    bool result = page_allocator_->ReleasePages(address, size, new_size);
    if (result) {
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, commit_page_size_));
      CHECK(IsAligned(new_size, commit_page_size_));
      CHECK_LT(new_size, size);
      CHECK_EQ(region_allocator_.TrimRegion(start, new_size), size - new_size);
      auto start_iter = page_permissions_.find(start + new_size);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool SetPermissions(void* address, size_t size,
                      PageAllocator::Permission access) override {
    bool result = page_allocator_->SetPermissions(address, size, access);
    if (result) {
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access);
    }
    return result;
  }

  // Returns true if all the allocated pages were freed.
  bool IsEmpty() { return page_permissions_.empty(); }

  void CheckIsFree(Address address, size_t size) {
    CHECK(IsAligned(address, allocate_page_size_));
    CHECK(IsAligned(size, allocate_page_size_));
    EXPECT_TRUE(region_allocator_.IsFree(address, size));
  }

  void CheckPagePermissions(Address address, size_t size,
                            PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      EXPECT_EQ(access, value->second);
    });
  }

  void Print(const char* comment) const {
    i::StdoutStream os;
    os << "\n========================================="
       << "\nTrackingPageAllocator state: ";
    if (comment) os << comment;
    os << "\n-----------------------------------------\n";
    region_allocator_.Print(os);
    os << "-----------------------------------------"
       << "\nPage permissions:";
    if (page_permissions_.empty()) {
      os << " empty\n";
      return;
    }
    os << "\n" << std::hex << std::showbase;

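    // Coalesce runs of adjacent pages with identical permissions into single
    // [start, end) regions so the output stays readable. The initial
    // static_cast<Address>(-1) sentinel marks "no region started yet".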
    Address contiguous_region_start = static_cast<Address>(-1);
    Address contiguous_region_end = contiguous_region_start;
    PageAllocator::Permission contiguous_region_access =
        PageAllocator::kNoAccess;
    for (auto& pair : page_permissions_) {
      if (contiguous_region_end == pair.first &&
          pair.second == contiguous_region_access) {
        contiguous_region_end += commit_page_size_;
        continue;
      }
      if (contiguous_region_start != contiguous_region_end) {
        PrintRegion(os, contiguous_region_start, contiguous_region_end,
                    contiguous_region_access);
      }
      contiguous_region_start = pair.first;
      contiguous_region_end = pair.first + commit_page_size_;
      contiguous_region_access = pair.second;
    }
    if (contiguous_region_start != contiguous_region_end) {
      PrintRegion(os, contiguous_region_start, contiguous_region_end,
                  contiguous_region_access);
    }
  }

 private:
  using PagePermissionsMap = std::map<Address, PageAllocator::Permission>;
  using ForEachFn = std::function<void(PagePermissionsMap::value_type*)>;

  static void PrintRegion(std::ostream& os, Address start, Address end,
                          PageAllocator::Permission access) {
    os << " page: [" << start << ", " << end << "), access: ";
    switch (access) {
      case PageAllocator::kNoAccess:
        os << "--";
        break;
      case PageAllocator::kRead:
        os << "R";
        break;
      case PageAllocator::kReadWrite:
        os << "RW";
        break;
      case PageAllocator::kReadWriteExecute:
        os << "RWX";
        break;
      case PageAllocator::kReadExecute:
        os << "RX";
        break;
    }
    os << "\n";
  }

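  // Invokes |fn| for every tracked commit-size page in the range
  // [address, address + size). The whole range must already be tracked.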
  void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
    CHECK(IsAligned(address, commit_page_size_));
    CHECK(IsAligned(size, commit_page_size_));
    auto start_iter = page_permissions_.find(address);
    // Start page must exist in page_permissions_.
    CHECK_NE(start_iter, page_permissions_.end());
    auto end_iter = page_permissions_.find(address + size - commit_page_size_);
    // The last page of the range must also exist in page_permissions_.
    CHECK_NE(end_iter, page_permissions_.end());
    // Advance past the last page so that the loop below processes it too.
    ++end_iter;
    for (auto iter = start_iter; iter != end_iter; ++iter) {
      PagePermissionsMap::value_type& pair = *iter;
      fn(&pair);
    }
  }

  void UpdatePagePermissions(Address address, size_t size,
                             PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      value->second = access;
    });
  }

  v8::PageAllocator* const page_allocator_;
  const size_t allocate_page_size_;
  const size_t commit_page_size_;
  // Region allocator tracks page allocation/deallocation requests.
  base::RegionAllocator region_allocator_;
  // This map keeps track of allocated pages' permissions.
  PagePermissionsMap page_permissions_;
};

class SequentialUnmapperTest : public TestWithIsolate {
 public:
  SequentialUnmapperTest() = default;
  ~SequentialUnmapperTest() override = default;

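  // Install the tracking allocator as the platform page allocator before
  // TestWithIsolate creates the isolate, so that every page operation made
  // by the isolate goes through the tracker.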
  static void SetUpTestCase() {
    CHECK_NULL(tracking_page_allocator_);
    old_page_allocator_ = GetPlatformPageAllocator();
    tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
    CHECK(tracking_page_allocator_->IsEmpty());
    CHECK_EQ(old_page_allocator_,
             SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
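    // Disable concurrent sweeping so that pages are freed deterministically
    // on the main thread while the tests inspect the tracked state.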
    old_flag_ = i::FLAG_concurrent_sweeping;
    i::FLAG_concurrent_sweeping = false;
    TestWithIsolate::SetUpTestCase();
  }

  static void TearDownTestCase() {
    TestWithIsolate::TearDownTestCase();
    i::FLAG_concurrent_sweeping = old_flag_;
    CHECK(tracking_page_allocator_->IsEmpty());
    delete tracking_page_allocator_;
    tracking_page_allocator_ = nullptr;
  }

  Heap* heap() { return isolate()->heap(); }
  MemoryAllocator* allocator() { return heap()->memory_allocator(); }
  MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }

  TrackingPageAllocator* tracking_page_allocator() {
    return tracking_page_allocator_;
  }

 private:
  static TrackingPageAllocator* tracking_page_allocator_;
  static v8::PageAllocator* old_page_allocator_;
  static bool old_flag_;

  DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};

TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
    nullptr;
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;

// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
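  // Freeing the page in pooled-and-queue mode only hands the chunk over to
  // the unmapper queue; the memory itself stays mapped read-write until the
  // queue is processed.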
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
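  // Draining the queue uncommits the pooled page (kNoAccess), but its
  // reservation is kept around for reuse.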
  unmapper()->FreeQueuedChunks();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
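  // Tearing down the unmapper must release the pooled chunk's reservation
  // (unless the isolate itself owns the pages, see below).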
  unmapper()->TearDown();
  if (i_isolate()->isolate_allocation_mode() ==
      IsolateAllocationMode::kInV8Heap) {
    // In this mode the Isolate uses a bounded page allocator that allocates
    // pages inside a pre-reserved region. Thus these pages are kept reserved
    // until the Isolate dies.
    tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                    PageAllocator::kNoAccess);
  } else {
    CHECK_EQ(IsolateAllocationMode::kInCppHeap,
             i_isolate()->isolate_allocation_mode());
    tracking_page_allocator()->CheckIsFree(page->address(), page_size);
  }
}

// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);

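  // Free the page as pooled-and-queued but never call FreeQueuedChunks:
  // TearDown alone must still release the queued page.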
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  unmapper()->TearDown();
  if (i_isolate()->isolate_allocation_mode() ==
      IsolateAllocationMode::kInV8Heap) {
    // In this mode the Isolate uses a bounded page allocator that allocates
    // pages inside a pre-reserved region. Thus these pages are kept reserved
    // until the Isolate dies.
    tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                    PageAllocator::kNoAccess);
  } else {
    CHECK_EQ(IsolateAllocationMode::kInCppHeap,
             i_isolate()->isolate_allocation_mode());
    tracking_page_allocator()->CheckIsFree(page->address(), page_size);
  }
}

}  // namespace internal
}  // namespace v8