// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <functional>
#include <map>

#include "src/base/region-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

// A v8::PageAllocator implementation that decorates the provided page
// allocator with page-tracking functionality.
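// Every call is forwarded to the wrapped allocator; on success the effect is
// mirrored in a RegionAllocator (which address ranges are allocated) and in a
// map holding the current permissions of each commit page.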
class TrackingPageAllocator : public ::v8::PageAllocator {
 public:
  explicit TrackingPageAllocator(v8::PageAllocator* page_allocator)
      : page_allocator_(page_allocator),
        allocate_page_size_(page_allocator_->AllocatePageSize()),
        commit_page_size_(page_allocator_->CommitPageSize()),
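        // size_t{0} - commit_page_size_ is the largest commit-page-aligned
        // size representable in a size_t, so the region allocator covers
        // (almost) the whole address space.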
        region_allocator_(kNullAddress, size_t{0} - commit_page_size_,
                          commit_page_size_) {
    CHECK_NOT_NULL(page_allocator);
    CHECK(IsAligned(allocate_page_size_, commit_page_size_));
  }
  ~TrackingPageAllocator() override = default;

  size_t AllocatePageSize() override { return allocate_page_size_; }

  size_t CommitPageSize() override { return commit_page_size_; }

  void SetRandomMmapSeed(int64_t seed) override {
    return page_allocator_->SetRandomMmapSeed(seed);
  }

  void* GetRandomMmapAddr() override {
    return page_allocator_->GetRandomMmapAddr();
  }

  void* AllocatePages(void* address, size_t size, size_t alignment,
                      PageAllocator::Permission access) override {
    void* result =
        page_allocator_->AllocatePages(address, size, alignment, access);
    if (result) {
      // Mark pages as used.
      Address current_page = reinterpret_cast<Address>(result);
      CHECK(IsAligned(current_page, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      CHECK(region_allocator_.AllocateRegionAt(current_page, size));
      Address end = current_page + size;
      while (current_page < end) {
        page_permissions_.insert({current_page, access});
        current_page += commit_page_size_;
      }
    }
    return result;
  }

  bool FreePages(void* address, size_t size) override {
    bool result = page_allocator_->FreePages(address, size);
    if (result) {
      // Mark pages as free.
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, allocate_page_size_));
      size_t freed_size = region_allocator_.FreeRegion(start);
      CHECK(IsAligned(freed_size, commit_page_size_));
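      // freed_size may be smaller than size if the region was previously
      // trimmed by ReleasePages(), so it is only commit-page-aligned; rounding
      // up must recover the original allocate-page-aligned allocation size.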
      CHECK_EQ(RoundUp(freed_size, allocate_page_size_), size);
      auto start_iter = page_permissions_.find(start);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool ReleasePages(void* address, size_t size, size_t new_size) override {
    bool result = page_allocator_->ReleasePages(address, size, new_size);
    if (result) {
      Address start = reinterpret_cast<Address>(address);
      CHECK(IsAligned(start, allocate_page_size_));
      CHECK(IsAligned(size, commit_page_size_));
      CHECK(IsAligned(new_size, commit_page_size_));
      CHECK_LT(new_size, size);
      CHECK_EQ(region_allocator_.TrimRegion(start, new_size), size - new_size);
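      // Drop the permission entries for the released tail
      // [start + new_size, start + size).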
      auto start_iter = page_permissions_.find(start + new_size);
      CHECK_NE(start_iter, page_permissions_.end());
      auto end_iter = page_permissions_.lower_bound(start + size);
      page_permissions_.erase(start_iter, end_iter);
    }
    return result;
  }

  bool SetPermissions(void* address, size_t size,
                      PageAllocator::Permission access) override {
    bool result = page_allocator_->SetPermissions(address, size, access);
    if (result) {
      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access);
    }
    return result;
  }

  // Returns true if all the allocated pages were freed.
  bool IsEmpty() { return page_permissions_.empty(); }

  void CheckIsFree(Address address, size_t size) {
    CHECK(IsAligned(address, allocate_page_size_));
    CHECK(IsAligned(size, allocate_page_size_));
    EXPECT_TRUE(region_allocator_.IsFree(address, size));
  }

  void CheckPagePermissions(Address address, size_t size,
                            PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      EXPECT_EQ(access, value->second);
    });
  }

  void Print(const char* comment) const {
    i::StdoutStream os;
    os << "\n========================================="
       << "\nTrackingPageAllocator state: ";
    if (comment) os << comment;
    os << "\n-----------------------------------------\n";
    region_allocator_.Print(os);
    os << "-----------------------------------------"
       << "\nPage permissions:";
    if (page_permissions_.empty()) {
      os << " empty\n";
      return;
    }
    os << "\n" << std::hex << std::showbase;

    Address contiguous_region_start = static_cast<Address>(-1);
    Address contiguous_region_end = contiguous_region_start;
    PageAllocator::Permission contiguous_region_access =
        PageAllocator::kNoAccess;
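    // Coalesce runs of adjacent pages that share the same permissions so that
    // each run is printed as a single region.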
    for (auto& pair : page_permissions_) {
      if (contiguous_region_end == pair.first &&
          pair.second == contiguous_region_access) {
        contiguous_region_end += commit_page_size_;
        continue;
      }
      if (contiguous_region_start != contiguous_region_end) {
        PrintRegion(os, contiguous_region_start, contiguous_region_end,
                    contiguous_region_access);
      }
      contiguous_region_start = pair.first;
      contiguous_region_end = pair.first + commit_page_size_;
      contiguous_region_access = pair.second;
    }
    if (contiguous_region_start != contiguous_region_end) {
      PrintRegion(os, contiguous_region_start, contiguous_region_end,
                  contiguous_region_access);
    }
  }

 private:
  typedef std::map<Address, PageAllocator::Permission> PagePermissionsMap;
  typedef std::function<void(PagePermissionsMap::value_type*)> ForEachFn;

  static void PrintRegion(std::ostream& os, Address start, Address end,
                          PageAllocator::Permission access) {
    os << "  page: [" << start << ", " << end << "), access: ";
    switch (access) {
      case PageAllocator::kNoAccess:
        os << "--";
        break;
      case PageAllocator::kRead:
        os << "R";
        break;
      case PageAllocator::kReadWrite:
        os << "RW";
        break;
      case PageAllocator::kReadWriteExecute:
        os << "RWX";
        break;
      case PageAllocator::kReadExecute:
        os << "RX";
        break;
    }
    os << "\n";
  }

  void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
    CHECK(IsAligned(address, commit_page_size_));
    CHECK(IsAligned(size, commit_page_size_));
    auto start_iter = page_permissions_.find(address);
    // The start page must exist in page_permissions_.
    CHECK_NE(start_iter, page_permissions_.end());
    auto end_iter = page_permissions_.find(address + size - commit_page_size_);
    // Ensure the last page exists in page_permissions_ as well.
    CHECK_NE(end_iter, page_permissions_.end());
    // Advance the iterator past the last page so that the loop below also
    // processes it.
    ++end_iter;
    for (auto iter = start_iter; iter != end_iter; ++iter) {
      PagePermissionsMap::value_type& pair = *iter;
      fn(&pair);
    }
  }

  void UpdatePagePermissions(Address address, size_t size,
                             PageAllocator::Permission access) {
    ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
      value->second = access;
    });
  }

  v8::PageAllocator* const page_allocator_;
  const size_t allocate_page_size_;
  const size_t commit_page_size_;
  // Region allocator tracks page allocation/deallocation requests.
  base::RegionAllocator region_allocator_;
  // This map keeps track of allocated pages' permissions.
  PagePermissionsMap page_permissions_;
};
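
// The tracking allocator is installed as the platform page allocator via
// SetPlatformPageAllocatorForTesting() (see SetUpTestCase() below) before the
// test isolate is created, so every page the heap maps or unmaps during the
// tests goes through it.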
class SequentialUnmapperTest : public TestWithIsolate {
 public:
  SequentialUnmapperTest() = default;
  ~SequentialUnmapperTest() override = default;

  static void SetUpTestCase() {
    CHECK_NULL(tracking_page_allocator_);
    old_page_allocator_ = GetPlatformPageAllocator();
    tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
    CHECK(tracking_page_allocator_->IsEmpty());
    CHECK_EQ(old_page_allocator_,
             SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
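    // Disable concurrent sweeping so that pages are freed deterministically
    // on the main thread; the permission checks in the tests rely on this.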
    old_flag_ = i::FLAG_concurrent_sweeping;
    i::FLAG_concurrent_sweeping = false;
    TestWithIsolate::SetUpTestCase();
  }

  static void TearDownTestCase() {
    TestWithIsolate::TearDownTestCase();
    i::FLAG_concurrent_sweeping = old_flag_;
    CHECK(tracking_page_allocator_->IsEmpty());
    delete tracking_page_allocator_;
    tracking_page_allocator_ = nullptr;
  }

  Heap* heap() { return isolate()->heap(); }
  MemoryAllocator* allocator() { return heap()->memory_allocator(); }
  MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }

  TrackingPageAllocator* tracking_page_allocator() {
    return tracking_page_allocator_;
  }

 private:
  static TrackingPageAllocator* tracking_page_allocator_;
  static v8::PageAllocator* old_page_allocator_;
  static bool old_flag_;

  DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};

TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
    nullptr;
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;

// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  unmapper()->FreeQueuedChunks();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kNoAccess);
  unmapper()->TearDown();
  if (i_isolate()->isolate_allocation_mode() ==
      IsolateAllocationMode::kInV8Heap) {
    // In this mode the Isolate uses a bounded page allocator that allocates
    // pages inside a pre-reserved region. These pages therefore stay reserved
    // until the Isolate dies.
    tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                    PageAllocator::kNoAccess);
  } else {
    CHECK_EQ(IsolateAllocationMode::kInCppHeap,
             i_isolate()->isolate_allocation_mode());
    tracking_page_allocator()->CheckIsFree(page->address(), page_size);
  }
}

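// Same as above, but without explicitly draining the unmapper's queue first:
// unmapper()->TearDown() itself must release the queued pooled page.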
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
  Page* page = allocator()->AllocatePage(
      MemoryChunkLayout::AllocatableMemoryInDataPage(),
      static_cast<PagedSpace*>(heap()->old_space()),
      Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const size_t page_size = tracking_page_allocator()->AllocatePageSize();
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);

  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                  PageAllocator::kReadWrite);
  unmapper()->TearDown();
  if (i_isolate()->isolate_allocation_mode() ==
      IsolateAllocationMode::kInV8Heap) {
    // In this mode the Isolate uses a bounded page allocator that allocates
    // pages inside a pre-reserved region. These pages therefore stay reserved
    // until the Isolate dies.
    tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                    PageAllocator::kNoAccess);
  } else {
    CHECK_EQ(IsolateAllocationMode::kInCppHeap,
             i_isolate()->isolate_allocation_mode());
    tracking_page_allocator()->CheckIsFree(page->address(), page_size);
  }
}

}  // namespace internal
}  // namespace v8