Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/allocation.h"
6 :
7 : #include <stdlib.h> // For free, malloc.
8 : #include "src/base/bits.h"
9 : #include "src/base/lazy-instance.h"
10 : #include "src/base/logging.h"
11 : #include "src/base/lsan-page-allocator.h"
12 : #include "src/base/page-allocator.h"
13 : #include "src/base/platform/platform.h"
14 : #include "src/memcopy.h"
15 : #include "src/v8.h"
16 : #include "src/vector.h"
17 :
18 : #if V8_LIBC_BIONIC
19 : #include <malloc.h> // NOLINT
20 : #endif
21 :
22 : namespace v8 {
23 : namespace internal {
24 :
25 : namespace {
26 :
// Platform-dispatching backend for AlignedAlloc. Returns |size| bytes aligned
// to |alignment|, or nullptr on failure.
void* AlignedAllocInternal(size_t size, size_t alignment) {
#if V8_OS_WIN
  return _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  return memalign(alignment, size);
#else
  void* result = nullptr;
  // posix_memalign returns 0 on success and leaves |result| untouched on
  // failure, so report failure explicitly as nullptr.
  return posix_memalign(&result, alignment, size) == 0 ? result : nullptr;
#endif
}
40 :
// Holder for the process-wide page allocator. Prefers the allocator supplied
// by the embedder's platform; falls back to a leaked default allocator when
// the platform provides none, and wraps the result for LeakSanitizer when
// LEAK_SANITIZER is defined.
class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      // No embedder-provided allocator: use a default that is intentionally
      // leaked so it outlives every user of the pointer.
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    // Wrap the chosen allocator so LSan can observe page allocations. The
    // wrapper is also intentionally leaked.
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  // Never returns nullptr once the constructor has run.
  PageAllocator* page_allocator() const { return page_allocator_; }

  // Test-only hook: replaces the allocator without any wrapping.
  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
};
65 :
// Lazily constructs a single, intentionally-leaked PageAllocatorInitializer
// on first call and returns the same instance thereafter.
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageTableInitializer)

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
72 :
73 : } // namespace
74 :
75 7610639 : v8::PageAllocator* GetPlatformPageAllocator() {
76 : DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
77 42858321 : return GetPageTableInitializer()->page_allocator();
78 : }
79 :
80 2 : v8::PageAllocator* SetPlatformPageAllocatorForTesting(
81 : v8::PageAllocator* new_page_allocator) {
82 : v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
83 2 : GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
84 2 : return old_page_allocator;
85 : }
86 :
87 3304332 : void* Malloced::New(size_t size) {
88 3304332 : void* result = AllocWithRetry(size);
89 3304333 : if (result == nullptr) {
90 5 : V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
91 : }
92 3304328 : return result;
93 : }
94 :
95 3366597 : void Malloced::Delete(void* p) {
96 3366597 : free(p);
97 3366597 : }
98 :
99 160808869 : char* StrDup(const char* str) {
100 : int length = StrLength(str);
101 160808869 : char* result = NewArray<char>(length + 1);
102 160808869 : MemCopy(result, str, length);
103 160808869 : result[length] = '\0';
104 160808869 : return result;
105 : }
106 :
107 0 : char* StrNDup(const char* str, int n) {
108 : int length = StrLength(str);
109 0 : if (n < length) length = n;
110 0 : char* result = NewArray<char>(length + 1);
111 0 : MemCopy(result, str, length);
112 0 : result[length] = '\0';
113 0 : return result;
114 : }
115 :
116 62758419 : void* AllocWithRetry(size_t size) {
117 : void* result = nullptr;
118 62758459 : for (int i = 0; i < kAllocationTries; ++i) {
119 62760464 : result = malloc(size);
120 62760464 : if (result != nullptr) break;
121 20 : if (!OnCriticalMemoryPressure(size)) break;
122 : }
123 62758419 : return result;
124 : }
125 :
126 817 : void* AlignedAlloc(size_t size, size_t alignment) {
127 : DCHECK_LE(alignof(void*), alignment);
128 : DCHECK(base::bits::IsPowerOfTwo(alignment));
129 : void* result = nullptr;
130 837 : for (int i = 0; i < kAllocationTries; ++i) {
131 : result = AlignedAllocInternal(size, alignment);
132 822 : if (result != nullptr) break;
133 10 : if (!OnCriticalMemoryPressure(size + alignment)) break;
134 : }
135 817 : if (result == nullptr) {
136 5 : V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
137 : }
138 812 : return result;
139 : }
140 :
// Releases memory obtained from AlignedAlloc, using the deallocator that
// matches the platform-specific allocation path in AlignedAllocInternal.
void AlignedFree(void *ptr) {
#if V8_OS_WIN
  // Pairs with _aligned_malloc.
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#else
  // Pairs with posix_memalign, which is free()-compatible.
  free(ptr);
#endif
}
151 :
152 363 : size_t AllocatePageSize() {
153 363 : return GetPlatformPageAllocator()->AllocatePageSize();
154 : }
155 :
156 52939966 : size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }
157 :
158 59524 : void SetRandomMmapSeed(int64_t seed) {
159 59524 : GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
160 59524 : }
161 :
162 1107807 : void* GetRandomMmapAddr() {
163 1107807 : return GetPlatformPageAllocator()->GetRandomMmapAddr();
164 : }
165 :
166 2464807 : void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
167 : size_t size, size_t alignment,
168 : PageAllocator::Permission access) {
169 : DCHECK_NOT_NULL(page_allocator);
170 : DCHECK_EQ(address, AlignedAddress(address, alignment));
171 : DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
172 : void* result = nullptr;
173 2464847 : for (int i = 0; i < kAllocationTries; ++i) {
174 2464817 : result = page_allocator->AllocatePages(address, size, alignment, access);
175 2464818 : if (result != nullptr) break;
176 20 : size_t request_size = size + alignment - page_allocator->AllocatePageSize();
177 20 : if (!OnCriticalMemoryPressure(request_size)) break;
178 : }
179 2464808 : return result;
180 : }
181 :
182 616505 : bool FreePages(v8::PageAllocator* page_allocator, void* address,
183 : const size_t size) {
184 : DCHECK_NOT_NULL(page_allocator);
185 : DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
186 2464642 : return page_allocator->FreePages(address, size);
187 : }
188 :
189 0 : bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
190 : size_t new_size) {
191 : DCHECK_NOT_NULL(page_allocator);
192 : DCHECK_LT(new_size, size);
193 : DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
194 187081 : return page_allocator->ReleasePages(address, size, new_size);
195 : }
196 :
197 1421860 : bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
198 : size_t size, PageAllocator::Permission access) {
199 : DCHECK_NOT_NULL(page_allocator);
200 9333570 : return page_allocator->SetPermissions(address, size, access);
201 : }
202 :
203 0 : byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
204 : size_t* allocated) {
205 : DCHECK_NOT_NULL(page_allocator);
206 0 : size_t page_size = page_allocator->AllocatePageSize();
207 : void* result = AllocatePages(page_allocator, address, page_size, page_size,
208 0 : PageAllocator::kReadWrite);
209 0 : if (result != nullptr) *allocated = page_size;
210 0 : return static_cast<byte*>(result);
211 : }
212 :
213 50 : bool OnCriticalMemoryPressure(size_t length) {
214 : // TODO(bbudge) Rework retry logic once embedders implement the more
215 : // informative overload.
216 50 : if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
217 0 : V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
218 : }
219 50 : return true;
220 : }
221 :
222 2287356 : VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
223 : void* hint, size_t alignment)
224 2287356 : : page_allocator_(page_allocator) {
225 : DCHECK_NOT_NULL(page_allocator);
226 : DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
227 2287356 : size_t page_size = page_allocator_->AllocatePageSize();
228 2287356 : alignment = RoundUp(alignment, page_size);
229 : Address address = reinterpret_cast<Address>(
230 2287356 : AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
231 2287357 : PageAllocator::kNoAccess));
232 2287357 : if (address != kNullAddress) {
233 : DCHECK(IsAligned(address, alignment));
234 2287347 : region_ = base::AddressRegion(address, size);
235 : }
236 2287357 : }
237 :
238 19004812 : VirtualMemory::~VirtualMemory() {
239 9502406 : if (IsReserved()) {
240 63943 : Free();
241 : }
242 9502406 : }
243 :
244 1027622 : void VirtualMemory::Reset() {
245 10821086 : page_allocator_ = nullptr;
246 10821086 : region_ = base::AddressRegion();
247 1027622 : }
248 :
249 7911710 : bool VirtualMemory::SetPermissions(Address address, size_t size,
250 : PageAllocator::Permission access) {
251 7911710 : CHECK(InVM(address, size));
252 : bool result =
253 7911710 : v8::internal::SetPermissions(page_allocator_, address, size, access);
254 : DCHECK(result);
255 7911714 : return result;
256 : }
257 :
// Returns the tail of the reservation, [free_start, end), to the OS and
// shrinks region_ accordingly. |free_start| must be commit-page aligned and
// inside the region. Returns the number of bytes released.
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  // Bytes between |free_start| and the current end of the region.
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  // Shrink the bookkeeping first, then release the pages.
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}
272 :
// Unmaps the entire reservation and forgets it. Members are copied into
// locals before Reset() because this VirtualMemory object itself may be
// stored inside the region being freed.
void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects size to be aligned to allocation granularity however
  // ReleasePages may leave size at only commit granularity. Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
285 :
286 7945323 : void VirtualMemory::TakeControl(VirtualMemory* from) {
287 : DCHECK(!IsReserved());
288 7945323 : page_allocator_ = from->page_allocator_;
289 7945323 : region_ = from->region_;
290 : from->Reset();
291 7945323 : }
292 :
293 : } // namespace internal
294 122036 : } // namespace v8
|