Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/allocation.h"
6 :
7 : #include <stdlib.h> // For free, malloc.
8 : #include "src/base/bits.h"
9 : #include "src/base/lazy-instance.h"
10 : #include "src/base/logging.h"
11 : #include "src/base/lsan-page-allocator.h"
12 : #include "src/base/page-allocator.h"
13 : #include "src/base/platform/platform.h"
14 : #include "src/memcopy.h"
15 : #include "src/v8.h"
16 : #include "src/vector.h"
17 :
18 : #if V8_LIBC_BIONIC
19 : #include <malloc.h> // NOLINT
20 : #endif
21 :
22 : namespace v8 {
23 : namespace internal {
24 :
25 : namespace {
26 :
// Platform-specific aligned-allocation primitive. Returns nullptr on
// failure; callers (AlignedAlloc) handle retry and OOM reporting.
void* AlignedAllocInternal(size_t size, size_t alignment) {
#if V8_OS_WIN
  return _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  return memalign(alignment, size);
#else
  void* ptr;
  if (posix_memalign(&ptr, alignment, size) != 0) return nullptr;
  return ptr;
#endif
}
40 :
// Resolves the process-wide page allocator exactly once. Instantiated lazily
// (and leaked) through DEFINE_LAZY_LEAKY_OBJECT_GETTER below so it survives
// until process exit.
class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    // Prefer an allocator supplied by the embedder's platform.
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      // No embedder allocator: fall back to V8's default implementation.
      // Leaked on purpose — it must outlive every user of page memory.
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    // Under LSan, wrap the chosen allocator so page reservations are
    // visible to the leak sanitizer.
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  PageAllocator* page_allocator() const { return page_allocator_; }

  // Test-only hook used by SetPlatformPageAllocatorForTesting.
  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
};
65 :
// Lazily constructs (and intentionally leaks) the singleton
// PageAllocatorInitializer; GetPageTableInitializer() is the accessor.
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageTableInitializer);

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
72 :
73 : } // namespace
74 :
75 46117451 : v8::PageAllocator* GetPlatformPageAllocator() {
76 : DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
77 129549167 : return GetPageTableInitializer()->page_allocator();
78 : }
79 :
80 2 : v8::PageAllocator* SetPlatformPageAllocatorForTesting(
81 : v8::PageAllocator* new_page_allocator) {
82 : v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
83 2 : GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
84 2 : return old_page_allocator;
85 : }
86 :
87 3394436 : void* Malloced::New(size_t size) {
88 3394436 : void* result = AllocWithRetry(size);
89 3394437 : if (result == nullptr) {
90 5 : V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
91 : }
92 3394432 : return result;
93 : }
94 :
95 3457098 : void Malloced::Delete(void* p) {
96 3457098 : free(p);
97 3457098 : }
98 :
99 162356036 : char* StrDup(const char* str) {
100 : int length = StrLength(str);
101 162356036 : char* result = NewArray<char>(length + 1);
102 162356036 : MemCopy(result, str, length);
103 162356036 : result[length] = '\0';
104 162356036 : return result;
105 : }
106 :
107 0 : char* StrNDup(const char* str, int n) {
108 : int length = StrLength(str);
109 0 : if (n < length) length = n;
110 0 : char* result = NewArray<char>(length + 1);
111 0 : MemCopy(result, str, length);
112 0 : result[length] = '\0';
113 0 : return result;
114 : }
115 :
116 32691336 : void* AllocWithRetry(size_t size) {
117 : void* result = nullptr;
118 32691356 : for (int i = 0; i < kAllocationTries; ++i) {
119 32691356 : result = malloc(size);
120 32691356 : if (result != nullptr) break;
121 20 : if (!OnCriticalMemoryPressure(size)) break;
122 : }
123 32691336 : return result;
124 : }
125 :
126 783 : void* AlignedAlloc(size_t size, size_t alignment) {
127 : DCHECK_LE(alignof(void*), alignment);
128 : DCHECK(base::bits::IsPowerOfTwo(alignment));
129 : void* result = nullptr;
130 793 : for (int i = 0; i < kAllocationTries; ++i) {
131 : result = AlignedAllocInternal(size, alignment);
132 788 : if (result != nullptr) break;
133 10 : if (!OnCriticalMemoryPressure(size + alignment)) break;
134 : }
135 783 : if (result == nullptr) {
136 5 : V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
137 : }
138 778 : return result;
139 : }
140 :
// Releases memory obtained from AlignedAlloc.
//
// Cleanup: the previous V8_LIBC_BIONIC branch was byte-identical to the
// default branch (both called free), so the duplicated #elif is collapsed.
void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#else
  // All non-Windows branches of AlignedAllocInternal use memalign-style
  // allocation. Using free is not correct in general, but it is for
  // posix_memalign and for V8_LIBC_BIONIC's memalign.
  free(ptr);
#endif
}
151 :
152 1697 : size_t AllocatePageSize() {
153 1697 : return GetPlatformPageAllocator()->AllocatePageSize();
154 : }
155 :
156 72723987 : size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }
157 :
158 59799 : void SetRandomMmapSeed(int64_t seed) {
159 59799 : GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
160 59799 : }
161 :
162 892134 : void* GetRandomMmapAddr() {
163 892135 : return GetPlatformPageAllocator()->GetRandomMmapAddr();
164 : }
165 :
166 2563749 : void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
167 : size_t size, size_t alignment,
168 : PageAllocator::Permission access) {
169 : DCHECK_NOT_NULL(page_allocator);
170 : DCHECK_EQ(address, AlignedAddress(address, alignment));
171 : DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
172 : void* result = nullptr;
173 2563769 : for (int i = 0; i < kAllocationTries; ++i) {
174 2563759 : result = page_allocator->AllocatePages(address, size, alignment, access);
175 2563761 : if (result != nullptr) break;
176 20 : size_t request_size = size + alignment - page_allocator->AllocatePageSize();
177 20 : if (!OnCriticalMemoryPressure(request_size)) break;
178 : }
179 2563751 : return result;
180 : }
181 :
182 466180 : bool FreePages(v8::PageAllocator* page_allocator, void* address,
183 : const size_t size) {
184 : DCHECK_NOT_NULL(page_allocator);
185 : DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
186 2563608 : return page_allocator->FreePages(address, size);
187 : }
188 :
189 0 : bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
190 : size_t new_size) {
191 : DCHECK_NOT_NULL(page_allocator);
192 : DCHECK_LT(new_size, size);
193 : DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
194 188372 : return page_allocator->ReleasePages(address, size, new_size);
195 : }
196 :
197 1676444 : bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
198 : size_t size, PageAllocator::Permission access) {
199 : DCHECK_NOT_NULL(page_allocator);
200 11100537 : return page_allocator->SetPermissions(address, size, access);
201 : }
202 :
203 0 : byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
204 : size_t* allocated) {
205 : DCHECK_NOT_NULL(page_allocator);
206 0 : size_t page_size = page_allocator->AllocatePageSize();
207 : void* result = AllocatePages(page_allocator, address, page_size, page_size,
208 0 : PageAllocator::kReadWrite);
209 0 : if (result != nullptr) *allocated = page_size;
210 0 : return static_cast<byte*>(result);
211 : }
212 :
213 50 : bool OnCriticalMemoryPressure(size_t length) {
214 : // TODO(bbudge) Rework retry logic once embedders implement the more
215 : // informative overload.
216 50 : if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
217 0 : V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
218 : }
219 50 : return true;
220 : }
221 :
222 2358132 : VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
223 : void* hint, size_t alignment)
224 2358132 : : page_allocator_(page_allocator) {
225 : DCHECK_NOT_NULL(page_allocator);
226 : DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
227 2358132 : size_t page_size = page_allocator_->AllocatePageSize();
228 2358132 : alignment = RoundUp(alignment, page_size);
229 : Address address = reinterpret_cast<Address>(
230 : AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
231 2358132 : PageAllocator::kNoAccess));
232 2358134 : if (address != kNullAddress) {
233 : DCHECK(IsAligned(address, alignment));
234 2358124 : region_ = base::AddressRegion(address, size);
235 : }
236 2358134 : }
237 :
238 10255994 : VirtualMemory::~VirtualMemory() {
239 10255994 : if (IsReserved()) {
240 64384 : Free();
241 : }
242 10255994 : }
243 :
244 791872 : void VirtualMemory::Reset() {
245 11297822 : page_allocator_ = nullptr;
246 11297822 : region_ = base::AddressRegion();
247 791872 : }
248 :
249 9424093 : bool VirtualMemory::SetPermissions(Address address, size_t size,
250 : PageAllocator::Permission access) {
251 9424093 : CHECK(InVM(address, size));
252 : bool result =
253 9424093 : v8::internal::SetPermissions(page_allocator_, address, size, access);
254 : DCHECK(result);
255 9424108 : return result;
256 : }
257 :
// Releases the tail of the reservation, from |free_start| (commit-granularity
// aligned) to the current end, back to the OS. Shrinks region_ to match and
// returns the number of bytes released.
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  // Bytes between |free_start| and the current end of the region.
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  // Update the bookkeeping before touching the pages (see order note above).
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}
272 :
// Returns the whole reservation to the OS. Members are copied to locals and
// Reset() *before* freeing, because this VirtualMemory object may itself be
// stored inside the region being freed.
void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects size to be aligned to allocation granularity however
  // ReleasePages may leave size at only commit granularity. Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
285 :
// Moves ownership of |from|'s reservation into this (currently unreserved)
// object. The members must be copied before from->Reset() clears them;
// |from| is left unreserved.
void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  page_allocator_ = from->page_allocator_;
  region_ = from->region_;
  from->Reset();
}
292 :
293 : } // namespace internal
294 183867 : } // namespace v8
|