// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/isolate-allocator.h"
#include "src/base/bounded-page-allocator.h"
#include "src/isolate.h"
#include "src/ptr-compr.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

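// On 64-bit targets the Isolate can be placed inside a dedicated virtual
// memory reservation (IsolateAllocationMode::kInV8Heap), which sets up the
// pointer-compression-friendly layout established below; otherwise the
// Isolate is allocated in the C++ heap with plain operator new.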
IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
#if V8_TARGET_ARCH_64_BIT
  if (mode == IsolateAllocationMode::kInV8Heap) {
    Address heap_base = InitReservation();
    CommitPagesForIsolate(heap_base);
    return;
  }
#endif  // V8_TARGET_ARCH_64_BIT

  // Allocate Isolate in C++ heap.
  CHECK_EQ(mode, IsolateAllocationMode::kInCppHeap);
  page_allocator_ = GetPlatformPageAllocator();
  isolate_memory_ = ::operator new(sizeof(Isolate));
  DCHECK(!reservation_.IsReserved());
}

IsolateAllocator::~IsolateAllocator() {
  if (reservation_.IsReserved()) {
    // The actual memory will be freed when |reservation_| dies.
    return;
  }

  // The memory was allocated in the C++ heap.
  ::operator delete(isolate_memory_);
}

#if V8_TARGET_ARCH_64_BIT
Address IsolateAllocator::InitReservation() {
  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();

  // Reserve a 4Gb region such that the region's middle is 4Gb-aligned.
  // The VirtualMemory API does not support such a constraint, so we have to
  // implement it manually here.
  size_t reservation_size = kPtrComprHeapReservationSize;
  size_t base_alignment = kPtrComprIsolateRootAlignment;

  const int kMaxAttempts = 3;
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    Address hint = RoundDown(reinterpret_cast<Address>(
                                 platform_page_allocator->GetRandomMmapAddr()),
                             base_alignment) +
                   kPtrComprIsolateRootBias;
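    // A reservation of |reservation_size| placed exactly at |hint| would have
    // a |base_alignment|-aligned middle: the hint is an aligned address plus
    // |kPtrComprIsolateRootBias| (assumed here to be half the reservation
    // size, as the comment above implies).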

    // A padded reservation of twice the required size is guaranteed to
    // contain a sub-region with the proper alignment.
    VirtualMemory padded_reservation(platform_page_allocator,
                                     reservation_size * 2,
                                     reinterpret_cast<void*>(hint));
    if (!padded_reservation.IsReserved()) break;

    // Find a sub-region inside the padded reservation whose middle is
    // |base_alignment|-aligned.
    Address address =
        RoundUp(padded_reservation.address() + kPtrComprIsolateRootBias,
                base_alignment) -
        kPtrComprIsolateRootBias;
    CHECK(padded_reservation.InVM(address, reservation_size));
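    // For example (with illustrative values: reservation_size == 4Gb,
    // kPtrComprIsolateRootBias == 2Gb), a padded reservation starting at
    // 0x1234'5678'9000 yields
    //   address = RoundUp(0x1234'5678'9000 + 2Gb, 4Gb) - 2Gb
    //           = 0x1235'0000'0000 - 2Gb = 0x1234'8000'0000,
    // whose middle 0x1235'0000'0000 is 4Gb-aligned, and [address,
    // address + 4Gb) still fits inside the 8Gb padded reservation.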

    // Now free the padded reservation and immediately try to reserve an exact
    // region at the aligned address. We have to do this dance because the
    // reservation address requirement is more complex than a simple alignment,
    // and not all operating systems support freeing parts of reserved address
    // space regions.
    padded_reservation.Free();

    VirtualMemory reservation(platform_page_allocator, reservation_size,
                              reinterpret_cast<void*>(address));
    if (!reservation.IsReserved()) break;

    // The OS could have placed the new reservation at a different address than
    // requested, but we can still accept it if it happens to have the required
    // alignment.
    Address aligned_address =
        RoundUp(reservation.address() + kPtrComprIsolateRootBias,
                base_alignment) -
        kPtrComprIsolateRootBias;

    if (reservation.address() == aligned_address) {
      reservation_ = std::move(reservation);
      break;
    }
  }
  if (!reservation_.IsReserved()) {
    V8::FatalProcessOutOfMemory(nullptr,
                                "Failed to reserve memory for new V8 Isolate");
  }

  CHECK_EQ(reservation_.size(), reservation_size);

  Address heap_base = reservation_.address() + kPtrComprIsolateRootBias;
  CHECK(IsAligned(heap_base, base_alignment));

  return heap_base;
}

void IsolateAllocator::CommitPagesForIsolate(Address heap_base) {
  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();

  // Simplify the BoundedPageAllocator's life by configuring it to use the same
  // page size as the Heap will use (MemoryChunk::kPageSize).
  size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
                             platform_page_allocator->AllocatePageSize());
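  // For example, with kPageSizeBits == 19 (512Kb V8 pages, an assumption for
  // illustration) and a 4Kb or 64Kb OS allocation granularity, |page_size|
  // comes out as 512Kb.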
114 :
115 3 : page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
116 : platform_page_allocator, reservation_.address(), reservation_.size(),
117 : page_size);
118 1 : page_allocator_ = page_allocator_instance_.get();

  Address isolate_address = heap_base - Isolate::isolate_root_bias();
  Address isolate_end = isolate_address + sizeof(Isolate);
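  // This places the Isolate object so that |heap_base|, the 4Gb-aligned middle
  // of the reservation, lies exactly Isolate::isolate_root_bias() bytes past
  // the object's start.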

  // Inform the bounded page allocator about reserved pages.
  {
    Address reserved_region_address = RoundDown(isolate_address, page_size);
    size_t reserved_region_size =
        RoundUp(isolate_end, page_size) - reserved_region_address;

    CHECK(page_allocator_instance_->AllocatePagesAt(
        reserved_region_address, reserved_region_size,
        PageAllocator::Permission::kNoAccess));
  }
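  // Note: kNoAccess above only marks the pages as in-use within the
  // BoundedPageAllocator so that they are never handed out again; the
  // permissions the Isolate actually needs are applied next.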

  // Commit pages where the Isolate will be stored.
  {
    size_t commit_page_size = platform_page_allocator->CommitPageSize();
    Address committed_region_address =
        RoundDown(isolate_address, commit_page_size);
    size_t committed_region_size =
        RoundUp(isolate_end, commit_page_size) - committed_region_address;

    // We use |reservation_| directly here because |page_allocator_| has a
    // bigger commit page size than we actually need.
    CHECK(reservation_.SetPermissions(committed_region_address,
                                      committed_region_size,
                                      PageAllocator::kReadWrite));
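
    // In debug configurations Heap::ShouldZapGarbage() is true, and the fresh
    // memory is filled with kZapValue so that reads of uninitialized Isolate
    // memory are easy to recognize.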
    if (Heap::ShouldZapGarbage()) {
      for (Address address = committed_region_address;
           address < committed_region_address + committed_region_size;
           address += kSystemPointerSize) {
        Memory<Address>(address) = static_cast<Address>(kZapValue);
      }
    }
  }
  isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
#endif  // V8_TARGET_ARCH_64_BIT

}  // namespace internal
}  // namespace v8