// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/embedded-data.h"

#include "src/assembler-inl.h"
#include "src/callable.h"
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {

// static
bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
  if (FLAG_embedded_builtins) {
    const Address start = reinterpret_cast<Address>(isolate->embedded_blob());
    return start <= pc && pc < start + isolate->embedded_blob_size();
  } else {
    return false;
  }
}

// static
Code InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
  if (!PcIsOffHeap(isolate, address)) return Code();

  EmbeddedData d = EmbeddedData::FromBlob();
  if (address < d.InstructionStartOfBuiltin(0)) return Code();

  // Note: Addresses within the padding section between builtins (i.e. within
  // start + size <= address < start + padded_size) are interpreted as
  // belonging to the preceding builtin.
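  // The embedded instruction streams are laid out contiguously in ascending
  // builtin-id order, so their start addresses are sorted and we can binary
  // search for the builtin whose padded instruction area contains |address|.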
  int l = 0, r = Builtins::builtin_count;
  while (l < r) {
    const int mid = (l + r) / 2;
    Address start = d.InstructionStartOfBuiltin(mid);
    Address end = start + d.PaddedInstructionSizeOfBuiltin(mid);

    if (address < start) {
      r = mid;
    } else if (address >= end) {
      l = mid + 1;
    } else {
      return isolate->builtins()->builtin(mid);
    }
  }

  UNREACHABLE();
}

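// Copies the embedded blob into a fresh page-aligned allocation and then
// flips the mapping from read-write to read-execute so the copied code can
// be run.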
// static
void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
                                                       uint8_t** data,
                                                       uint32_t* size) {
  EmbeddedData d = EmbeddedData::FromIsolate(isolate);

  v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
  const uint32_t page_size =
      static_cast<uint32_t>(page_allocator->AllocatePageSize());
  const uint32_t allocated_size = RoundUp(d.size(), page_size);

  uint8_t* allocated_bytes = static_cast<uint8_t*>(
      AllocatePages(page_allocator, isolate->heap()->GetRandomMmapAddr(),
                    allocated_size, page_size, PageAllocator::kReadWrite));
  CHECK_NOT_NULL(allocated_bytes);

  std::memcpy(allocated_bytes, d.data(), d.size());
  CHECK(SetPermissions(page_allocator, allocated_bytes, allocated_size,
                       PageAllocator::kReadExecute));

  *data = allocated_bytes;
  *size = d.size();

  d.Dispose();
}

// static
void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
                                                     uint32_t size) {
  v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
  const uint32_t page_size =
      static_cast<uint32_t>(page_allocator->AllocatePageSize());
  CHECK(FreePages(page_allocator, data, RoundUp(size, page_size)));
}

namespace {

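// Returns true if the builtin's calling convention uses
// kOffHeapTrampolineRegister, either as the context register or as a register
// parameter, in which case an off-heap trampoline would clobber it.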
bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code code) {
  DCHECK(Builtins::IsIsolateIndependent(code->builtin_index()));
  switch (Builtins::KindOf(code->builtin_index())) {
    case Builtins::CPP:
    case Builtins::TFC:
    case Builtins::TFH:
    case Builtins::TFJ:
    case Builtins::TFS:
      break;

    // Bytecode handlers will only ever be used by the interpreter and so
    // there will never be a need to use trampolines with them.
    case Builtins::BCH:
    case Builtins::API:
    case Builtins::ASM:
      // TODO(jgruber): Extend checks to remaining kinds.
      return false;
  }

  Callable callable = Builtins::CallableFor(
      isolate, static_cast<Builtins::Name>(code->builtin_index()));
  CallInterfaceDescriptor descriptor = callable.descriptor();

  if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
    return true;
  }

  for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
    Register reg = descriptor.GetRegisterParameter(i);
    if (reg == kOffHeapTrampolineRegister) return true;
  }

  return false;
}

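// Walks the relocation info of each embedded builtin and rewrites
// builtin-to-builtin call targets in the off-heap copy to point at the
// off-heap instruction starts instead of the on-heap Code objects.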
void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
  static const int kRelocMask =
      RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
      RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);

  for (int i = 0; i < Builtins::builtin_count; i++) {
    if (!Builtins::IsIsolateIndependent(i)) continue;

    Code code = isolate->builtins()->builtin(i);
    RelocIterator on_heap_it(code, kRelocMask);
    RelocIterator off_heap_it(blob, code, kRelocMask);

#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
    defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) ||  \
    defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390)
    // On these platforms we emit relative builtin-to-builtin jumps for
    // isolate-independent builtins in the snapshot. This loop fixes up those
    // relative jumps to the right offsets in the snapshot.
    // See also: Code::IsIsolateIndependent.
    while (!on_heap_it.done()) {
      DCHECK(!off_heap_it.done());

      RelocInfo* rinfo = on_heap_it.rinfo();
      DCHECK_EQ(rinfo->rmode(), off_heap_it.rinfo()->rmode());
      Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
      CHECK(Builtins::IsIsolateIndependentBuiltin(target));

      // Do not emit write-barrier for off-heap writes.
      off_heap_it.rinfo()->set_target_address(
          blob->InstructionStartOfBuiltin(target->builtin_index()),
          SKIP_WRITE_BARRIER);

      on_heap_it.next();
      off_heap_it.next();
    }
    DCHECK(off_heap_it.done());
#else
    // Architectures not listed above do not use pc-relative calls and thus
    // must not contain embedded code targets. Instead, we use an indirection
    // through the root register.
    CHECK(on_heap_it.done());
    CHECK(off_heap_it.done());
#endif  // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || ...
  }
}

}  // namespace

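// Builds the embedded blob for the given isolate: checks that every
// isolate-independent builtin is safe to embed, records per-builtin offsets
// and lengths, copies the instruction streams, fixes up embedded code
// targets, and stores hashes of both the blob and the isolate.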
// static
EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
  Builtins* builtins = isolate->builtins();

  // Store instruction stream lengths and offsets.
  std::vector<struct Metadata> metadata(kTableSize);

  bool saw_unsafe_builtin = false;
  uint32_t raw_data_size = 0;
  for (int i = 0; i < Builtins::builtin_count; i++) {
    Code code = builtins->builtin(i);

    if (Builtins::IsIsolateIndependent(i)) {
      // Sanity-check that the given builtin is isolate-independent and does
      // not use the trampoline register in its calling convention.
      if (!code->IsIsolateIndependent(isolate)) {
        saw_unsafe_builtin = true;
        fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
      }
      if (Builtins::IsWasmRuntimeStub(i) &&
          RelocInfo::RequiresRelocation(code)) {
        // Wasm additionally requires that its runtime stubs must be
        // individually PIC (i.e. we must be able to copy each stub outside
        // the embedded area without relocations). In particular, that means
        // pc-relative calls to other builtins are disallowed.
        saw_unsafe_builtin = true;
        fprintf(stderr, "%s is a wasm runtime stub but needs relocation.\n",
                Builtins::name(i));
      }
      if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
        saw_unsafe_builtin = true;
        fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
                Builtins::name(i));
      }

      uint32_t length = static_cast<uint32_t>(code->raw_instruction_size());

      DCHECK_EQ(0, raw_data_size % kCodeAlignment);
      metadata[i].instructions_offset = raw_data_size;
      metadata[i].instructions_length = length;

      // Align the start of each instruction stream.
      raw_data_size += PadAndAlign(length);
    } else {
      metadata[i].instructions_offset = raw_data_size;
    }
  }
  CHECK_WITH_MSG(
      !saw_unsafe_builtin,
      "One or more builtins marked as isolate-independent either contains "
      "isolate-dependent code or aliases the off-heap trampoline register. "
      "If in doubt, ask jgruber@");

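  // Blob layout (per the offset helpers): the blob hash at offset 0, followed
  // by the isolate hash and the metadata table, with the raw instruction
  // streams as the final section (hence blob_size below).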
  const uint32_t blob_size = RawDataOffset() + raw_data_size;
  uint8_t* const blob = new uint8_t[blob_size];
  uint8_t* const raw_data_start = blob + RawDataOffset();

  // Initially zap the entire blob, effectively padding the alignment area
  // between two builtins with int3's (on x64/ia32).
  ZapCode(reinterpret_cast<Address>(blob), blob_size);

  // Hash relevant parts of the Isolate's heap and store the result.
  {
    STATIC_ASSERT(IsolateHashSize() == kSizetSize);
    const size_t hash = isolate->HashIsolateForEmbeddedBlob();
    std::memcpy(blob + IsolateHashOffset(), &hash, IsolateHashSize());
  }

  // Write the metadata tables.
  DCHECK_EQ(MetadataSize(), sizeof(metadata[0]) * metadata.size());
  std::memcpy(blob + MetadataOffset(), metadata.data(), MetadataSize());

  // Write the raw data section.
  for (int i = 0; i < Builtins::builtin_count; i++) {
    if (!Builtins::IsIsolateIndependent(i)) continue;
    Code code = builtins->builtin(i);
    uint32_t offset = metadata[i].instructions_offset;
    uint8_t* dst = raw_data_start + offset;
    DCHECK_LE(RawDataOffset() + offset + code->raw_instruction_size(),
              blob_size);
    std::memcpy(dst, reinterpret_cast<uint8_t*>(code->raw_instruction_start()),
                code->raw_instruction_size());
  }

  EmbeddedData d(blob, blob_size);

  // Fix up call targets that point to other embedded builtins.
  FinalizeEmbeddedCodeTargets(isolate, &d);

  // Hash the blob and store the result.
  {
    STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
    const size_t hash = d.CreateEmbeddedBlobHash();
    std::memcpy(blob + EmbeddedBlobHashOffset(), &hash, EmbeddedBlobHashSize());

    DCHECK_EQ(hash, d.CreateEmbeddedBlobHash());
    DCHECK_EQ(hash, d.EmbeddedBlobHash());
  }

  if (FLAG_serialization_statistics) d.PrintStatistics();

  return d;
}

Address EmbeddedData::InstructionStartOfBuiltin(int i) const {
  DCHECK(Builtins::IsBuiltinId(i));
  const struct Metadata* metadata = Metadata();
  const uint8_t* result = RawData() + metadata[i].instructions_offset;
  DCHECK_LE(result, data_ + size_);
  DCHECK_IMPLIES(result == data_ + size_, InstructionSizeOfBuiltin(i) == 0);
  return reinterpret_cast<Address>(result);
}

uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
  DCHECK(Builtins::IsBuiltinId(i));
  const struct Metadata* metadata = Metadata();
  return metadata[i].instructions_length;
}

size_t EmbeddedData::CreateEmbeddedBlobHash() const {
  STATIC_ASSERT(EmbeddedBlobHashOffset() == 0);
  STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
  return base::hash_range(data_ + EmbeddedBlobHashSize(), data_ + size_);
}

void EmbeddedData::PrintStatistics() const {
  DCHECK(FLAG_serialization_statistics);

  constexpr int kCount = Builtins::builtin_count;

  int embedded_count = 0;
  int instruction_size = 0;
  int sizes[kCount];
  for (int i = 0; i < kCount; i++) {
    if (!Builtins::IsIsolateIndependent(i)) continue;
    const int size = InstructionSizeOfBuiltin(i);
    instruction_size += size;
    sizes[embedded_count] = size;
    embedded_count++;
  }

  // Sort for percentiles.
  std::sort(&sizes[0], &sizes[embedded_count]);

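  // Percentile indices into the sorted size array; the implicit double-to-int
  // conversions truncate toward zero.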
  const int k50th = embedded_count * 0.5;
  const int k75th = embedded_count * 0.75;
  const int k90th = embedded_count * 0.90;
  const int k99th = embedded_count * 0.99;

  const int metadata_size = static_cast<int>(
      EmbeddedBlobHashSize() + IsolateHashSize() + MetadataSize());

  PrintF("EmbeddedData:\n");
  PrintF("  Total size:                         %d\n",
         static_cast<int>(size()));
  PrintF("  Metadata size:                      %d\n", metadata_size);
  PrintF("  Instruction size:                   %d\n", instruction_size);
  PrintF("  Padding:                            %d\n",
         static_cast<int>(size() - metadata_size - instruction_size));
  PrintF("  Embedded builtin count:             %d\n", embedded_count);
  PrintF("  Instruction size (50th percentile): %d\n", sizes[k50th]);
  PrintF("  Instruction size (75th percentile): %d\n", sizes[k75th]);
  PrintF("  Instruction size (90th percentile): %d\n", sizes[k90th]);
  PrintF("  Instruction size (99th percentile): %d\n", sizes[k99th]);
  PrintF("\n");
}

}  // namespace internal
}  // namespace v8