/src/node/deps/v8/include/v8-platform.h
Line | Count | Source |
1 | | // Copyright 2013 the V8 project authors. All rights reserved. |
2 | | // Use of this source code is governed by a BSD-style license that can be |
3 | | // found in the LICENSE file. |
4 | | |
5 | | #ifndef V8_V8_PLATFORM_H_ |
6 | | #define V8_V8_PLATFORM_H_ |
7 | | |
8 | | #include <math.h> |
9 | | #include <stddef.h> |
10 | | #include <stdint.h> |
11 | | #include <stdlib.h> // For abort. |
12 | | |
13 | | #include <memory> |
14 | | #include <string> |
15 | | |
16 | | #include "v8-source-location.h" // NOLINT(build/include_directory) |
17 | | #include "v8config.h" // NOLINT(build/include_directory) |
18 | | |
19 | | namespace v8 { |
20 | | |
21 | | class Isolate; |
22 | | |
23 | | // Valid priorities supported by the task scheduling infrastructure. |
24 | | enum class TaskPriority : uint8_t { |
25 | | /** |
26 | | * Best effort tasks are not critical for performance of the application. The |
27 | | * platform implementation should preempt such tasks if higher priority tasks |
28 | | * arrive. |
29 | | */ |
30 | | kBestEffort, |
31 | | /** |
32 | | * User visible tasks are long-running background tasks that will |
33 | | * improve performance and memory usage of the application upon completion. |
34 | | * Example: background compilation and garbage collection. |
35 | | */ |
36 | | kUserVisible, |
37 | | /** |
38 | | * User blocking tasks are the highest-priority tasks; they block the |
39 | | * execution thread (e.g. major garbage collection) and must be finished |
40 | | * as soon as possible. |
41 | | */ |
42 | | kUserBlocking, |
43 | | kMaxPriority = kUserBlocking |
44 | | }; |
45 | | |
46 | | /** |
47 | | * A Task represents a unit of work. |
48 | | */ |
49 | | class Task { |
50 | | public: |
51 | 0 | virtual ~Task() = default; |
52 | | |
53 | | virtual void Run() = 0; |
54 | | }; |
55 | | |
56 | | /** |
57 | | * An IdleTask represents a unit of work to be performed in idle time. |
58 | | * The Run method is invoked with an argument that specifies the deadline, |
59 | | * in seconds, on the clock returned by MonotonicallyIncreasingTime(). |
60 | | * The idle task is expected to complete by this deadline. |
61 | | */ |
62 | | class IdleTask { |
63 | | public: |
64 | | virtual ~IdleTask() = default; |
65 | | virtual void Run(double deadline_in_seconds) = 0; |
66 | | }; |
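 | | |
 | | // Illustrative sketch (not part of this header): an IdleTask that does |
 | | // chunked work until the deadline passes. NowInSeconds() stands in for the |
 | | // embedder's Platform::MonotonicallyIncreasingTime() clock, and |
 | | // DoSomeWorkChunk() is a hypothetical unit of work. |
 | | // |
 | | //   class ChunkedIdleTask : public v8::IdleTask { |
 | | //    public: |
 | | //     void Run(double deadline_in_seconds) override { |
 | | //       while (NowInSeconds() < deadline_in_seconds) { |
 | | //         if (!DoSomeWorkChunk()) return;  // No work left. |
 | | //       } |
 | | //       // Deadline reached; remaining work waits for the next idle slot. |
 | | //     } |
 | | //   }; |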
67 | | |
68 | | /** |
69 | | * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used |
70 | | * to post tasks after the isolate has been destroyed, but those tasks may |
71 | | * no longer get executed. All tasks posted to a given TaskRunner will be |
72 | | * invoked in sequence. Tasks can be posted from any thread. |
73 | | */ |
74 | | class TaskRunner { |
75 | | public: |
76 | | /** |
77 | | * Schedules a task to be invoked by this TaskRunner. The TaskRunner |
78 | | * implementation takes ownership of |task|. |
79 | | */ |
80 | | virtual void PostTask(std::unique_ptr<Task> task) = 0; |
81 | | |
82 | | /** |
83 | | * Schedules a task to be invoked by this TaskRunner. The TaskRunner |
84 | | * implementation takes ownership of |task|. The |task| cannot be nested |
85 | | * within other task executions. |
86 | | * |
87 | | * Tasks which shouldn't be interleaved with JS execution must be posted with |
88 | | * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the |
89 | | * embedder may process tasks in a callback which is called during JS |
90 | | * execution. |
91 | | * |
92 | | * In particular, tasks which execute JS must be non-nestable, since JS |
93 | | * execution is not allowed to nest. |
94 | | * |
95 | | * Requires that |TaskRunner::NonNestableTasksEnabled()| is true. |
96 | | */ |
97 | 0 | virtual void PostNonNestableTask(std::unique_ptr<Task> task) {} |
98 | | |
99 | | /** |
100 | | * Schedules a task to be invoked by this TaskRunner. The task is scheduled |
101 | | * after the given number of seconds |delay_in_seconds|. The TaskRunner |
102 | | * implementation takes ownership of |task|. |
103 | | */ |
104 | | virtual void PostDelayedTask(std::unique_ptr<Task> task, |
105 | | double delay_in_seconds) = 0; |
106 | | |
107 | | /** |
108 | | * Schedules a task to be invoked by this TaskRunner. The task is scheduled |
109 | | * after the given number of seconds |delay_in_seconds|. The TaskRunner |
110 | | * implementation takes ownership of |task|. The |task| cannot be nested |
111 | | * within other task executions. |
112 | | * |
113 | | * Tasks which shouldn't be interleaved with JS execution must be posted with |
114 | | * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the |
115 | | * embedder may process tasks in a callback which is called during JS |
116 | | * execution. |
117 | | * |
118 | | * In particular, tasks which execute JS must be non-nestable, since JS |
119 | | * execution is not allowed to nest. |
120 | | * |
121 | | * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true. |
122 | | */ |
123 | | virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task, |
124 | 0 | double delay_in_seconds) {} |
125 | | |
126 | | /** |
127 | | * Schedules an idle task to be invoked by this TaskRunner. The task is |
128 | | * scheduled when the embedder is idle. Requires that |
129 | | * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered |
130 | | * relative to other task types and may be starved for an arbitrarily long |
131 | | * time if no idle time is available. The TaskRunner implementation takes |
132 | | * ownership of |task|. |
133 | | */ |
134 | | virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0; |
135 | | |
136 | | /** |
137 | | * Returns true if idle tasks are enabled for this TaskRunner. |
138 | | */ |
139 | | virtual bool IdleTasksEnabled() = 0; |
140 | | |
141 | | /** |
142 | | * Returns true if non-nestable tasks are enabled for this TaskRunner. |
143 | | */ |
144 | 0 | virtual bool NonNestableTasksEnabled() const { return false; } |
145 | | |
146 | | /** |
147 | | * Returns true if non-nestable delayed tasks are enabled for this TaskRunner. |
148 | | */ |
149 | 0 | virtual bool NonNestableDelayedTasksEnabled() const { return false; } |
150 | | |
151 | 134k | TaskRunner() = default; |
152 | 134k | virtual ~TaskRunner() = default; |
153 | | |
154 | | TaskRunner(const TaskRunner&) = delete; |
155 | | TaskRunner& operator=(const TaskRunner&) = delete; |
156 | | }; |
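 | | |
 | | // Illustrative sketch (not part of this header): posting work through a |
 | | // TaskRunner. |runner| is assumed to come from |
 | | // Platform::GetForegroundTaskRunner(); LogTask is a hypothetical task. |
 | | // |
 | | //   class LogTask : public v8::Task { |
 | | //    public: |
 | | //     void Run() override { /* do foreground work */ } |
 | | //   }; |
 | | // |
 | | //   std::shared_ptr<v8::TaskRunner> runner = |
 | | //       platform->GetForegroundTaskRunner(isolate); |
 | | //   runner->PostTask(std::make_unique<LogTask>()); |
 | | //   runner->PostDelayedTask(std::make_unique<LogTask>(), /*delay=*/2.0); |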
157 | | |
158 | | /** |
159 | | * Delegate that's passed to Job's worker task, providing an entry point to |
160 | | * communicate with the scheduler. |
161 | | */ |
162 | | class JobDelegate { |
163 | | public: |
164 | | /** |
165 | | * Returns true if the worker task *must* return on the current thread |
166 | | * ASAP. Workers should invoke ShouldYield (or YieldIfNeeded()) |
167 | | * periodically, as often as is reasonable. |
168 | | * Once this method has returned true, ShouldYield must not be called again. |
169 | | */ |
170 | | virtual bool ShouldYield() = 0; |
171 | | |
172 | | /** |
173 | | * Notifies the scheduler that max concurrency was increased, and the number |
174 | | * of workers should be adjusted accordingly. See Platform::PostJob() for more |
175 | | * details. |
176 | | */ |
177 | | virtual void NotifyConcurrencyIncrease() = 0; |
178 | | |
179 | | /** |
180 | | * Returns a task_id unique among threads currently running this job, such |
181 | | * that GetTaskId() < worker count. To achieve this, the same task_id may be |
182 | | * reused by a different thread after a worker_task returns. |
183 | | */ |
184 | | virtual uint8_t GetTaskId() = 0; |
185 | | |
186 | | /** |
187 | | * Returns true if the current task is called from the thread currently |
188 | | * running JobHandle::Join(). |
189 | | */ |
190 | | virtual bool IsJoiningThread() const = 0; |
191 | | }; |
192 | | |
193 | | /** |
194 | | * Handle returned when posting a Job. Provides methods to control execution of |
195 | | * the posted Job. |
196 | | */ |
197 | | class JobHandle { |
198 | | public: |
199 | | virtual ~JobHandle() = default; |
200 | | |
201 | | /** |
202 | | * Notifies the scheduler that max concurrency was increased, and the number |
203 | | * of workers should be adjusted accordingly. See Platform::PostJob() for more |
204 | | * details. |
205 | | */ |
206 | | virtual void NotifyConcurrencyIncrease() = 0; |
207 | | |
208 | | /** |
209 | | * Contributes to the job on this thread. Doesn't return until all tasks have |
210 | | * completed and max concurrency becomes 0. When Join() is called and max |
211 | | * concurrency reaches 0, it should not increase again. This also promotes |
212 | | * this Job's priority to be at least as high as the calling thread's |
213 | | * priority. |
214 | | */ |
215 | | virtual void Join() = 0; |
216 | | |
217 | | /** |
218 | | * Forces all existing workers to yield ASAP. Waits until they have all |
219 | | * returned from the Job's callback before returning. |
220 | | */ |
221 | | virtual void Cancel() = 0; |
222 | | |
223 | | /** |
224 | | * Forces all existing workers to yield ASAP but doesn't wait for them. |
225 | | * Warning: this is dangerous if the Job's callback is bound to or has access |
226 | | * to state which may be deleted after this call. |
227 | | */ |
228 | | virtual void CancelAndDetach() = 0; |
229 | | |
230 | | /** |
231 | | * Returns true if there's any work pending or any worker running. |
232 | | */ |
233 | | virtual bool IsActive() = 0; |
234 | | |
235 | | /** |
236 | | * Returns true if associated with a Job and other methods may be called. |
237 | | * Returns false after Join() or Cancel() has been called. This may return |
238 | | * true even if no workers are running and IsCompleted() returns true. |
239 | | */ |
240 | | virtual bool IsValid() = 0; |
241 | | |
242 | | /** |
243 | | * Returns true if job priority can be changed. |
244 | | */ |
245 | 0 | virtual bool UpdatePriorityEnabled() const { return false; } |
246 | | |
247 | | /** |
248 | | * Update this Job's priority. |
249 | | */ |
250 | 0 | virtual void UpdatePriority(TaskPriority new_priority) {} |
251 | | }; |
252 | | |
253 | | /** |
254 | | * A JobTask represents work to run in parallel from Platform::PostJob(). |
255 | | */ |
256 | | class JobTask { |
257 | | public: |
258 | | virtual ~JobTask() = default; |
259 | | |
260 | | virtual void Run(JobDelegate* delegate) = 0; |
261 | | |
262 | | /** |
263 | | * Controls the maximum number of threads calling Run() concurrently, given |
264 | | * the number of threads currently assigned to this job and executing Run(). |
265 | | * Run() is only invoked if the number of threads previously running Run() was |
266 | | * less than the value returned. In general, this should return the latest |
267 | | * number of incomplete work items (smallest unit of work) left to process, |
268 | | * including items that are currently in progress. |worker_count| is the |
269 | | * number of threads currently assigned to this job, which some callers |
270 | | * may need in order to determine their return value. Since GetMaxConcurrency() |
271 | | * is a leaf function, it must not call back into any JobHandle methods. |
272 | | */ |
273 | | virtual size_t GetMaxConcurrency(size_t worker_count) const = 0; |
274 | | }; |
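 | | |
 | | // Illustrative sketch (not part of this header): a JobTask that drains an |
 | | // atomic counter of work items. GetMaxConcurrency() reports the remaining |
 | | // item count so the scheduler stops waking workers once nothing is left; |
 | | // ProcessItem() is a hypothetical helper. |
 | | // |
 | | //   class CounterJobTask : public v8::JobTask { |
 | | //    public: |
 | | //     explicit CounterJobTask(size_t items) : remaining_(items) {} |
 | | //     void Run(v8::JobDelegate* delegate) override { |
 | | //       while (!delegate->ShouldYield()) { |
 | | //         size_t old = remaining_.load(std::memory_order_relaxed); |
 | | //         if (old == 0) return;  // All work items claimed. |
 | | //         if (remaining_.compare_exchange_weak(old, old - 1)) { |
 | | //           ProcessItem(old - 1);  // Smallest unit of work. |
 | | //         } |
 | | //       } |
 | | //     } |
 | | //     size_t GetMaxConcurrency(size_t /*worker_count*/) const override { |
 | | //       return remaining_.load(std::memory_order_relaxed);  // Thread safe. |
 | | //     } |
 | | //    private: |
 | | //     std::atomic<size_t> remaining_; |
 | | //   }; |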
275 | | |
276 | | /** |
277 | | * A "blocking call" refers to any call that causes the calling thread to wait |
278 | | * off-CPU. It includes but is not limited to calls that wait on synchronous |
279 | | * file I/O operations: read or write a file from disk, interact with a pipe or |
280 | | * a socket, rename or delete a file, enumerate files in a directory, etc. |
281 | | * Acquiring a low-contention lock is not considered a blocking call. |
282 | | */ |
283 | | |
284 | | /** |
285 | | * BlockingType indicates the likelihood that a blocking call will actually |
286 | | * block. |
287 | | */ |
288 | | enum class BlockingType { |
289 | | // The call might block (e.g. file I/O that might hit in memory cache). |
290 | | kMayBlock, |
291 | | // The call will definitely block (e.g. cache already checked and now pinging |
292 | | // server synchronously). |
293 | | kWillBlock |
294 | | }; |
295 | | |
296 | | /** |
297 | | * This class is instantiated with CreateBlockingScope() in every scope where a |
298 | | * blocking call is made and serves as a precise annotation of the scope that |
299 | | * may/will block. May be implemented by an embedder to adjust the thread count. |
300 | | * CPU usage should be minimal within that scope. ScopedBlockingCalls can be |
301 | | * nested. |
302 | | */ |
303 | | class ScopedBlockingCall { |
304 | | public: |
305 | | virtual ~ScopedBlockingCall() = default; |
306 | | }; |
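 | | |
 | | // Illustrative sketch (not part of this header): annotating a blocking |
 | | // file read. |platform| is assumed to be the embedder's v8::Platform; |
 | | // ReadFileFromDisk() is a hypothetical synchronous I/O call. |
 | | // |
 | | //   { |
 | | //     std::unique_ptr<v8::ScopedBlockingCall> scope = |
 | | //         platform->CreateBlockingScope(v8::BlockingType::kMayBlock); |
 | | //     ReadFileFromDisk(path);  // May wait off-CPU. |
 | | //   }  // Leaving the scope ends the annotation. |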
307 | | |
308 | | /** |
309 | | * The interface represents complex arguments to trace events. |
310 | | */ |
311 | | class ConvertableToTraceFormat { |
312 | | public: |
313 | 0 | virtual ~ConvertableToTraceFormat() = default; |
314 | | |
315 | | /** |
316 | | * Append the class info to the provided |out| string. The appended |
317 | | * data must be a valid JSON object. Strings must be properly quoted and |
318 | | * escaped. No processing is applied to the content after it is |
319 | | * appended. |
320 | | */ |
321 | | virtual void AppendAsTraceFormat(std::string* out) const = 0; |
322 | | }; |
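 | | |
 | | // Illustrative sketch (not part of this header): a trace argument that |
 | | // serializes itself as a JSON object when the trace output is written. |
 | | // |
 | | //   class PointArg : public v8::ConvertableToTraceFormat { |
 | | //    public: |
 | | //     PointArg(int x, int y) : x_(x), y_(y) {} |
 | | //     void AppendAsTraceFormat(std::string* out) const override { |
 | | //       *out += "{\"x\":" + std::to_string(x_) + |
 | | //               ",\"y\":" + std::to_string(y_) + "}"; |
 | | //     } |
 | | //    private: |
 | | //     int x_, y_; |
 | | //   }; |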
323 | | |
324 | | /** |
325 | | * V8 Tracing controller. |
326 | | * |
327 | | * Can be implemented by an embedder to record trace events from V8. |
328 | | * |
329 | | * Will become obsolete in Perfetto SDK build (v8_use_perfetto = true). |
330 | | */ |
331 | | class TracingController { |
332 | | public: |
333 | 0 | virtual ~TracingController() = default; |
334 | | |
335 | | // In Perfetto mode, trace events are written using Perfetto's Track Event |
336 | | // API directly without going through the embedder. However, it is still |
337 | | // possible to observe tracing being enabled and disabled. |
338 | | #if !defined(V8_USE_PERFETTO) |
339 | | /** |
340 | | * Called by TRACE_EVENT* macros; don't call this directly. |
341 | | * The name parameter is a category group, for example: |
342 | | * TRACE_EVENT0("v8,parse", "V8.Parse") |
343 | | * The pointer returned points to a value with zero or more of the bits |
344 | | * defined in CategoryGroupEnabledFlags. |
345 | | **/ |
346 | 0 | virtual const uint8_t* GetCategoryGroupEnabled(const char* name) { |
347 | 0 | static uint8_t no = 0; |
348 | 0 | return &no; |
349 | 0 | } |
350 | | |
351 | | /** |
352 | | * Adds a trace event to the platform tracing system. These function calls are |
353 | | * usually the result of a TRACE_* macro from trace_event_common.h when |
354 | | * tracing and the category of the particular trace are enabled. It is not |
355 | | * advisable to call these functions on their own; they are really only meant |
356 | | * to be used by the trace macros. The returned handle can be used by |
357 | | * UpdateTraceEventDuration to update the duration of COMPLETE events. |
358 | | */ |
359 | | virtual uint64_t AddTraceEvent( |
360 | | char phase, const uint8_t* category_enabled_flag, const char* name, |
361 | | const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args, |
362 | | const char** arg_names, const uint8_t* arg_types, |
363 | | const uint64_t* arg_values, |
364 | | std::unique_ptr<ConvertableToTraceFormat>* arg_convertables, |
365 | 0 | unsigned int flags) { |
366 | 0 | return 0; |
367 | 0 | } |
368 | | virtual uint64_t AddTraceEventWithTimestamp( |
369 | | char phase, const uint8_t* category_enabled_flag, const char* name, |
370 | | const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args, |
371 | | const char** arg_names, const uint8_t* arg_types, |
372 | | const uint64_t* arg_values, |
373 | | std::unique_ptr<ConvertableToTraceFormat>* arg_convertables, |
374 | 0 | unsigned int flags, int64_t timestamp) { |
375 | 0 | return 0; |
376 | 0 | } |
377 | | |
378 | | /** |
379 | | * Sets the duration field of a COMPLETE trace event. It must be called with |
380 | | * the handle returned from AddTraceEvent(). |
381 | | **/ |
382 | | virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag, |
383 | 0 | const char* name, uint64_t handle) {} |
384 | | #endif // !defined(V8_USE_PERFETTO) |
385 | | |
386 | | class TraceStateObserver { |
387 | | public: |
388 | 134k | virtual ~TraceStateObserver() = default; |
389 | | virtual void OnTraceEnabled() = 0; |
390 | | virtual void OnTraceDisabled() = 0; |
391 | | }; |
392 | | |
393 | | /** |
394 | | * Adds tracing state change observer. |
395 | | * Does nothing in Perfetto SDK build (v8_use_perfetto = true). |
396 | | */ |
397 | 0 | virtual void AddTraceStateObserver(TraceStateObserver*) {} |
398 | | |
399 | | /** |
400 | | * Removes tracing state change observer. |
401 | | * Does nothing in Perfetto SDK build (v8_use_perfetto = true). |
402 | | */ |
403 | 0 | virtual void RemoveTraceStateObserver(TraceStateObserver*) {} |
404 | | }; |
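 | | |
 | | // Illustrative sketch (not part of this header): observing tracing state |
 | | // changes. |controller| is assumed to come from |
 | | // Platform::GetTracingController(). |
 | | // |
 | | //   class MyObserver : public v8::TracingController::TraceStateObserver { |
 | | //    public: |
 | | //     void OnTraceEnabled() override { tracing_active_ = true; } |
 | | //     void OnTraceDisabled() override { tracing_active_ = false; } |
 | | //    private: |
 | | //     bool tracing_active_ = false; |
 | | //   }; |
 | | // |
 | | //   MyObserver observer; |
 | | //   controller->AddTraceStateObserver(&observer); |
 | | //   // ... later, before |observer| is destroyed: |
 | | //   controller->RemoveTraceStateObserver(&observer); |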
405 | | |
406 | | /** |
407 | | * A V8 memory page allocator. |
408 | | * |
409 | | * Can be implemented by an embedder to manage large host OS allocations. |
410 | | */ |
411 | | class PageAllocator { |
412 | | public: |
413 | | virtual ~PageAllocator() = default; |
414 | | |
415 | | /** |
416 | | * Gets the page granularity for AllocatePages and FreePages. Addresses and |
417 | | * lengths for those calls should be multiples of AllocatePageSize(). |
418 | | */ |
419 | | virtual size_t AllocatePageSize() = 0; |
420 | | |
421 | | /** |
422 | | * Gets the page granularity for SetPermissions and ReleasePages. Addresses |
423 | | * and lengths for those calls should be multiples of CommitPageSize(). |
424 | | */ |
425 | | virtual size_t CommitPageSize() = 0; |
426 | | |
427 | | /** |
428 | | * Sets the random seed so that GetRandomMmapAddr() will generate repeatable |
429 | | * sequences of random mmap addresses. |
430 | | */ |
431 | | virtual void SetRandomMmapSeed(int64_t seed) = 0; |
432 | | |
433 | | /** |
434 | | * Returns a randomized address, suitable for memory allocation under ASLR. |
435 | | * The address will be aligned to AllocatePageSize. |
436 | | */ |
437 | | virtual void* GetRandomMmapAddr() = 0; |
438 | | |
439 | | /** |
440 | | * Memory permissions. |
441 | | */ |
442 | | enum Permission { |
443 | | kNoAccess, |
444 | | kRead, |
445 | | kReadWrite, |
446 | | kReadWriteExecute, |
447 | | kReadExecute, |
448 | | // Set this when reserving memory that will later require kReadWriteExecute |
449 | | // permissions. The resulting behavior is platform-specific, currently |
450 | | // this is used to set the MAP_JIT flag on Apple Silicon. |
451 | | // TODO(jkummerow): Remove this when Wasm has a platform-independent |
452 | | // w^x implementation. |
453 | | // TODO(saelo): Remove this once all JIT pages are allocated through the |
454 | | // VirtualAddressSpace API. |
455 | | kNoAccessWillJitLater |
456 | | }; |
457 | | |
458 | | /** |
459 | | * Allocates memory in range with the given alignment and permission. |
460 | | */ |
461 | | virtual void* AllocatePages(void* address, size_t length, size_t alignment, |
462 | | Permission permissions) = 0; |
463 | | |
464 | | /** |
465 | | * Frees memory in a range that was allocated by a call to AllocatePages. |
466 | | */ |
467 | | virtual bool FreePages(void* address, size_t length) = 0; |
468 | | |
469 | | /** |
470 | | * Releases memory in a range that was allocated by a call to AllocatePages. |
471 | | */ |
472 | | virtual bool ReleasePages(void* address, size_t length, |
473 | | size_t new_length) = 0; |
474 | | |
475 | | /** |
476 | | * Sets permissions on pages in an allocated range. |
477 | | */ |
478 | | virtual bool SetPermissions(void* address, size_t length, |
479 | | Permission permissions) = 0; |
480 | | |
481 | | /** |
482 | | * Recommits discarded pages in the given range with given permissions. |
483 | | * Discarded pages must be recommitted with their original permissions |
484 | | * before they are used again. |
485 | | */ |
486 | | virtual bool RecommitPages(void* address, size_t length, |
487 | 0 | Permission permissions) { |
488 | 0 | // TODO(v8:12797): make it pure once it's implemented on Chromium side. |
489 | 0 | return false; |
490 | 0 | } |
491 | | |
492 | | /** |
493 | | * Frees memory in the given [address, address + size) range. address and size |
494 | | * should be operating system page-aligned. The next write to this |
495 | | * memory area brings the memory transparently back. This should be treated as |
496 | | * a hint to the OS that the pages are no longer needed. It does not guarantee |
497 | | * that the pages will be discarded immediately or at all. |
498 | | */ |
499 | 0 | virtual bool DiscardSystemPages(void* address, size_t size) { return true; } |
500 | | |
501 | | /** |
502 | | * Decommits any wired memory pages in the given range, allowing the OS to |
503 | | * reclaim them, and marks the region as inaccessible (kNoAccess). The address |
504 | | * range stays reserved and can be accessed again later by changing its |
505 | | * permissions. However, in that case the memory content is guaranteed to be |
506 | | * zero-initialized again. The memory must have been previously allocated by a |
507 | | * call to AllocatePages. Returns true on success, false otherwise. |
508 | | */ |
509 | | virtual bool DecommitPages(void* address, size_t size) = 0; |
510 | | |
511 | | /** |
512 | | * INTERNAL ONLY: This interface has not been stabilised and may change |
513 | | * without notice from one release to another without being deprecated first. |
514 | | */ |
515 | | class SharedMemoryMapping { |
516 | | public: |
517 | | // Implementations are expected to free the shared memory mapping in the |
518 | | // destructor. |
519 | | virtual ~SharedMemoryMapping() = default; |
520 | | virtual void* GetMemory() const = 0; |
521 | | }; |
522 | | |
523 | | /** |
524 | | * INTERNAL ONLY: This interface has not been stabilised and may change |
525 | | * without notice from one release to another without being deprecated first. |
526 | | */ |
527 | | class SharedMemory { |
528 | | public: |
529 | | // Implementations are expected to free the shared memory in the destructor. |
530 | | virtual ~SharedMemory() = default; |
531 | | virtual std::unique_ptr<SharedMemoryMapping> RemapTo( |
532 | | void* new_address) const = 0; |
533 | | virtual void* GetMemory() const = 0; |
534 | | virtual size_t GetSize() const = 0; |
535 | | }; |
536 | | |
537 | | /** |
538 | | * INTERNAL ONLY: This interface has not been stabilised and may change |
539 | | * without notice from one release to another without being deprecated first. |
540 | | * |
541 | | * Reserves pages at a fixed address, returning whether the reservation is |
542 | | * possible. The reserved memory is detached from the PageAllocator and so |
543 | | * should not be freed by it. It's intended for use with |
544 | | * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory. |
545 | | */ |
546 | 0 | virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) { |
547 | 0 | return false; |
548 | 0 | } |
549 | | |
550 | | /** |
551 | | * INTERNAL ONLY: This interface has not been stabilised and may change |
552 | | * without notice from one release to another without being deprecated first. |
553 | | * |
554 | | * Allocates shared memory pages. Not all PageAllocators need to support |
555 | | * this, so this method need not be overridden. |
556 | | * Allocates a new read-only shared memory region of size |length| and copies |
557 | | * the memory at |original_address| into it. |
558 | | */ |
559 | | virtual std::unique_ptr<SharedMemory> AllocateSharedPages( |
560 | 0 | size_t length, const void* original_address) { |
561 | 0 | return {}; |
562 | 0 | } |
563 | | |
564 | | /** |
565 | | * INTERNAL ONLY: This interface has not been stabilised and may change |
566 | | * without notice from one release to another without being deprecated first. |
567 | | * |
568 | | * If not overridden and changed to return true, V8 will not attempt to call |
569 | | * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages |
570 | | * and RemapSharedPages must also be overridden. |
571 | | */ |
572 | 0 | virtual bool CanAllocateSharedPages() { return false; } |
573 | | }; |
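 | | |
 | | // Illustrative sketch (not part of this header): a reserve / commit / free |
 | | // sequence against a PageAllocator. |allocator| is assumed to come from |
 | | // Platform::GetPageAllocator(). |
 | | // |
 | | //   size_t page = allocator->AllocatePageSize(); |
 | | //   void* region = allocator->AllocatePages( |
 | | //       allocator->GetRandomMmapAddr(), 4 * page, page, |
 | | //       v8::PageAllocator::kNoAccess);  // Reserve without access. |
 | | //   if (region) { |
 | | //     // Commit the first committable page read-write before use. |
 | | //     allocator->SetPermissions(region, allocator->CommitPageSize(), |
 | | //                               v8::PageAllocator::kReadWrite); |
 | | //     // ... use the memory ... |
 | | //     allocator->FreePages(region, 4 * page); |
 | | //   } |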
574 | | |
575 | | /** |
576 | | * An allocator that uses per-thread permissions to protect the memory. |
577 | | * |
578 | | * The implementation is platform/hardware specific, e.g. using pkeys on x64. |
579 | | * |
580 | | * INTERNAL ONLY: This interface has not been stabilised and may change |
581 | | * without notice from one release to another without being deprecated first. |
582 | | */ |
583 | | class ThreadIsolatedAllocator { |
584 | | public: |
585 | | virtual ~ThreadIsolatedAllocator() = default; |
586 | | |
587 | | virtual void* Allocate(size_t size) = 0; |
588 | | |
589 | | virtual void Free(void* object) = 0; |
590 | | |
591 | | enum class Type { |
592 | | kPkey, |
593 | | }; |
594 | | |
595 | | virtual Type Type() const = 0; |
596 | | |
597 | | /** |
598 | | * Returns the pkey used to implement the thread isolation if Type == kPkey. |
599 | | */ |
600 | 0 | virtual int Pkey() const { return -1; } |
601 | | |
602 | | /** |
603 | | * Per-thread permissions can be reset on signal handler entry. Even reading |
604 | | * ThreadIsolated memory will segfault in that case. |
605 | | * Call this function on signal handler entry to ensure that read permissions |
606 | | * are restored. |
607 | | */ |
608 | | static void SetDefaultPermissionsForSignalHandler(); |
609 | | }; |
610 | | |
611 | | // Opaque type representing a handle to a shared memory region. |
612 | | using PlatformSharedMemoryHandle = intptr_t; |
613 | | static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1; |
614 | | |
615 | | // Conversion routines from the platform-dependent shared memory identifiers |
616 | | // into the opaque PlatformSharedMemoryHandle type. These use the underlying |
617 | | // types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t) |
618 | | // to avoid pulling in large OS header files into this header file. Instead, |
619 | | // the users of these routines are expected to include the respective OS |
620 | | // headers in addition to this one. |
621 | | #if V8_OS_DARWIN |
622 | | // Convert between a shared memory handle and a mach_port_t referencing a memory |
623 | | // entry object. |
624 | | inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry( |
625 | | unsigned int port) { |
626 | | return static_cast<PlatformSharedMemoryHandle>(port); |
627 | | } |
628 | | inline unsigned int MachMemoryEntryFromSharedMemoryHandle( |
629 | | PlatformSharedMemoryHandle handle) { |
630 | | return static_cast<unsigned int>(handle); |
631 | | } |
632 | | #elif V8_OS_FUCHSIA |
633 | | // Convert between a shared memory handle and a zx_handle_t to a VMO. |
634 | | inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) { |
635 | | return static_cast<PlatformSharedMemoryHandle>(handle); |
636 | | } |
637 | | inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) { |
638 | | return static_cast<uint32_t>(handle); |
639 | | } |
640 | | #elif V8_OS_WIN |
641 | | // Convert between a shared memory handle and a Windows HANDLE to a file mapping |
642 | | // object. |
643 | | inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping( |
644 | | void* handle) { |
645 | | return reinterpret_cast<PlatformSharedMemoryHandle>(handle); |
646 | | } |
647 | | inline void* FileMappingFromSharedMemoryHandle( |
648 | | PlatformSharedMemoryHandle handle) { |
649 | | return reinterpret_cast<void*>(handle); |
650 | | } |
651 | | #else |
652 | | // Convert between a shared memory handle and a file descriptor. |
653 | 0 | inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) { |
654 | 0 | return static_cast<PlatformSharedMemoryHandle>(fd); |
655 | 0 | } |
656 | | inline int FileDescriptorFromSharedMemoryHandle( |
657 | 0 | PlatformSharedMemoryHandle handle) { |
658 | 0 | return static_cast<int>(handle); |
659 | 0 | } |
660 | | #endif |
661 | | |
662 | | /** |
663 | | * Possible permissions for memory pages. |
664 | | */ |
665 | | enum class PagePermissions { |
666 | | kNoAccess, |
667 | | kRead, |
668 | | kReadWrite, |
669 | | kReadWriteExecute, |
670 | | kReadExecute, |
671 | | }; |
672 | | |
673 | | /** |
674 | | * Class to manage a virtual memory address space. |
675 | | * |
676 | | * This class represents a contiguous region of virtual address space in which |
677 | | * sub-spaces and (private or shared) memory pages can be allocated, freed, and |
678 | | * modified. This interface is meant to eventually replace the PageAllocator |
679 | | * interface, and can be used as an alternative in the meantime. |
680 | | * |
681 | | * This API is not yet stable and may change without notice! |
682 | | */ |
683 | | class VirtualAddressSpace { |
684 | | public: |
685 | | using Address = uintptr_t; |
686 | | |
687 | | VirtualAddressSpace(size_t page_size, size_t allocation_granularity, |
688 | | Address base, size_t size, |
689 | | PagePermissions max_page_permissions) |
690 | | : page_size_(page_size), |
691 | | allocation_granularity_(allocation_granularity), |
692 | | base_(base), |
693 | | size_(size), |
694 | 0 | max_page_permissions_(max_page_permissions) {} |
695 | | |
696 | | virtual ~VirtualAddressSpace() = default; |
697 | | |
698 | | /** |
699 | | * The page size used inside this space. Guaranteed to be a power of two. |
700 | | * Used as granularity for all page-related operations except for allocation, |
701 | | * which uses the allocation_granularity(); see below. |
702 | | * |
703 | | * \returns the page size in bytes. |
704 | | */ |
705 | 0 | size_t page_size() const { return page_size_; } |
706 | | |
707 | | /** |
708 | | * The granularity of page allocations and, by extension, of subspace |
709 | | * allocations. This is guaranteed to be a power of two and a multiple of the |
710 | | * page_size(). In practice, this is equal to the page size on most OSes, but |
711 | | * on Windows it is usually 64KB, while the page size is 4KB. |
712 | | * |
713 | | * \returns the allocation granularity in bytes. |
714 | | */ |
715 | 0 | size_t allocation_granularity() const { return allocation_granularity_; } |
716 | | |
717 | | /** |
718 | | * The base address of the address space managed by this instance. |
719 | | * |
720 | | * \returns the base address of this address space. |
721 | | */ |
722 | 0 | Address base() const { return base_; } |
723 | | |
724 | | /** |
725 | | * The size of the address space managed by this instance. |
726 | | * |
727 | | * \returns the size of this address space in bytes. |
728 | | */ |
729 | 0 | size_t size() const { return size_; } |
730 | | |
731 | | /** |
732 | | * The maximum page permissions that pages allocated inside this space can |
733 | | * obtain. |
734 | | * |
735 | | * \returns the maximum page permissions. |
736 | | */ |
737 | 0 | PagePermissions max_page_permissions() const { return max_page_permissions_; } |
738 | | |
739 | | /** |
740 | | * Whether the |address| is inside the address space managed by this instance. |
741 | | * |
742 | | * \returns true if it is inside the address space, false if not. |
743 | | */ |
744 | 0 | bool Contains(Address address) const { |
745 | 0 | return (address >= base()) && (address < base() + size()); |
746 | 0 | } |
747 | | |
748 | | /** |
749 | | * Sets the random seed so that GetRandomPageAddress() will generate |
750 | | * repeatable sequences of random addresses. |
751 | | * |
752 | | * \param seed The seed for the PRNG. |
753 | | */ |
754 | | virtual void SetRandomSeed(int64_t seed) = 0; |
755 | | |
756 | | /** |
757 | | * Returns a random address inside this address space, suitable for page |
758 | | * allocation hints. |
759 | | * |
760 | | * \returns a random address aligned to allocation_granularity(). |
761 | | */ |
762 | | virtual Address RandomPageAddress() = 0; |
763 | | |
764 | | /** |
765 | | * Allocates private memory pages with the given alignment and permissions. |
766 | | * |
767 | | * \param hint If nonzero, an attempt is first made to place the allocation |
768 | | * at the given address. If that fails, the allocation may be placed |
769 | | * elsewhere, possibly nearby, but that is not guaranteed. Specifying |
770 | | * zero for the hint always causes this function to choose a random address. |
771 | | * The hint, if specified, must be aligned to the specified alignment. |
772 | | * |
773 | | * \param size The size of the allocation in bytes. Must be a multiple of the |
774 | | * allocation_granularity(). |
775 | | * |
776 | | * \param alignment The alignment of the allocation in bytes. Must be a |
777 | | * multiple of the allocation_granularity() and should be a power of two. |
778 | | * |
779 | | * \param permissions The page permissions of the newly allocated pages. |
780 | | * |
781 | | * \returns the start address of the allocated pages on success, zero on |
782 | | * failure. |
783 | | */ |
784 | | static constexpr Address kNoHint = 0; |
785 | | virtual V8_WARN_UNUSED_RESULT Address |
786 | | AllocatePages(Address hint, size_t size, size_t alignment, |
787 | | PagePermissions permissions) = 0; |
788 | | |
789 | | /** |
790 | | * Frees previously allocated pages. |
791 | | * |
792 | | * This function will terminate the process on failure as this implies a bug |
793 | | * in the client. As such, there is no return value. |
794 | | * |
795 | | * \param address The start address of the pages to free. This address must |
796 | | * have been obtained through a call to AllocatePages. |
797 | | * |
798 | | * \param size The size in bytes of the region to free. This must match the |
799 | | * size passed to AllocatePages when the pages were allocated. |
800 | | */ |
801 | | virtual void FreePages(Address address, size_t size) = 0; |
802 | | |
803 | | /** |
804 | | * Sets permissions of all allocated pages in the given range. |
805 | | * |
806 | | * This operation can fail due to OOM, in which case false is returned. If |
807 | | * the operation fails for a reason other than OOM, this function will |
808 | | * terminate the process as this implies a bug in the client. |
809 | | * |
810 | | * \param address The start address of the range. Must be aligned to |
811 | | * page_size(). |
812 | | * |
813 | | * \param size The size in bytes of the range. Must be a multiple |
814 | | * of page_size(). |
815 | | * |
816 | | * \param permissions The new permissions for the range. |
817 | | * |
818 | | * \returns true on success, false on OOM. |
819 | | */ |
820 | | virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions( |
821 | | Address address, size_t size, PagePermissions permissions) = 0; |
822 | | |
823 | | /** |
824 | | * Creates a guard region at the specified address. |
825 | | * |
826 | | * Guard regions are guaranteed to cause a fault when accessed and generally |
827 | | * do not count towards any memory consumption limits. Further, allocating |
828 | | * guard regions usually cannot fail in subspaces if the region does not |
829 | | * overlap with another region, subspace, or page allocation. |
830 | | * |
831 | | * \param address The start address of the guard region. Must be aligned to |
832 | | * the allocation_granularity(). |
833 | | * |
834 | | * \param size The size of the guard region in bytes. Must be a multiple of |
835 | | * the allocation_granularity(). |
836 | | * |
837 | | * \returns true on success, false otherwise. |
838 | | */ |
839 | | virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address, |
840 | | size_t size) = 0; |
841 | | |
842 | | /** |
843 | | * Frees an existing guard region. |
844 | | * |
845 | | * This function will terminate the process on failure as this implies a bug |
846 | | * in the client. As such, there is no return value. |
847 | | * |
848 | | * \param address The start address of the guard region to free. This address |
849 | | * must have previously been used as address parameter in a successful |
850 | | * invocation of AllocateGuardRegion. |
851 | | * |
852 | | * \param size The size in bytes of the guard region to free. This must match |
853 | | * the size passed to AllocateGuardRegion when the region was created. |
854 | | */ |
855 | | virtual void FreeGuardRegion(Address address, size_t size) = 0; |
856 | | |
857 | | /** |
858 | | * Allocates shared memory pages with the given permissions. |
859 | | * |
860 | | * \param hint Placement hint. See AllocatePages. |
861 | | * |
862 | | * \param size The size of the allocation in bytes. Must be a multiple of the |
863 | | * allocation_granularity(). |
864 | | * |
865 | | * \param permissions The page permissions of the newly allocated pages. |
866 | | * |
867 | | * \param handle A platform-specific handle to a shared memory object. See |
868 | | * the SharedMemoryHandleFromX routines above for ways to obtain these. |
869 | | * |
870 | | * \param offset The offset in the shared memory object at which the mapping |
871 | | * should start. Must be a multiple of the allocation_granularity(). |
872 | | * |
873 | | * \returns the start address of the allocated pages on success, zero on |
874 | | * failure. |
875 | | */ |
876 | | virtual V8_WARN_UNUSED_RESULT Address |
877 | | AllocateSharedPages(Address hint, size_t size, PagePermissions permissions, |
878 | | PlatformSharedMemoryHandle handle, uint64_t offset) = 0; |
879 | | |
880 | | /** |
881 | | * Frees previously allocated shared pages. |
882 | | * |
883 | | * This function will terminate the process on failure as this implies a bug |
884 | | * in the client. As such, there is no return value. |
885 | | * |
886 | | * \param address The start address of the pages to free. This address must |
887 | | * have been obtained through a call to AllocateSharedPages. |
888 | | * |
889 | | * \param size The size in bytes of the region to free. This must match the |
890 | | * size passed to AllocateSharedPages when the pages were allocated. |
891 | | */ |
892 | | virtual void FreeSharedPages(Address address, size_t size) = 0; |
893 | | |
894 | | /** |
895 | | * Whether this instance can allocate subspaces or not. |
896 | | * |
897 | | * \returns true if subspaces can be allocated, false if not. |
898 | | */ |
899 | | virtual bool CanAllocateSubspaces() = 0; |
900 | | |
901 | | /** |
902 | | * Allocates a subspace. |
903 | | * |
904 | | * The address space of a subspace stays reserved in the parent space for the |
905 | | * lifetime of the subspace. As such, it is guaranteed that page allocations |
906 | | * on the parent space cannot end up inside a subspace. |
907 | | * |
908 | | * \param hint Hints where the subspace should be allocated. See |
909 | | * AllocatePages() for more details. |
910 | | * |
911 | | * \param size The size in bytes of the subspace. Must be a multiple of the |
912 | | * allocation_granularity(). |
913 | | * |
914 | | * \param alignment The alignment of the subspace in bytes. Must be a multiple |
915 | | * of the allocation_granularity() and should be a power of two. |
916 | | * |
917 | | * \param max_page_permissions The maximum permissions that pages allocated in |
918 | | * the subspace can obtain. |
919 | | * |
920 | | * \returns a new subspace or nullptr on failure. |
921 | | */ |
922 | | virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace( |
923 | | Address hint, size_t size, size_t alignment, |
924 | | PagePermissions max_page_permissions) = 0; |
925 | | |
926 | | // |
927 | | // TODO(v8) maybe refactor the methods below before stabilizing the API. For |
928 | | // example by combining them into some form of page operation method that |
929 | | // takes a command enum as parameter. |
930 | | // |
931 | | |
932 | | /** |
933 | | * Recommits discarded pages in the given range with given permissions. |
934 | | * Discarded pages must be recommitted with their original permissions |
935 | | * before they are used again. |
936 | | * |
937 | | * \param address The start address of the range. Must be aligned to |
938 | | * page_size(). |
939 | | * |
940 | | * \param size The size in bytes of the range. Must be a multiple |
941 | | * of page_size(). |
942 | | * |
943 | | * \param permissions The permissions for the range that the pages must have. |
944 | | * |
945 | | * \returns true on success, false otherwise. |
946 | | */ |
947 | | virtual V8_WARN_UNUSED_RESULT bool RecommitPages( |
948 | | Address address, size_t size, PagePermissions permissions) = 0; |
949 | | |
950 | | /** |
951 | | * Frees memory in the given [address, address + size) range. address and |
952 | | * size should be aligned to the page_size(). The next write to this memory |
953 | | * area brings the memory transparently back. This should be treated as a |
954 | | * hint to the OS that the pages are no longer needed. It does not guarantee |
955 | | * that the pages will be discarded immediately or at all. |
956 | | * |
957 | | * \returns true on success, false otherwise. Since this method is only a |
958 | | * hint, a successful invocation does not imply that pages have been removed. |
959 | | */ |
960 | | virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address, |
961 | 0 | size_t size) { |
962 | 0 | return true; |
963 | 0 | } |
964 | | /** |
965 | | * Decommits any wired memory pages in the given range, allowing the OS to |
966 | | * reclaim them, and marks the region as inaccessible (kNoAccess). The address |
967 | | * range stays reserved and can be accessed again later by changing its |
968 | | * permissions. However, in that case the memory content is guaranteed to be |
969 | | * zero-initialized again. The memory must have been previously allocated by a |
970 | | * call to AllocatePages. |
971 | | * |
972 | | * \returns true on success, false otherwise. |
973 | | */ |
974 | | virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address, |
975 | | size_t size) = 0; |
976 | | |
977 | | private: |
978 | | const size_t page_size_; |
979 | | const size_t allocation_granularity_; |
980 | | const Address base_; |
981 | | const size_t size_; |
982 | | const PagePermissions max_page_permissions_; |
983 | | }; |
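 | | |
 | | // Illustrative sketch (not part of this header): carving a subspace out of |
 | | // a VirtualAddressSpace and allocating pages inside it. |space| is assumed |
 | | // to be an existing VirtualAddressSpace that supports subspaces. |
 | | // |
 | | //   using VAS = v8::VirtualAddressSpace; |
 | | //   size_t gran = space->allocation_granularity(); |
 | | //   if (space->CanAllocateSubspaces()) { |
 | | //     std::unique_ptr<VAS> sub = space->AllocateSubspace( |
 | | //         VAS::kNoHint, 16 * gran, gran, v8::PagePermissions::kReadWrite); |
 | | //     if (sub) { |
 | | //       VAS::Address pages = sub->AllocatePages( |
 | | //           VAS::kNoHint, gran, gran, v8::PagePermissions::kReadWrite); |
 | | //       if (pages) sub->FreePages(pages, gran); |
 | | //     } |
 | | //   } |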
984 | | |
985 | | /** |
986 | | * V8 Allocator used for allocating zone backings. |
987 | | */ |
988 | | class ZoneBackingAllocator { |
989 | | public: |
990 | | using MallocFn = void* (*)(size_t); |
991 | | using FreeFn = void (*)(void*); |
992 | | |
993 | 399k | virtual MallocFn GetMallocFn() const { return ::malloc; } |
994 | 399k | virtual FreeFn GetFreeFn() const { return ::free; } |
995 | | }; |
996 | | |
997 | | /** |
998 | | * Observer used by V8 to notify the embedder about entering/leaving sections |
999 | | * with high throughput of malloc/free operations. |
1000 | | */ |
1001 | | class HighAllocationThroughputObserver { |
1002 | | public: |
1003 | 141k | virtual void EnterSection() {} |
1004 | 141k | virtual void LeaveSection() {} |
1005 | | }; |
1006 | | |
1007 | | /** |
1008 | | * V8 Platform abstraction layer. |
1009 | | * |
1010 | | * The embedder has to provide an implementation of this interface before |
1011 | | * initializing the rest of V8. |
1012 | | */ |
1013 | | class Platform { |
1014 | | public: |
1015 | 0 | virtual ~Platform() = default; |
1016 | | |
1017 | | /** |
1018 | | * Allows the embedder to manage memory page allocations. |
1019 | | * Returning nullptr will cause V8 to use the default page allocator. |
1020 | | */ |
1021 | | virtual PageAllocator* GetPageAllocator() = 0; |
1022 | | |
1023 | | /** |
1024 | | * Allows the embedder to provide an allocator that uses per-thread memory |
1025 | | * permissions to protect allocations. |
1026 | | * Returning nullptr will cause V8 to disable protections that rely on this |
1027 | | * feature. |
1028 | | */ |
1029 | 132k | virtual ThreadIsolatedAllocator* GetThreadIsolatedAllocator() { |
1030 | 132k | return nullptr; |
1031 | 132k | } |
1032 | | |
1033 | | /** |
1034 | | * Allows the embedder to specify a custom allocator used for zones. |
1035 | | */ |
1036 | 798k | virtual ZoneBackingAllocator* GetZoneBackingAllocator() { |
1037 | 798k | static ZoneBackingAllocator default_allocator; |
1038 | 798k | return &default_allocator; |
1039 | 798k | } |
1040 | | |
1041 | | /** |
1042 | | * Enables the embedder to respond in cases where V8 can't allocate large |
1043 | | * blocks of memory. V8 retries the failed allocation once after calling this |
1044 | | * method. On success, execution continues; otherwise V8 exits with a fatal |
1045 | | * error. |
1046 | | * Embedder overrides of this function must NOT call back into V8. |
1047 | | */ |
1048 | 0 | virtual void OnCriticalMemoryPressure() {} |
1049 | | |
1050 | | /** |
1051 | | * Gets the max number of worker threads that may be used to execute |
1052 | | * concurrent work scheduled for any single TaskPriority by |
1053 | | * Call(BlockingTask)OnWorkerThread() or PostJob(). This can be used to |
1054 | | * estimate the number of tasks a work package should be split into. A return |
1055 | | * value of 0 means that there are no worker threads available. Note that a |
1056 | | * value of 0 won't prohibit V8 from posting tasks using |CallOnWorkerThread|. |
1057 | | */ |
1058 | | virtual int NumberOfWorkerThreads() = 0; |
1059 | | |
1060 | | /** |
1061 | | * Returns a TaskRunner which can be used to post a task on the foreground |
1062 | | * thread. The TaskRunner's NonNestableTasksEnabled() must be true. This |
1063 | | * function should only be called from a foreground thread. |
1064 | | * TODO(chromium:1448758): Deprecate once |GetForegroundTaskRunner(Isolate*, |
1065 | | * TaskPriority)| is ready. |
1066 | | */ |
1067 | | virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner( |
1068 | 0 | Isolate* isolate) { |
1069 | 0 | return GetForegroundTaskRunner(isolate, TaskPriority::kUserBlocking); |
1070 | 0 | } |
1071 | | |
1072 | | /** |
1073 | | * Returns a TaskRunner with a specific |priority| which can be used to post a |
1074 | | * task on the foreground thread. The TaskRunner's NonNestableTasksEnabled() |
1075 | | * must be true. This function should only be called from a foreground thread. |
1076 | | * TODO(chromium:1448758): Make pure virtual once embedders implement it. |
1077 | | */ |
1078 | | virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner( |
1079 | 0 | Isolate* isolate, TaskPriority priority) { |
1080 | 0 | return nullptr; |
1081 | 0 | } |
1082 | | |
1083 | | /** |
1084 | | * Schedules a task to be invoked on a worker thread. |
1085 | | * Embedders should override PostTaskOnWorkerThreadImpl() instead of |
1086 | | * CallOnWorkerThread(). |
1087 | | */ |
1088 | | void CallOnWorkerThread( |
1089 | | std::unique_ptr<Task> task, |
1090 | 0 | const SourceLocation& location = SourceLocation::Current()) { |
1091 | 0 | PostTaskOnWorkerThreadImpl(TaskPriority::kUserVisible, std::move(task), |
1092 | 0 | location); |
1093 | 0 | } |
1094 | | |
1095 | | /** |
1096 | | * Schedules a task that blocks the main thread to be invoked with |
1097 | | * high-priority on a worker thread. |
1098 | | * Embedders should override PostTaskOnWorkerThreadImpl() instead of |
1099 | | * CallBlockingTaskOnWorkerThread(). |
1100 | | */ |
1101 | | void CallBlockingTaskOnWorkerThread( |
1102 | | std::unique_ptr<Task> task, |
1103 | 0 | const SourceLocation& location = SourceLocation::Current()) { |
1104 | 0 | // Embedders may optionally override this to process these tasks in a high |
1105 | 0 | // priority pool. |
1106 | 0 | PostTaskOnWorkerThreadImpl(TaskPriority::kUserBlocking, std::move(task), |
1107 | 0 | location); |
1108 | 0 | } |
1109 | | |
1110 | | /** |
1111 | | * Schedules a task to be invoked with low-priority on a worker thread. |
1112 | | * Embedders should override PostTaskOnWorkerThreadImpl() instead of |
1113 | | * CallLowPriorityTaskOnWorkerThread(). |
1114 | | */ |
1115 | | void CallLowPriorityTaskOnWorkerThread( |
1116 | | std::unique_ptr<Task> task, |
1117 | 0 | const SourceLocation& location = SourceLocation::Current()) { |
1118 | 0 | // Embedders may optionally override this to process these tasks in a low |
1119 | 0 | // priority pool. |
1120 | 0 | PostTaskOnWorkerThreadImpl(TaskPriority::kBestEffort, std::move(task), |
1121 | 0 | location); |
1122 | 0 | } |
1123 | | |
1124 | | /** |
1125 | | * Schedules a task to be invoked on a worker thread after |delay_in_seconds| |
1126 | | * expires. |
1127 | | * Embedders should override PostDelayedTaskOnWorkerThreadImpl() instead of |
1128 | | * CallDelayedOnWorkerThread(). |
1129 | | */ |
1130 | | void CallDelayedOnWorkerThread( |
1131 | | std::unique_ptr<Task> task, double delay_in_seconds, |
1132 | 0 | const SourceLocation& location = SourceLocation::Current()) { |
1133 | 0 | PostDelayedTaskOnWorkerThreadImpl(TaskPriority::kUserVisible, |
1134 | 0 | std::move(task), delay_in_seconds, |
1135 | 0 | location); |
1136 | 0 | } |
1137 | | |
1138 | | /** |
1139 | | * Returns true if idle tasks are enabled for the given |isolate|. |
1140 | | */ |
1141 | 0 | virtual bool IdleTasksEnabled(Isolate* isolate) { return false; } |
1142 | | |
1143 | | /** |
1144 | | * Posts |job_task| to run in parallel. Returns a JobHandle associated with |
1145 | | * the Job, which can be joined or canceled. |
1146 | | * This avoids degenerate cases: |
1147 | | * - Calling CallOnWorkerThread() for each work item, causing significant |
1148 | | * overhead. |
1149 | | * - Fixed number of CallOnWorkerThread() calls that split the work and might |
1150 | | * run for a long time. This is problematic when many components post |
1151 | | * "num cores" tasks and all expect to use all the cores. In these cases, |
1152 | | * the scheduler lacks context to be fair to multiple same-priority requests |
1153 | | * and/or ability to request lower priority work to yield when high priority |
1154 | | * work comes in. |
1155 | | * A canonical implementation of |job_task| looks like: |
1156 | | * class MyJobTask : public JobTask { |
1157 | | * public: |
1158 | | * MyJobTask(...) : worker_queue_(...) {} |
1159 | | * // JobTask: |
1160 | | * void Run(JobDelegate* delegate) override { |
1161 | | * while (!delegate->ShouldYield()) { |
1162 | | * // Smallest unit of work. |
1163 | | * auto work_item = worker_queue_.TakeWorkItem(); // Thread safe. |
1164 | | * if (!work_item) return; |
1165 | | * ProcessWork(work_item); |
1166 | | * } |
1167 | | * } |
1168 | | * |
1169 | | * size_t GetMaxConcurrency() const override { |
1170 | | * return worker_queue_.GetSize(); // Thread safe. |
1171 | | * } |
1172 | | * }; |
1173 | | * auto handle = PostJob(TaskPriority::kUserVisible, |
1174 | | * std::make_unique<MyJobTask>(...)); |
1175 | | * handle->Join(); |
1176 | | * |
1177 | | * PostJob() and methods of the returned JobHandle/JobDelegate, must never be |
1178 | | * called while holding a lock that could be acquired by JobTask::Run or |
1179 | | * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is |
1180 | | * because [1] JobTask::GetMaxConcurrency may be invoked while holding |
1181 | | * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B) |
1182 | | * if that lock is *never* held while calling back into JobHandle from any |
1183 | | * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or |
1184 | | * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle |
1185 | | * (B=>JobHandle::foo=>B deadlock). |
1186 | | * Embedders should override CreateJobImpl() instead of PostJob(). |
1187 | | */ |
1188 | | std::unique_ptr<JobHandle> PostJob( |
1189 | | TaskPriority priority, std::unique_ptr<JobTask> job_task, |
1190 | 0 | const SourceLocation& location = SourceLocation::Current()) { |
1191 | 0 | auto handle = CreateJob(priority, std::move(job_task), location); |
1192 | 0 | handle->NotifyConcurrencyIncrease(); |
1193 | 0 | return handle; |
1194 | 0 | } |
1195 | | |
1196 | | /** |
1197 | | * Creates and returns a JobHandle associated with a Job. Unlike PostJob(), |
1198 | | * this doesn't immediately schedule |worker_task| to run; the Job is then |
1199 | | * scheduled by calling either NotifyConcurrencyIncrease() or Join(). |
1200 | | * |
1201 | | * A sufficient CreateJob() implementation that uses the default Job provided |
1202 | | * in libplatform looks like: |
1203 | | * std::unique_ptr<JobHandle> CreateJob( |
1204 | | * TaskPriority priority, std::unique_ptr<JobTask> job_task) override { |
1205 | | * return v8::platform::NewDefaultJobHandle( |
1206 | | * this, priority, std::move(job_task), NumberOfWorkerThreads()); |
1207 | | * } |
1208 | | * |
1209 | | * Embedders should override CreateJobImpl() instead of CreateJob(). |
1210 | | */ |
1211 | | std::unique_ptr<JobHandle> CreateJob( |
1212 | | TaskPriority priority, std::unique_ptr<JobTask> job_task, |
1213 | 0 | const SourceLocation& location = SourceLocation::Current()) { |
1214 | 0 | return CreateJobImpl(priority, std::move(job_task), location); |
1215 | 0 | } |
1216 | | |
1217 | | /** |
1218 | | * Instantiates a ScopedBlockingCall to annotate a scope that may/will block. |
1219 | | */ |
1220 | | virtual std::unique_ptr<ScopedBlockingCall> CreateBlockingScope( |
1221 | 291 | BlockingType blocking_type) { |
1222 | 291 | return nullptr; |
1223 | 291 | } |
1224 | | |
1225 | | /** |
1226 | | * Monotonically increasing time in seconds from an arbitrary fixed point in |
1227 | | * the past. This function is expected to return at least |
1228 | | * millisecond-precision values. For this reason, |
1229 | | * it is recommended that the fixed point be no further in the past than |
1230 | | * the epoch. |
1231 | | **/ |
1232 | | virtual double MonotonicallyIncreasingTime() = 0; |
1233 | | |
1234 | | /** |
1235 | | * Current wall-clock time in milliseconds since epoch. Use |
1236 | | * CurrentClockTimeMillisHighResolution() when higher precision is |
1237 | | * required. |
1238 | | */ |
1239 | 0 | virtual int64_t CurrentClockTimeMilliseconds() { |
1240 | 0 | return static_cast<int64_t>(floor(CurrentClockTimeMillis())); |
1241 | 0 | } |
1242 | | |
1243 | | /** |
1244 | | * This function is deprecated and will be deleted. Use either |
1245 | | * CurrentClockTimeMilliseconds() or |
1246 | | * CurrentClockTimeMillisecondsHighResolution(). |
1247 | | */ |
1248 | | virtual double CurrentClockTimeMillis() = 0; |
1249 | | |
1250 | | /** |
1251 | | * Same as CurrentClockTimeMilliseconds(), but with more precision. |
1252 | | */ |
1253 | 0 | virtual double CurrentClockTimeMillisecondsHighResolution() { |
1254 | 0 | return CurrentClockTimeMillis(); |
1255 | 0 | } |
1256 | | |
1257 | | typedef void (*StackTracePrinter)(); |
1258 | | |
1259 | | /** |
1260 | | * Returns a function pointer that prints a stack trace of the current stack |
1261 | | * on invocation. Returning nullptr disables stack trace printing. |
1262 | | */ |
1263 | 0 | virtual StackTracePrinter GetStackTracePrinter() { return nullptr; } |
1264 | | |
1265 | | /** |
1266 | | * Returns an instance of a v8::TracingController. This must be non-nullptr. |
1267 | | */ |
1268 | | virtual TracingController* GetTracingController() = 0; |
1269 | | |
1270 | | /** |
1271 | | * Tells the embedder to generate and upload a crashdump during an unexpected |
1272 | | * but non-critical scenario. |
1273 | | */ |
1274 | 0 | virtual void DumpWithoutCrashing() {} |
1275 | | |
1276 | | /** |
1277 | | * Allows the embedder to observe sections with high throughput allocation |
1278 | | * operations. |
1279 | | */ |
1280 | | virtual HighAllocationThroughputObserver* |
1281 | 141k | GetHighAllocationThroughputObserver() { |
1282 | 141k | static HighAllocationThroughputObserver default_observer; |
1283 | 141k | return &default_observer; |
1284 | 141k | } |
1285 | | |
1286 | | protected: |
1287 | | /** |
1288 | | * Default implementation of current wall-clock time in milliseconds |
1289 | | * since epoch. Useful for implementing |CurrentClockTimeMillis| if |
1290 | | * nothing special needed. |
1291 | | * nothing special is needed. |
1292 | | V8_EXPORT static double SystemClockTimeMillis(); |
1293 | | |
1294 | | /** |
1295 | | * Creates and returns a JobHandle associated with a Job. |
1296 | | */ |
1297 | | virtual std::unique_ptr<JobHandle> CreateJobImpl( |
1298 | | TaskPriority priority, std::unique_ptr<JobTask> job_task, |
1299 | | const SourceLocation& location) = 0; |
1300 | | |
1301 | | /** |
1302 | | * Schedules a task with |priority| to be invoked on a worker thread. |
1303 | | */ |
1304 | | virtual void PostTaskOnWorkerThreadImpl(TaskPriority priority, |
1305 | | std::unique_ptr<Task> task, |
1306 | | const SourceLocation& location) = 0; |
1307 | | |
1308 | | /** |
1309 | | * Schedules a task with |priority| to be invoked on a worker thread after |
1310 | | * |delay_in_seconds| expires. |
1311 | | */ |
1312 | | virtual void PostDelayedTaskOnWorkerThreadImpl( |
1313 | | TaskPriority priority, std::unique_ptr<Task> task, |
1314 | | double delay_in_seconds, const SourceLocation& location) = 0; |
1315 | | }; |
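 | | |
 | | // Illustrative sketch (not part of this header): the usual embedder setup, |
 | | // assuming the default Platform shipped in libplatform |
 | | // (include/libplatform/libplatform.h); older V8 releases end with |
 | | // V8::ShutdownPlatform() instead of V8::DisposePlatform(). |
 | | // |
 | | //   std::unique_ptr<v8::Platform> platform = |
 | | //       v8::platform::NewDefaultPlatform(); |
 | | //   v8::V8::InitializePlatform(platform.get()); |
 | | //   v8::V8::Initialize(); |
 | | //   // ... create isolates and run scripts ... |
 | | //   v8::V8::Dispose(); |
 | | //   v8::V8::DisposePlatform(); |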
1316 | | |
1317 | | } // namespace v8 |
1318 | | |
1319 | | #endif // V8_V8_PLATFORM_H_ |