Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #ifndef V8_ISOLATE_H_
6 : #define V8_ISOLATE_H_
7 :
8 : #include <memory>
9 : #include <queue>
10 :
11 : #include "include/v8-debug.h"
12 : #include "src/allocation.h"
13 : #include "src/base/atomicops.h"
14 : #include "src/builtins/builtins.h"
15 : #include "src/contexts.h"
16 : #include "src/date.h"
17 : #include "src/debug/debug-interface.h"
18 : #include "src/execution.h"
19 : #include "src/frames.h"
20 : #include "src/futex-emulation.h"
21 : #include "src/global-handles.h"
22 : #include "src/handles.h"
23 : #include "src/heap/heap.h"
24 : #include "src/messages.h"
25 : #include "src/regexp/regexp-stack.h"
26 : #include "src/runtime/runtime.h"
27 : #include "src/zone/zone.h"
28 :
29 : class TestIsolate;
30 :
31 : namespace v8 {
32 :
33 : namespace base {
34 : class RandomNumberGenerator;
35 : }
36 :
37 : namespace debug {
38 : class ConsoleDelegate;
39 : }
40 :
41 : namespace internal {
42 :
43 : class AccessCompilerData;
44 : class AddressToIndexHashMap;
45 : class AstStringConstants;
46 : class BasicBlockProfiler;
47 : class Bootstrapper;
48 : class CancelableTaskManager;
49 : class CallInterfaceDescriptorData;
50 : class CodeAgingHelper;
51 : class CodeEventDispatcher;
52 : class CodeGenerator;
53 : class CodeRange;
54 : class CodeStubDescriptor;
55 : class CodeTracer;
56 : class CompilationCache;
57 : class CompilerDispatcher;
58 : class CompilationStatistics;
59 : class ContextSlotCache;
60 : class Counters;
61 : class CpuFeatures;
62 : class CpuProfiler;
63 : class DeoptimizerData;
64 : class DescriptorLookupCache;
65 : class Deserializer;
66 : class EmptyStatement;
67 : class ExternalCallbackScope;
68 : class ExternalReferenceTable;
69 : class Factory;
70 : class HandleScopeImplementer;
71 : class HeapObjectToIndexHashMap;
72 : class HeapProfiler;
73 : class HStatistics;
74 : class HTracer;
75 : class InlineRuntimeFunctionsTable;
76 : class InnerPointerToCodeCache;
77 : class Logger;
78 : class MaterializedObjectStore;
79 : class OptimizingCompileDispatcher;
80 : class RegExpStack;
81 : class RootVisitor;
82 : class RuntimeProfiler;
83 : class SaveContext;
84 : class SetupIsolateDelegate;
85 : class StatsTable;
86 : class StringTracker;
87 : class StubCache;
88 : class SweeperThread;
89 : class ThreadManager;
90 : class ThreadState;
91 : class ThreadVisitor; // Defined in v8threads.h
92 : class UnicodeCache;
93 : template <StateTag Tag> class VMState;
94 :
95 : // 'void function pointer', used to roundtrip the
96 : // ExternalReference::ExternalReferenceRedirector since we can not include
97 : // assembler.h, where it is defined, here.
98 : typedef void* ExternalReferenceRedirectorPointer();
99 :
100 :
101 : class Debug;
102 : class PromiseOnStack;
103 : class Redirection;
104 : class Simulator;
105 :
106 : namespace interpreter {
107 : class Interpreter;
108 : }
109 :
// Checks for a scheduled exception and, if one is present, promotes it to a
// pending exception and returns it as the failure sentinel.
#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)

// Macros for MaybeHandle.

// Like RETURN_FAILURE_IF_SCHEDULED_EXCEPTION, but returns |value| instead of
// the promoted exception object.
#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
  do {                                                      \
    Isolate* __isolate__ = (isolate);                       \
    if (__isolate__->has_scheduled_exception()) {           \
      __isolate__->PromoteScheduledException();             \
      return value;                                         \
    }                                                       \
  } while (false)

#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())

// Unwraps a MaybeHandle-producing |call|: returns the dereferenced result on
// success, or the heap's exception sentinel if the call left a pending
// exception.
#define RETURN_RESULT_OR_FAILURE(isolate, call)     \
  do {                                              \
    Handle<Object> __result__;                      \
    Isolate* __isolate__ = (isolate);               \
    if (!(call).ToHandle(&__result__)) {            \
      DCHECK(__isolate__->has_pending_exception()); \
      return __isolate__->heap()->exception();      \
    }                                               \
    return *__result__;                             \
  } while (false)

// Assigns the result of |call| to |dst|, or returns |value| if the call
// failed (in which case a pending exception must be set).
#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
  do {                                                              \
    if (!(call).ToHandle(&dst)) {                                   \
      DCHECK((isolate)->has_pending_exception());                   \
      return value;                                                 \
    }                                                               \
  } while (false)

#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call)          \
  do {                                                                  \
    Isolate* __isolate__ = (isolate);                                   \
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call,            \
                                     __isolate__->heap()->exception()); \
  } while (false)

#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())

// Throws a freshly created error; |call| names a Factory error constructor,
// e.g. NewTypeError(...). Returns MaybeHandle<T>() via Isolate::Throw<T>.
#define THROW_NEW_ERROR(isolate, call, T)                       \
  do {                                                          \
    Isolate* __isolate__ = (isolate);                           \
    return __isolate__->Throw<T>(__isolate__->factory()->call); \
  } while (false)

#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)         \
  do {                                                        \
    Isolate* __isolate__ = (isolate);                         \
    return __isolate__->Throw(*__isolate__->factory()->call); \
  } while (false)

// Returns |value| if |call| produced an empty (null) handle; a pending
// exception must then be set.
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
  do {                                                  \
    if ((call).is_null()) {                             \
      DCHECK((isolate)->has_pending_exception());       \
      return value;                                     \
    }                                                   \
  } while (false)

// NOTE: the do { ... } while (false) wrapper deliberately omits a trailing
// semicolon so the call site supplies it.  The previous definition ended in
// "while (false);", which injected an extra empty statement and broke
// unbraced "if (x) RETURN_FAILURE_ON_EXCEPTION(...); else ..." call sites.
#define RETURN_FAILURE_ON_EXCEPTION(isolate, call)               \
  do {                                                           \
    Isolate* __isolate__ = (isolate);                            \
    RETURN_ON_EXCEPTION_VALUE(__isolate__, call,                 \
                              __isolate__->heap()->exception()); \
  } while (false)

#define RETURN_ON_EXCEPTION(isolate, call, T) \
  RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
190 :
191 :
// X-macro over the isolate-internal addresses that are exposed by name:
// C(CamelCaseName, hacker_style_name).  Used below to generate the
// Isolate::AddressId enum (k<CamelCaseName>Address values); each entry pairs
// an enum-friendly name with the corresponding ThreadLocalTop field name.
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C)                \
  C(Handler, handler)                                   \
  C(CEntryFP, c_entry_fp)                               \
  C(CFunction, c_function)                              \
  C(Context, context)                                   \
  C(PendingException, pending_exception)                \
  C(PendingHandlerContext, pending_handler_context)     \
  C(PendingHandlerCode, pending_handler_code)           \
  C(PendingHandlerOffset, pending_handler_offset)       \
  C(PendingHandlerFP, pending_handler_fp)               \
  C(PendingHandlerSP, pending_handler_sp)               \
  C(ExternalCaughtException, external_caught_exception) \
  C(JSEntrySP, js_entry_sp)
205 :
// Runs a for-loop in batches of 1024 iterations, opening a fresh HandleScope
// for each batch so handles allocated inside |body| are released
// periodically instead of accumulating for the entire loop.  The arguments
// mirror a regular for statement: |init| declares the loop variable,
// |limit_check| is the continuation condition, |increment| advances it.
#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var,      \
                              limit_check, increment, body)                \
  do {                                                                     \
    loop_var_type init;                                                    \
    loop_var_type for_with_handle_limit = loop_var;                        \
    Isolate* for_with_handle_isolate = isolate;                            \
    while (limit_check) {                                                  \
      for_with_handle_limit += 1024;                                       \
      HandleScope loop_scope(for_with_handle_isolate);                     \
      for (; limit_check && loop_var < for_with_handle_limit; increment) { \
        body                                                               \
      }                                                                    \
    }                                                                      \
  } while (false)
220 :
221 : // Platform-independent, reliable thread identifier.
// Platform-independent, reliable thread identifier.  The id is stored in an
// Atomic32 and accessed only via NoBarrier loads/stores, so a ThreadId can be
// read and written from different threads without tearing (no ordering
// guarantees are implied).
class ThreadId {
 public:
  // Creates an invalid ThreadId.
  ThreadId() { base::NoBarrier_Store(&id_, kInvalidId); }

  // Atomically copies the id from |other|.
  ThreadId& operator=(const ThreadId& other) {
    base::NoBarrier_Store(&id_, base::NoBarrier_Load(&other.id_));
    return *this;
  }

  // Returns ThreadId for current thread.
  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }

  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
  static ThreadId Invalid() { return ThreadId(kInvalidId); }

  // Compares ThreadIds for equality.
  INLINE(bool Equals(const ThreadId& other) const) {
    return base::NoBarrier_Load(&id_) == base::NoBarrier_Load(&other.id_);
  }

  // Checks whether this ThreadId refers to any thread.
  INLINE(bool IsValid() const) {
    return base::NoBarrier_Load(&id_) != kInvalidId;
  }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::GetCurrentThreadId).
  int ToInteger() const { return static_cast<int>(base::NoBarrier_Load(&id_)); }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::TerminateExecution).
  static ThreadId FromInteger(int id) { return ThreadId(id); }

 private:
  // Sentinel for "no thread"; Invalid() and default construction use it.
  static const int kInvalidId = -1;

  explicit ThreadId(int id) { base::NoBarrier_Store(&id_, id); }

  static int AllocateThreadId();

  V8_EXPORT_PRIVATE static int GetCurrentThreadId();

  // The raw id; kInvalidId when this ThreadId refers to no thread.
  base::Atomic32 id_;

  // Highest id handed out so far; presumably advanced by AllocateThreadId()
  // (defined elsewhere) -- confirm in isolate.cc.
  static base::Atomic32 highest_thread_id_;

  friend class Isolate;
};
271 :
272 :
// Expands to an inline setter/getter pair for a member field named |name|_:
// set_<name>(v) and <name>().
#define FIELD_ACCESSOR(type, name)                \
  inline void set_##name(type v) { name##_ = v; } \
  inline type name() const { return name##_; }
276 :
277 :
// Per-thread state kept by an Isolate for each thread that runs in it:
// current context, pending/scheduled exceptions, the C-entry/JS-entry stack
// frame pointers, and the chain of C++ TryCatch handlers.  Fields are public
// and accessed by name from Isolate (via the THREAD_LOCAL_TOP_* macros), so
// do not rename or reorder them casually.
class ThreadLocalTop BASE_EMBEDDED {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop();

  // Initialize the thread data.
  void Initialize();

  // Get the top C++ try catch handler or NULL if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack. If such an
  // address is needed, use try_catch_handler_address.
  FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)

  // Get the address of the top C++ try catch handler or NULL if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack. When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer. When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  Address try_catch_handler_address() {
    return reinterpret_cast<Address>(
        v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
  }

  // Releases resources held by this thread data (defined elsewhere).
  void Free();

  Isolate* isolate_;
  // The context where the current execution method is created and for variable
  // lookups.
  Context* context_;
  ThreadId thread_id_;
  Object* pending_exception_;

  // Communication channel between Isolate::FindHandler and the CEntryStub.
  Context* pending_handler_context_;
  Code* pending_handler_code_;
  intptr_t pending_handler_offset_;
  Address pending_handler_fp_;
  Address pending_handler_sp_;

  // Communication channel between Isolate::Throw and message consumers.
  bool rethrowing_message_;
  Object* pending_message_obj_;

  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception. We may want to
  // unify them later.
  Object* scheduled_exception_;
  bool external_caught_exception_;
  SaveContext* save_context_;

  // Stack.
  Address c_entry_fp_;  // the frame pointer of the top c entry frame
  Address handler_;     // try-blocks are chained through the stack
  Address c_function_;  // C function that was called at c entry.

  // Throwing an exception may cause a Promise rejection. For this purpose
  // we keep track of a stack of nested promises and the corresponding
  // try-catch handlers.
  PromiseOnStack* promise_on_stack_;

#ifdef USE_SIMULATOR
  Simulator* simulator_;
#endif

  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
  // the external callback we're currently in
  ExternalCallbackScope* external_callback_scope_;
  StateTag current_vm_state_;

  // Call back function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_;

 private:
  void InitializeInternal();

  // Head of the TryCatch handler chain; exposed via FIELD_ACCESSOR above.
  v8::TryCatch* try_catch_handler_;
};
363 :
364 :
#if USE_SIMULATOR

// Per-isolate fields that only exist when running on the CPU simulator:
// V(type, name, initial_value).
#define ISOLATE_INIT_SIMULATOR_LIST(V)                    \
  V(bool, simulator_initialized, false)                   \
  V(base::CustomMatcherHashMap*, simulator_i_cache, NULL) \
  V(Redirection*, simulator_redirection, NULL)
#else

#define ISOLATE_INIT_SIMULATOR_LIST(V)

#endif


#ifdef DEBUG

// Per-isolate statistics arrays compiled in only for debug builds:
// V(element_type, name, length).
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)                      \
  V(CommentStatistic, paged_space_comments_statistics,        \
    CommentStatistic::kMaxComments + 1)                       \
  V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

// X-macro over the per-isolate array fields: V(element_type, name, length).
#define ISOLATE_INIT_ARRAY_LIST(V)                                             \
  /* SerializerDeserializer state. */                                          \
  V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
  V(int, suffix_table, (kBMMaxShift + 1))                                      \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
397 :
// Cache of heap objects used while producing debug output (string streams).
typedef List<HeapObject*> DebugObjectCache;

// X-macro over the scalar/pointer per-isolate fields:
// V(type, name, initial_value).  Expanded below into the accessors
// (GLOBAL_ACCESSOR) and, presumably, the field declarations further down in
// this header.
#define ISOLATE_INIT_LIST(V)                                                  \
  /* Assembler state. */                                                      \
  V(FatalErrorCallback, exception_behavior, nullptr)                          \
  V(OOMErrorCallback, oom_behavior, nullptr)                                  \
  V(LogEventCallback, event_logger, nullptr)                                  \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
  V(ExtensionCallback, wasm_module_callback, &NoExtension)                    \
  V(ExtensionCallback, wasm_instance_callback, &NoExtension)                  \
  V(ExtensionCallback, wasm_compile_callback, &NoExtension)                   \
  V(ExtensionCallback, wasm_instantiate_callback, &NoExtension)               \
  V(ExternalReferenceRedirectorPointer*, external_reference_redirector,       \
    nullptr)                                                                  \
  /* State for Relocatable. */                                                \
  V(Relocatable*, relocatable_top, nullptr)                                   \
  V(DebugObjectCache*, string_stream_debug_object_cache, nullptr)             \
  V(Object*, string_stream_current_security_token, nullptr)                   \
  V(ExternalReferenceTable*, external_reference_table, nullptr)               \
  V(intptr_t*, api_external_references, nullptr)                              \
  V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
  V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
  V(int, pending_microtask_count, 0)                                          \
  V(HStatistics*, hstatistics, nullptr)                                       \
  V(CompilationStatistics*, turbo_statistics, nullptr)                        \
  V(HTracer*, htracer, nullptr)                                               \
  V(CodeTracer*, code_tracer, nullptr)                                        \
  V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
  V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
  V(const v8::StartupData*, snapshot_blob, nullptr)                           \
  V(int, code_and_metadata_size, 0)                                           \
  V(int, bytecode_and_metadata_size, 0)                                       \
  /* true if being profiled. Causes collection of extra compile info. */      \
  V(bool, is_profiling, false)                                                \
  /* true if a trace is being formatted through Error.prepareStackTrace. */   \
  V(bool, formatting_stack_trace, false)                                      \
  /* Perform side effect checks on function call and API callbacks. */        \
  V(bool, needs_side_effect_check, false)                                     \
  /* Current code coverage mode */                                            \
  V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort)  \
  V(int, last_stack_frame_info_id, 0)                                         \
  ISOLATE_INIT_SIMULATOR_LIST(V)

// Expands to an inline setter/getter pair that forwards to the field
// |name|_ inside thread_local_top_.
#define THREAD_LOCAL_TOP_ACCESSOR(type, name)                       \
  inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
  inline type name() const { return thread_local_top_.name##_; }

// Expands to a <name>_address() accessor returning a pointer to the field
// |name|_ inside thread_local_top_.
#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
  type* name##_address() { return &thread_local_top_.name##_; }
447 :
448 :
449 : class Isolate {
450 : // These forward declarations are required to make the friend declarations in
451 : // PerIsolateThreadData work on some older versions of gcc.
452 : class ThreadDataTable;
453 : class EntryStackItem;
454 : public:
455 : ~Isolate();
456 :
  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.  Instances form a doubly-linked list
  // (next_/prev_) managed by the enclosing Isolate/ThreadDataTable.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(NULL),
#if USE_SIMULATOR
          simulator_(NULL),
#endif
          next_(NULL),
          prev_(NULL) { }
    ~PerIsolateThreadData();
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }

    FIELD_ACCESSOR(uintptr_t, stack_limit)
    FIELD_ACCESSOR(ThreadState*, thread_state)

#if USE_SIMULATOR
    FIELD_ACCESSOR(Simulator*, simulator)
#endif

    // True if this entry belongs to the given (isolate, thread) pair.
    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if USE_SIMULATOR
    Simulator* simulator_;
#endif

    // Intrusive doubly-linked list pointers; maintained by friends below.
    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };
506 :
507 :
508 : enum AddressId {
509 : #define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
510 : FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
511 : #undef DECLARE_ENUM
512 : kIsolateAddressCount
513 : };
514 :
515 : static void InitializeOncePerProcess();
516 :
517 : // Returns the PerIsolateThreadData for the current thread (or NULL if one is
518 : // not currently set).
519 : static PerIsolateThreadData* CurrentPerIsolateThreadData() {
520 : return reinterpret_cast<PerIsolateThreadData*>(
521 252570 : base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
522 : }
523 :
524 : // Returns the isolate inside which the current thread is running.
525 : INLINE(static Isolate* Current()) {
526 : DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
527 : Isolate* isolate = reinterpret_cast<Isolate*>(
528 12382412 : base::Thread::GetExistingThreadLocal(isolate_key_));
529 : DCHECK(isolate != NULL);
530 : return isolate;
531 : }
532 :
533 : // Usually called by Init(), but can be called early e.g. to allow
534 : // testing components that require logging but not the whole
535 : // isolate.
536 : //
537 : // Safe to call more than once.
538 : void InitializeLoggingAndCounters();
539 :
540 : bool Init(Deserializer* des);
541 :
542 : // True if at least one thread Enter'ed this isolate.
543 238959 : bool IsInUse() { return entry_stack_ != NULL; }
544 :
545 : // Destroys the non-default isolates.
546 : // Sets default isolate into "has_been_disposed" state rather then destroying,
547 : // for legacy API reasons.
548 : void TearDown();
549 :
550 : void ReleaseManagedObjects();
551 :
552 : static void GlobalTearDown();
553 :
554 : void ClearSerializerData();
555 :
556 : // Find the PerThread for this particular (isolate, thread) combination
557 : // If one does not yet exist, return null.
558 : PerIsolateThreadData* FindPerThreadDataForThisThread();
559 :
560 : // Find the PerThread for given (isolate, thread) combination
561 : // If one does not yet exist, return null.
562 : PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
563 :
564 : // Discard the PerThread for this particular (isolate, thread) combination
565 : // If one does not yet exist, no-op.
566 : void DiscardPerThreadDataForThisThread();
567 :
568 : // Returns the key used to store the pointer to the current isolate.
569 : // Used internally for V8 threads that do not execute JavaScript but still
570 : // are part of the domain of an isolate (like the context switcher).
571 : static base::Thread::LocalStorageKey isolate_key() {
572 : return isolate_key_;
573 : }
574 :
575 : // Returns the key used to store process-wide thread IDs.
576 : static base::Thread::LocalStorageKey thread_id_key() {
577 : return thread_id_key_;
578 : }
579 :
580 : static base::Thread::LocalStorageKey per_isolate_thread_data_key();
581 :
582 : // Mutex for serializing access to break control structures.
583 : base::RecursiveMutex* break_access() { return &break_access_; }
584 :
585 : Address get_address_from_id(AddressId id);
586 :
587 : // Access to top context (where the current function object was created).
588 17326802 : Context* context() { return thread_local_top_.context_; }
589 : inline void set_context(Context* context);
590 : Context** context_address() { return &thread_local_top_.context_; }
591 :
592 140896168 : THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
593 :
594 : // Access to current thread id.
595 185520 : THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
596 :
597 : // Interface to pending exception.
598 : inline Object* pending_exception();
599 : inline void set_pending_exception(Object* exception_obj);
600 : inline void clear_pending_exception();
601 :
602 : THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
603 :
604 : inline bool has_pending_exception();
605 :
606 : THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
607 : THREAD_LOCAL_TOP_ADDRESS(Code*, pending_handler_code)
608 : THREAD_LOCAL_TOP_ADDRESS(intptr_t, pending_handler_offset)
609 : THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
610 : THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
611 :
612 13763 : THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
613 :
614 : v8::TryCatch* try_catch_handler() {
615 24831468 : return thread_local_top_.try_catch_handler();
616 : }
617 : bool* external_caught_exception_address() {
618 : return &thread_local_top_.external_caught_exception_;
619 : }
620 :
621 : THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)
622 :
623 : inline void clear_pending_message();
624 : Address pending_message_obj_address() {
625 : return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
626 : }
627 :
628 : inline Object* scheduled_exception();
629 : inline bool has_scheduled_exception();
630 : inline void clear_scheduled_exception();
631 :
632 : bool IsJavaScriptHandlerOnTop(Object* exception);
633 : bool IsExternalHandlerOnTop(Object* exception);
634 :
635 : inline bool is_catchable_by_javascript(Object* exception);
636 : inline bool is_catchable_by_wasm(Object* exception);
637 :
638 : // JS execution stack (see frames.h).
639 : static Address c_entry_fp(ThreadLocalTop* thread) {
640 : return thread->c_entry_fp_;
641 : }
642 : static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
643 : Address c_function() { return thread_local_top_.c_function_; }
644 :
645 : inline Address* c_entry_fp_address() {
646 : return &thread_local_top_.c_entry_fp_;
647 : }
648 : inline Address* handler_address() { return &thread_local_top_.handler_; }
649 : inline Address* c_function_address() {
650 : return &thread_local_top_.c_function_;
651 : }
652 :
653 : // Bottom JS entry.
654 : Address js_entry_sp() {
655 : return thread_local_top_.js_entry_sp_;
656 : }
657 : inline Address* js_entry_sp_address() {
658 : return &thread_local_top_.js_entry_sp_;
659 : }
660 :
661 : // Returns the global object of the current context. It could be
662 : // a builtin object, or a JS global object.
663 : inline Handle<JSGlobalObject> global_object();
664 :
665 : // Returns the global proxy object of the current context.
666 : inline Handle<JSObject> global_proxy();
667 :
668 : static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
669 6863 : void FreeThreadResources() { thread_local_top_.Free(); }
670 :
671 : // This method is called by the api after operations that may throw
672 : // exceptions. If an exception was thrown and not handled by an external
673 : // handler the exception is scheduled to be rethrown when we return to running
674 : // JavaScript code. If an exception is scheduled true is returned.
675 : bool OptionalRescheduleException(bool is_bottom_call);
676 :
677 : // Push and pop a promise and the current try-catch handler.
678 : void PushPromise(Handle<JSObject> promise);
679 : void PopPromise();
680 :
681 : // Return the relevant Promise that a throw/rejection pertains to, based
682 : // on the contents of the Promise stack
683 : Handle<Object> GetPromiseOnStackOnThrow();
684 :
685 : // Heuristically guess whether a Promise is handled by user catch handler
686 : bool PromiseHasUserDefinedRejectHandler(Handle<Object> promise);
687 :
  // RAII scope around the isolate's pending exception: holds a handle to it
  // for the scope's lifetime (pending_exception_).  Constructor/destructor
  // are defined out of line (inline elsewhere); presumably they save and
  // restore the pending exception -- confirm in isolate-inl.h.
  class ExceptionScope {
   public:
    // Scope currently can only be used for regular exceptions,
    // not termination exception.
    inline explicit ExceptionScope(Isolate* isolate);
    inline ~ExceptionScope();

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
  };
699 :
700 : void SetCaptureStackTraceForUncaughtExceptions(
701 : bool capture,
702 : int frame_limit,
703 : StackTrace::StackTraceOptions options);
704 :
705 : void SetAbortOnUncaughtExceptionCallback(
706 : v8::Isolate::AbortOnUncaughtExceptionCallback callback);
707 :
708 : enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
709 : void PrintCurrentStackTrace(FILE* out);
710 : void PrintStack(StringStream* accumulator,
711 : PrintStackMode mode = kPrintStackVerbose);
712 : void PrintStack(FILE* out, PrintStackMode mode = kPrintStackVerbose);
713 : Handle<String> StackTraceString();
714 : // Stores a stack trace in a stack-allocated temporary buffer which will
715 : // end up in the minidump for debugging purposes.
716 : NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
717 : void* ptr2, unsigned int magic2));
718 : NO_INLINE(void PushStackTraceAndDie(unsigned int magic1, void* ptr1,
719 : void* ptr2, void* ptr3, void* ptr4,
720 : void* ptr5, void* ptr6, void* ptr7,
721 : void* ptr8, unsigned int magic2));
722 : NO_INLINE(void PushCodeObjectsAndDie(unsigned int magic, void* ptr1,
723 : void* ptr2, void* ptr3, void* ptr4,
724 : void* ptr5, void* ptr6, void* ptr7,
725 : void* ptr8, unsigned int magic2));
726 : Handle<FixedArray> CaptureCurrentStackTrace(
727 : int frame_limit, StackTrace::StackTraceOptions options);
728 : Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
729 : FrameSkipMode mode,
730 : Handle<Object> caller);
731 : MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
732 : Handle<JSReceiver> error_object);
733 : MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
734 : Handle<JSReceiver> error_object, FrameSkipMode mode,
735 : Handle<Object> caller);
736 : Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);
737 :
738 : // Returns if the given context may access the given global object. If
739 : // the result is false, the pending exception is guaranteed to be
740 : // set.
741 : bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
742 :
743 : void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
744 : void ReportFailedAccessCheck(Handle<JSObject> receiver);
745 :
746 : // Exception throwing support. The caller should use the result
747 : // of Throw() as its return value.
748 : Object* Throw(Object* exception, MessageLocation* location = NULL);
749 : Object* ThrowIllegalOperation();
750 :
751 : template <typename T>
752 : MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
753 : MessageLocation* location = NULL) {
754 389030 : Throw(*exception, location);
755 : return MaybeHandle<T>();
756 : }
757 :
758 : void set_console_delegate(debug::ConsoleDelegate* delegate) {
759 38231 : console_delegate_ = delegate;
760 : }
761 : debug::ConsoleDelegate* console_delegate() { return console_delegate_; }
762 :
763 : // Re-throw an exception. This involves no error reporting since error
764 : // reporting was handled when the exception was thrown originally.
765 : Object* ReThrow(Object* exception);
766 :
767 : // Find the correct handler for the current pending exception. This also
768 : // clears and returns the current pending exception.
769 : Object* UnwindAndFindHandler();
770 :
771 : // Tries to predict whether an exception will be caught. Note that this can
772 : // only produce an estimate, because it is undecidable whether a finally
773 : // clause will consume or re-throw an exception.
774 : enum CatchType {
775 : NOT_CAUGHT,
776 : CAUGHT_BY_JAVASCRIPT,
777 : CAUGHT_BY_EXTERNAL,
778 : CAUGHT_BY_DESUGARING,
779 : CAUGHT_BY_PROMISE,
780 : CAUGHT_BY_ASYNC_AWAIT
781 : };
782 : CatchType PredictExceptionCatcher();
783 :
784 : void ScheduleThrow(Object* exception);
785 : // Re-set pending message, script and positions reported to the TryCatch
786 : // back to the TLS for re-use when rethrowing.
787 : void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
788 : // Un-schedule an exception that was caught by a TryCatch handler.
789 : void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
790 : void ReportPendingMessages();
791 : // Return pending location if any or unfilled structure.
792 : MessageLocation GetMessageLocation();
793 :
794 : // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
795 : Object* PromoteScheduledException();
796 :
797 : // Attempts to compute the current source location, storing the
798 : // result in the target out parameter. The source location is attached to a
799 : // Message object as the location which should be shown to the user. It's
800 : // typically the top-most meaningful location on the stack.
801 : bool ComputeLocation(MessageLocation* target);
802 : bool ComputeLocationFromException(MessageLocation* target,
803 : Handle<Object> exception);
804 : bool ComputeLocationFromStackTrace(MessageLocation* target,
805 : Handle<Object> exception);
806 :
807 : Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
808 : MessageLocation* location);
809 :
810 : // Out of resource exception helpers.
811 : Object* StackOverflow();
812 : Object* TerminateExecution();
813 : void CancelTerminateExecution();
814 :
815 : void RequestInterrupt(InterruptCallback callback, void* data);
816 : void InvokeApiInterruptCallbacks();
817 :
818 : // Administration
819 : void Iterate(RootVisitor* v);
820 : void Iterate(RootVisitor* v, ThreadLocalTop* t);
821 : char* Iterate(RootVisitor* v, char* t);
822 : void IterateThread(ThreadVisitor* v, char* t);
823 :
824 : // Returns the current native context.
825 : inline Handle<Context> native_context();
826 : inline Context* raw_native_context();
827 :
828 : // Returns the native context of the calling JavaScript code. That
829 : // is, the native context of the top-most JavaScript frame.
830 : Handle<Context> GetCallingNativeContext();
831 :
832 : void RegisterTryCatchHandler(v8::TryCatch* that);
833 : void UnregisterTryCatchHandler(v8::TryCatch* that);
834 :
835 : char* ArchiveThread(char* to);
836 : char* RestoreThread(char* from);
837 :
838 : static const int kUC16AlphabetSize = 256; // See StringSearchBase.
839 : static const int kBMMaxShift = 250; // See StringSearchBase.
840 :
841 : // Accessors.
842 : #define GLOBAL_ACCESSOR(type, name, initialvalue) \
843 : inline type name() const { \
844 : DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
845 : return name##_; \
846 : } \
847 : inline void set_##name(type value) { \
848 : DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
849 : name##_ = value; \
850 : }
851 299392378 : ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
852 : #undef GLOBAL_ACCESSOR
853 :
854 : #define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
855 : inline type* name() { \
856 : DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
857 : return &(name##_)[0]; \
858 : }
859 : ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
860 : #undef GLOBAL_ARRAY_ACCESSOR
861 :
862 : #define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
863 : inline Handle<type> name(); \
864 : inline bool is_##name(type* value);
865 : NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
866 : #undef NATIVE_CONTEXT_FIELD_ACCESSOR
867 :
868 26773 : Bootstrapper* bootstrapper() { return bootstrapper_; }
869 12910183 : Counters* counters() {
870 : // Call InitializeLoggingAndCounters() if logging is needed before
871 : // the isolate is fully initialized.
872 : DCHECK(counters_ != NULL);
873 12910183 : return counters_;
874 : }
875 14799 : RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
876 : CompilationCache* compilation_cache() { return compilation_cache_; }
877 1659 : Logger* logger() {
878 : // Call InitializeLoggingAndCounters() if logging is needed before
879 : // the isolate is fully initialized.
880 : DCHECK(logger_ != NULL);
881 1659 : return logger_;
882 : }
883 717107 : StackGuard* stack_guard() { return &stack_guard_; }
884 2506642291 : Heap* heap() { return &heap_; }
885 : StatsTable* stats_table();
886 : StubCache* load_stub_cache() { return load_stub_cache_; }
887 : StubCache* store_stub_cache() { return store_stub_cache_; }
888 : CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
889 : DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
890 : bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
891 : void set_deoptimizer_lazy_throw(bool value) {
892 1010 : deoptimizer_lazy_throw_ = value;
893 : }
894 51425 : ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
895 : MaterializedObjectStore* materialized_object_store() {
896 : return materialized_object_store_;
897 : }
898 :
899 : ContextSlotCache* context_slot_cache() {
900 : return context_slot_cache_;
901 : }
902 :
903 299935173 : DescriptorLookupCache* descriptor_lookup_cache() {
904 299935173 : return descriptor_lookup_cache_;
905 : }
906 :
907 3182025018 : HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
908 :
909 : HandleScopeImplementer* handle_scope_implementer() {
910 : DCHECK(handle_scope_implementer_);
911 : return handle_scope_implementer_;
912 : }
913 :
914 2105281 : UnicodeCache* unicode_cache() {
915 2105281 : return unicode_cache_;
916 : }
917 :
918 : InnerPointerToCodeCache* inner_pointer_to_code_cache() {
919 : return inner_pointer_to_code_cache_;
920 : }
921 :
922 : GlobalHandles* global_handles() { return global_handles_; }
923 :
924 228114 : EternalHandles* eternal_handles() { return eternal_handles_; }
925 :
926 25436 : ThreadManager* thread_manager() { return thread_manager_; }
927 :
928 : unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
929 : return &jsregexp_uncanonicalize_;
930 : }
931 :
932 : unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
933 : return &jsregexp_canonrange_;
934 : }
935 :
936 : RuntimeState* runtime_state() { return &runtime_state_; }
937 :
938 55186 : Builtins* builtins() { return &builtins_; }
939 :
940 : unibrow::Mapping<unibrow::Ecma262Canonicalize>*
941 : regexp_macro_assembler_canonicalize() {
942 : return ®exp_macro_assembler_canonicalize_;
943 : }
944 :
945 : RegExpStack* regexp_stack() { return regexp_stack_; }
946 :
947 : size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
948 : void IncreaseTotalRegexpCodeGenerated(int size) {
949 92181 : total_regexp_code_generated_ += size;
950 : }
951 :
952 : List<int>* regexp_indices() { return ®exp_indices_; }
953 :
954 : unibrow::Mapping<unibrow::Ecma262Canonicalize>*
955 : interp_canonicalize_mapping() {
956 : return ®exp_macro_assembler_canonicalize_;
957 : }
958 :
959 21440851 : Debug* debug() { return debug_; }
960 :
961 : bool* is_profiling_address() { return &is_profiling_; }
962 : CodeEventDispatcher* code_event_dispatcher() const {
963 : return code_event_dispatcher_.get();
964 : }
965 : HeapProfiler* heap_profiler() const { return heap_profiler_; }
966 :
967 : #ifdef DEBUG
968 : HistogramInfo* heap_histograms() { return heap_histograms_; }
969 :
970 : JSObject::SpillInformation* js_spill_information() {
971 : return &js_spill_information_;
972 : }
973 : #endif
974 :
975 93207202 : Factory* factory() { return reinterpret_cast<Factory*>(this); }
976 :
977 : static const int kJSRegexpStaticOffsetsVectorSize = 128;
978 :
979 189059865 : THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
980 :
981 1083627677 : THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
982 :
983 : void SetData(uint32_t slot, void* data) {
984 : DCHECK(slot < Internals::kNumIsolateDataSlots);
985 : embedder_data_[slot] = data;
986 : }
987 : void* GetData(uint32_t slot) {
988 : DCHECK(slot < Internals::kNumIsolateDataSlots);
989 : return embedder_data_[slot];
990 : }
991 :
992 75157306 : bool serializer_enabled() const { return serializer_enabled_; }
993 : bool snapshot_available() const {
994 349915 : return snapshot_blob_ != NULL && snapshot_blob_->raw_size != 0;
995 : }
996 :
997 : bool IsDead() { return has_fatal_error_; }
998 12 : void SignalFatalError() { has_fatal_error_ = true; }
999 :
1000 : bool use_crankshaft();
1001 :
1002 : bool initialized_from_snapshot() { return initialized_from_snapshot_; }
1003 :
1004 : bool NeedsSourcePositionsForProfiling() const;
1005 :
1006 9097283 : bool is_best_effort_code_coverage() const {
1007 56 : return code_coverage_mode() == debug::Coverage::kBestEffort;
1008 : }
1009 :
1010 879777 : bool is_precise_count_code_coverage() const {
1011 : return code_coverage_mode() == debug::Coverage::kPreciseCount;
1012 : }
1013 :
1014 180720 : bool is_precise_binary_code_coverage() const {
1015 : return code_coverage_mode() == debug::Coverage::kPreciseBinary;
1016 : }
1017 :
1018 : void SetCodeCoverageList(Object* value);
1019 :
1020 : double time_millis_since_init() {
1021 122618 : return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
1022 : }
1023 :
1024 : DateCache* date_cache() {
1025 : return date_cache_;
1026 : }
1027 :
1028 : void set_date_cache(DateCache* date_cache) {
1029 : if (date_cache != date_cache_) {
1030 : delete date_cache_;
1031 : }
1032 : date_cache_ = date_cache;
1033 : }
1034 :
1035 : Map* get_initial_js_array_map(ElementsKind kind);
1036 :
1037 : static const int kProtectorValid = 1;
1038 : static const int kProtectorInvalid = 0;
1039 :
1040 : bool IsFastArrayConstructorPrototypeChainIntact();
1041 : inline bool IsArraySpeciesLookupChainIntact();
1042 : bool IsIsConcatSpreadableLookupChainIntact();
1043 : bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
1044 : inline bool IsStringLengthOverflowIntact();
1045 : inline bool IsArrayIteratorLookupChainIntact();
1046 :
1047 : // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
1048 : inline bool IsFastArrayIterationIntact();
1049 :
1050 : // Make sure we do check for neutered array buffers.
1051 : inline bool IsArrayBufferNeuteringIntact();
1052 :
1053 : // On intent to set an element in object, make sure that appropriate
1054 : // notifications occur if the set is on the elements of the array or
1055 : // object prototype. Also ensure that changes to prototype chain between
1056 : // Array and Object fire notifications.
1057 : void UpdateArrayProtectorOnSetElement(Handle<JSObject> object);
1058 : void UpdateArrayProtectorOnSetLength(Handle<JSObject> object) {
1059 1273652 : UpdateArrayProtectorOnSetElement(object);
1060 : }
1061 : void UpdateArrayProtectorOnSetPrototype(Handle<JSObject> object) {
1062 4155243 : UpdateArrayProtectorOnSetElement(object);
1063 : }
1064 : void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
1065 464831 : UpdateArrayProtectorOnSetElement(object);
1066 : }
1067 : void InvalidateArraySpeciesProtector();
1068 : void InvalidateIsConcatSpreadableProtector();
1069 : void InvalidateStringLengthOverflowProtector();
1070 : void InvalidateArrayIteratorProtector();
1071 : void InvalidateArrayBufferNeuteringProtector();
1072 :
1073 : // Returns true if array is the initial array prototype in any native context.
1074 : bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
1075 :
1076 : V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
1077 : int index);
1078 :
1079 : AccessCompilerData* access_compiler_data() { return access_compiler_data_; }
1080 :
1081 : void IterateDeferredHandles(RootVisitor* visitor);
1082 : void LinkDeferredHandles(DeferredHandles* deferred_handles);
1083 : void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
1084 :
1085 : #ifdef DEBUG
1086 : bool IsDeferredHandle(Object** location);
1087 : #endif // DEBUG
1088 :
1089 4855 : bool concurrent_recompilation_enabled() {
1090 : // Thread is only available with flag enabled.
1091 : DCHECK(optimizing_compile_dispatcher_ == NULL ||
1092 : FLAG_concurrent_recompilation);
1093 4855 : return optimizing_compile_dispatcher_ != NULL;
1094 : }
1095 :
1096 27337 : OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
1097 27337 : return optimizing_compile_dispatcher_;
1098 : }
1099 :
1100 : int id() const { return static_cast<int>(id_); }
1101 :
1102 : HStatistics* GetHStatistics();
1103 : CompilationStatistics* GetTurboStatistics();
1104 : HTracer* GetHTracer();
1105 : CodeTracer* GetCodeTracer();
1106 :
1107 : void DumpAndResetStats();
1108 :
1109 : FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
1110 : void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
1111 0 : function_entry_hook_ = function_entry_hook;
1112 : }
1113 :
1114 : void* stress_deopt_count_address() { return &stress_deopt_count_; }
1115 :
1116 : V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
1117 :
1118 : // Generates a random number that is non-zero when masked
1119 : // with the provided mask.
1120 : int GenerateIdentityHash(uint32_t mask);
1121 :
1122 : // Given an address occupied by a live code object, return that object.
1123 : Code* FindCodeObject(Address a);
1124 :
  // Returns a fresh id for an optimization attempt. The counter is kept
  // inside Smi range: once the *next* value would no longer be a valid
  // Smi, the counter wraps back to 0, so ids may eventually repeat.
  int NextOptimizationId() {
    int id = next_optimization_id_++;
    if (!Smi::IsValid(next_optimization_id_)) {
      next_optimization_id_ = 0;
    }
    return id;
  }
1132 :
1133 : void AddCallCompletedCallback(CallCompletedCallback callback);
1134 : void RemoveCallCompletedCallback(CallCompletedCallback callback);
1135 : void FireCallCompletedCallback();
1136 :
1137 : void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
1138 : void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
1139 : inline void FireBeforeCallEnteredCallback();
1140 :
1141 : void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
1142 : void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
1143 : void FireMicrotasksCompletedCallback();
1144 :
1145 : void SetPromiseRejectCallback(PromiseRejectCallback callback);
1146 : void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
1147 : v8::PromiseRejectEvent event);
1148 :
1149 : void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
1150 : MaybeHandle<Object>* result,
1151 : MaybeHandle<Object>* maybe_exception);
1152 : void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
1153 : MaybeHandle<Object>* result,
1154 : MaybeHandle<Object>* maybe_exception);
1155 : void EnqueueMicrotask(Handle<Object> microtask);
1156 : void RunMicrotasks();
1157 : bool IsRunningMicrotasks() const { return is_running_microtasks_; }
1158 :
1159 : Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
1160 : Handle<String> name, bool private_symbol);
1161 :
1162 : void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
1163 : void CountUsage(v8::Isolate::UseCounterFeature feature);
1164 :
1165 : BasicBlockProfiler* GetOrCreateBasicBlockProfiler();
1166 : BasicBlockProfiler* basic_block_profiler() { return basic_block_profiler_; }
1167 :
1168 : std::string GetTurboCfgFileName();
1169 :
1170 : #if TRACE_MAPS
1171 : int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
1172 : #endif
1173 :
1174 : Address promise_hook_or_debug_is_active_address() {
1175 : return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
1176 : }
1177 :
1178 : void DebugStateUpdated();
1179 :
1180 : void SetPromiseHook(PromiseHook hook);
1181 : void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
1182 : Handle<Object> parent);
1183 :
1184 : // Support for dynamically disabling tail call elimination.
1185 : Address is_tail_call_elimination_enabled_address() {
1186 : return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
1187 : }
1188 : bool is_tail_call_elimination_enabled() const {
1189 : return is_tail_call_elimination_enabled_;
1190 : }
1191 : void SetTailCallEliminationEnabled(bool enabled);
1192 :
1193 : void AddDetachedContext(Handle<Context> context);
1194 : void CheckDetachedContextsAfterGC();
1195 :
1196 : List<Object*>* partial_snapshot_cache() { return &partial_snapshot_cache_; }
1197 :
1198 : void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
1199 60698 : array_buffer_allocator_ = allocator;
1200 : }
1201 2900 : v8::ArrayBuffer::Allocator* array_buffer_allocator() const {
1202 2900 : return array_buffer_allocator_;
1203 : }
1204 :
1205 : FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }
1206 :
1207 : CancelableTaskManager* cancelable_task_manager() {
1208 : return cancelable_task_manager_;
1209 : }
1210 :
1211 : const AstStringConstants* ast_string_constants() const {
1212 : return ast_string_constants_;
1213 : }
1214 :
1215 65553 : interpreter::Interpreter* interpreter() const { return interpreter_; }
1216 :
1217 543 : AccountingAllocator* allocator() { return allocator_; }
1218 :
1219 : CompilerDispatcher* compiler_dispatcher() const {
1220 : return compiler_dispatcher_;
1221 : }
1222 :
1223 : // Clear all optimized code stored in native contexts.
1224 : void ClearOSROptimizedCode();
1225 :
1226 : // Ensure that a particular optimized code is evicted.
1227 : void EvictOSROptimizedCode(Code* code, const char* reason);
1228 :
1229 : bool IsInAnyContext(Object* object, uint32_t index);
1230 :
1231 : void SetHostImportModuleDynamicallyCallback(
1232 : HostImportModuleDynamicallyCallback callback);
1233 : void RunHostImportModuleDynamicallyCallback(Handle<String> referrer,
1234 : Handle<String> specifier,
1235 : Handle<JSPromise> promise);
1236 :
1237 : void SetRAILMode(RAILMode rail_mode);
1238 :
1239 : RAILMode rail_mode() { return rail_mode_.Value(); }
1240 :
1241 : double LoadStartTimeMs();
1242 :
1243 : void IsolateInForegroundNotification();
1244 :
1245 : void IsolateInBackgroundNotification();
1246 :
1247 : bool IsIsolateInBackground() { return is_isolate_in_background_; }
1248 :
1249 : PRINTF_FORMAT(2, 3) void PrintWithTimestamp(const char* format, ...);
1250 :
1251 : #ifdef USE_SIMULATOR
1252 : base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
1253 : #endif
1254 :
1255 60635 : void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
1256 : bool allow_atomics_wait() { return allow_atomics_wait_; }
1257 :
1258 : // List of native heap values allocated by the runtime as part of its
1259 : // implementation that must be freed at isolate deinit.
  class ManagedObjectFinalizer final {
   public:
    typedef void (*Deleter)(void*);
    // Invokes the registered deleter on the stored value. Assumes the
    // fields were populated (both start out nullptr); only Isolate,
    // as a friend, fills them in when registering a value.
    void Dispose() { deleter_(value_); }

   private:
    friend class Isolate;

    ManagedObjectFinalizer() {
      // Verifies the layout invariant documented below: the object's
      // address must coincide with the address of value_.
      DCHECK_EQ(reinterpret_cast<void*>(this),
                reinterpret_cast<void*>(&value_));
    }

    // value_ must be the first member
    void* value_ = nullptr;
    Deleter deleter_ = nullptr;
    // Intrusive doubly-linked-list hooks; the Isolate chains all
    // registered finalizers together for release at teardown.
    ManagedObjectFinalizer* prev_ = nullptr;
    ManagedObjectFinalizer* next_ = nullptr;
  };
1279 :
1280 : // Register a native value for destruction at isolate teardown.
1281 : ManagedObjectFinalizer* RegisterForReleaseAtTeardown(
1282 : void* value, ManagedObjectFinalizer::Deleter deleter);
1283 :
1284 : // Unregister a previously registered value from release at
1285 : // isolate teardown, deleting the ManagedObjectFinalizer.
1286 : // This transfers the responsibility of the previously managed value's
1287 : // deletion to the caller. Pass by pointer, because *finalizer_ptr gets
1288 : // reset to nullptr.
1289 : void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer** finalizer_ptr);
1290 :
1291 : // Used by mjsunit tests to force d8 to wait for certain things to run.
1292 945 : inline void IncrementWaitCountForTesting() { wait_count_++; }
1293 945 : inline void DecrementWaitCountForTesting() { wait_count_--; }
1294 : inline int GetWaitCountForTesting() { return wait_count_; }
1295 :
1296 : protected:
1297 : explicit Isolate(bool enable_serializer);
1298 : bool IsArrayOrObjectPrototype(Object* object);
1299 :
1300 : private:
1301 : friend struct GlobalState;
1302 : friend struct InitializeGlobalState;
1303 :
1304 : // These fields are accessed through the API, offsets must be kept in sync
1305 : // with v8::internal::Internals (in include/v8.h) constants. This is also
1306 : // verified in Isolate::Init() using runtime checks.
1307 : void* embedder_data_[Internals::kNumIsolateDataSlots];
1308 : Heap heap_;
1309 :
1310 : // The per-process lock should be acquired before the ThreadDataTable is
1311 : // modified.
  class ThreadDataTable {
   public:
    ThreadDataTable();
    ~ThreadDataTable();

    // Returns the entry for the given (isolate, thread) pair, if any.
    PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(PerIsolateThreadData* data);
    // Drops every entry belonging to |isolate| (used at isolate teardown).
    void RemoveAllThreads(Isolate* isolate);

   private:
    // Head of the entry list — presumably an intrusive linked list;
    // TODO(review): confirm against the definitions in isolate.cc.
    PerIsolateThreadData* list_;
  };
1325 :
1326 : // These items form a stack synchronously with threads Enter'ing and Exit'ing
1327 : // the Isolate. The top of the stack points to a thread which is currently
1328 : // running the Isolate. When the stack is empty, the Isolate is considered
1329 : // not entered by any thread and can be Disposed.
1330 : // If the same thread enters the Isolate more than once, the entry_count_
1331 : // is incremented rather then a new item pushed to the stack.
  class EntryStackItem {
   public:
    // Records the state to restore when the current Enter() is matched
    // by an Exit(): the previously active thread data, isolate, and the
    // next item down the entry stack.
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    // Number of times the same thread has Enter'ed this isolate;
    // incremented on re-entry instead of pushing a new item.
    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };
1350 :
1351 : static base::LazyMutex thread_data_table_mutex_;
1352 :
1353 : static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
1354 : static base::Thread::LocalStorageKey isolate_key_;
1355 : static base::Thread::LocalStorageKey thread_id_key_;
1356 : static ThreadDataTable* thread_data_table_;
1357 :
1358 : // A global counter for all generated Isolates, might overflow.
1359 : static base::Atomic32 isolate_counter_;
1360 :
1361 : #if DEBUG
1362 : static base::Atomic32 isolate_key_created_;
1363 : #endif
1364 :
1365 : void Deinit();
1366 :
1367 : static void SetIsolateThreadLocals(Isolate* isolate,
1368 : PerIsolateThreadData* data);
1369 :
1370 : // Find the PerThread for this particular (isolate, thread) combination.
1371 : // If one does not yet exist, allocate a new one.
1372 : PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
1373 :
1374 : // Initializes the current thread to run this Isolate.
1375 : // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
1376 : // at the same time, this should be prevented using external locking.
1377 : void Enter();
1378 :
1379 : // Exits the current thread. The previosuly entered Isolate is restored
1380 : // for the thread.
1381 : // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
1382 : // at the same time, this should be prevented using external locking.
1383 : void Exit();
1384 :
1385 : void InitializeThreadLocal();
1386 :
1387 : void MarkCompactPrologue(bool is_compacting,
1388 : ThreadLocalTop* archived_thread_data);
1389 : void MarkCompactEpilogue(bool is_compacting,
1390 : ThreadLocalTop* archived_thread_data);
1391 :
1392 : void FillCache();
1393 :
1394 : // Propagate pending exception message to the v8::TryCatch.
1395 : // If there is no external try-catch or message was successfully propagated,
1396 : // then return true.
1397 : bool PropagatePendingExceptionToExternalTryCatch();
1398 :
1399 : void RunMicrotasksInternal();
1400 :
1401 : const char* RAILModeName(RAILMode rail_mode) const {
1402 : switch (rail_mode) {
1403 : case PERFORMANCE_RESPONSE:
1404 : return "RESPONSE";
1405 : case PERFORMANCE_ANIMATION:
1406 : return "ANIMATION";
1407 : case PERFORMANCE_IDLE:
1408 : return "IDLE";
1409 : case PERFORMANCE_LOAD:
1410 : return "LOAD";
1411 : }
1412 : return "";
1413 : }
1414 :
1415 : // TODO(alph): Remove along with the deprecated GetCpuProfiler().
1416 : friend v8::CpuProfiler* v8::Isolate::GetCpuProfiler();
1417 : CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
1418 :
1419 : base::Atomic32 id_;
1420 : EntryStackItem* entry_stack_;
1421 : int stack_trace_nesting_level_;
1422 : StringStream* incomplete_message_;
1423 : Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
1424 : Bootstrapper* bootstrapper_;
1425 : RuntimeProfiler* runtime_profiler_;
1426 : CompilationCache* compilation_cache_;
1427 : Counters* counters_;
1428 : base::RecursiveMutex break_access_;
1429 : Logger* logger_;
1430 : StackGuard stack_guard_;
1431 : StatsTable* stats_table_;
1432 : StubCache* load_stub_cache_;
1433 : StubCache* store_stub_cache_;
1434 : CodeAgingHelper* code_aging_helper_;
1435 : DeoptimizerData* deoptimizer_data_;
1436 : bool deoptimizer_lazy_throw_;
1437 : MaterializedObjectStore* materialized_object_store_;
1438 : ThreadLocalTop thread_local_top_;
1439 : bool capture_stack_trace_for_uncaught_exceptions_;
1440 : int stack_trace_for_uncaught_exceptions_frame_limit_;
1441 : StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
1442 : ContextSlotCache* context_slot_cache_;
1443 : DescriptorLookupCache* descriptor_lookup_cache_;
1444 : HandleScopeData handle_scope_data_;
1445 : HandleScopeImplementer* handle_scope_implementer_;
1446 : UnicodeCache* unicode_cache_;
1447 : AccountingAllocator* allocator_;
1448 : InnerPointerToCodeCache* inner_pointer_to_code_cache_;
1449 : GlobalHandles* global_handles_;
1450 : EternalHandles* eternal_handles_;
1451 : ThreadManager* thread_manager_;
1452 : RuntimeState runtime_state_;
1453 : Builtins builtins_;
1454 : SetupIsolateDelegate* setup_delegate_;
1455 : unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
1456 : unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
1457 : unibrow::Mapping<unibrow::Ecma262Canonicalize>
1458 : regexp_macro_assembler_canonicalize_;
1459 : RegExpStack* regexp_stack_;
1460 : List<int> regexp_indices_;
1461 : DateCache* date_cache_;
1462 : CallInterfaceDescriptorData* call_descriptor_data_;
1463 : AccessCompilerData* access_compiler_data_;
1464 : base::RandomNumberGenerator* random_number_generator_;
1465 : base::AtomicValue<RAILMode> rail_mode_;
1466 : bool promise_hook_or_debug_is_active_;
1467 : PromiseHook promise_hook_;
1468 : HostImportModuleDynamicallyCallback host_import_module_dynamically_callback_;
1469 : base::Mutex rail_mutex_;
1470 : double load_start_time_ms_;
1471 :
1472 : // Whether the isolate has been created for snapshotting.
1473 : bool serializer_enabled_;
1474 :
1475 : // True if fatal error has been signaled for this isolate.
1476 : bool has_fatal_error_;
1477 :
1478 : // True if this isolate was initialized from a snapshot.
1479 : bool initialized_from_snapshot_;
1480 :
1481 : // True if ES2015 tail call elimination feature is enabled.
1482 : bool is_tail_call_elimination_enabled_;
1483 :
1484 : // True if the isolate is in background. This flag is used
1485 : // to prioritize between memory usage and latency.
1486 : bool is_isolate_in_background_;
1487 :
1488 : // Time stamp at initialization.
1489 : double time_millis_at_init_;
1490 :
1491 : #ifdef DEBUG
1492 : // A static array of histogram info for each type.
1493 : HistogramInfo heap_histograms_[LAST_TYPE + 1];
1494 : JSObject::SpillInformation js_spill_information_;
1495 : #endif
1496 :
1497 : Debug* debug_;
1498 : CpuProfiler* cpu_profiler_;
1499 : HeapProfiler* heap_profiler_;
1500 : std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
1501 : FunctionEntryHook function_entry_hook_;
1502 :
1503 : const AstStringConstants* ast_string_constants_;
1504 :
1505 : interpreter::Interpreter* interpreter_;
1506 :
1507 : CompilerDispatcher* compiler_dispatcher_;
1508 :
1509 : typedef std::pair<InterruptCallback, void*> InterruptEntry;
1510 : std::queue<InterruptEntry> api_interrupts_queue_;
1511 :
1512 : #define GLOBAL_BACKING_STORE(type, name, initialvalue) \
1513 : type name##_;
1514 : ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
1515 : #undef GLOBAL_BACKING_STORE
1516 :
1517 : #define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
1518 : type name##_[length];
1519 : ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
1520 : #undef GLOBAL_ARRAY_BACKING_STORE
1521 :
1522 : #ifdef DEBUG
1523 : // This class is huge and has a number of fields controlled by
1524 : // preprocessor defines. Make sure the offsets of these fields agree
1525 : // between compilation units.
1526 : #define ISOLATE_FIELD_OFFSET(type, name, ignored) \
1527 : static const intptr_t name##_debug_offset_;
1528 : ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
1529 : ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
1530 : #undef ISOLATE_FIELD_OFFSET
1531 : #endif
1532 :
1533 : DeferredHandles* deferred_handles_head_;
1534 : OptimizingCompileDispatcher* optimizing_compile_dispatcher_;
1535 :
1536 : // Counts deopt points if deopt_every_n_times is enabled.
1537 : unsigned int stress_deopt_count_;
1538 :
1539 : int next_optimization_id_;
1540 :
1541 : #if TRACE_MAPS
1542 : int next_unique_sfi_id_;
1543 : #endif
1544 :
1545 : // List of callbacks before a Call starts execution.
1546 : List<BeforeCallEnteredCallback> before_call_entered_callbacks_;
1547 :
1548 : // List of callbacks when a Call completes.
1549 : List<CallCompletedCallback> call_completed_callbacks_;
1550 :
1551 : // List of callbacks after microtasks were run.
1552 : List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
1553 : bool is_running_microtasks_;
1554 :
1555 : v8::Isolate::UseCounterCallback use_counter_callback_;
1556 : BasicBlockProfiler* basic_block_profiler_;
1557 :
1558 : List<Object*> partial_snapshot_cache_;
1559 :
1560 : v8::ArrayBuffer::Allocator* array_buffer_allocator_;
1561 :
1562 : FutexWaitListNode futex_wait_list_node_;
1563 :
1564 : CancelableTaskManager* cancelable_task_manager_;
1565 :
1566 : debug::ConsoleDelegate* console_delegate_ = nullptr;
1567 :
1568 : v8::Isolate::AbortOnUncaughtExceptionCallback
1569 : abort_on_uncaught_exception_callback_;
1570 :
1571 : #ifdef USE_SIMULATOR
1572 : base::Mutex simulator_i_cache_mutex_;
1573 : #endif
1574 :
1575 : bool allow_atomics_wait_;
1576 :
1577 : ManagedObjectFinalizer managed_object_finalizers_list_;
1578 :
1579 : size_t total_regexp_code_generated_;
1580 :
1581 : int wait_count_ = 0;
1582 :
1583 : friend class ExecutionAccess;
1584 : friend class HandleScopeImplementer;
1585 : friend class HeapTester;
1586 : friend class OptimizingCompileDispatcher;
1587 : friend class SweeperThread;
1588 : friend class ThreadManager;
1589 : friend class Simulator;
1590 : friend class StackGuard;
1591 : friend class ThreadId;
1592 : friend class v8::Isolate;
1593 : friend class v8::Locker;
1594 : friend class v8::Unlocker;
1595 : friend class v8::SnapshotCreator;
1596 : friend class ::TestIsolate;
1597 : friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
1598 : friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
1599 : const char*);
1600 :
1601 : DISALLOW_COPY_AND_ASSIGN(Isolate);
1602 : };
1603 :
1604 :
1605 : #undef FIELD_ACCESSOR
1606 : #undef THREAD_LOCAL_TOP_ACCESSOR
1607 :
1608 :
// Node in a stack (singly linked via prev_) of promises currently being
// handled; each node holds a handle to its promise and a link to the
// entry pushed before it.
class PromiseOnStack {
 public:
  PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
      : promise_(promise), prev_(prev) {}
  Handle<JSObject> promise() { return promise_; }
  PromiseOnStack* prev() { return prev_; }

 private:
  Handle<JSObject> promise_;
  PromiseOnStack* prev_;
};
1620 :
1621 :
1622 : // If the GCC version is 4.1.x or 4.2.x an additional field is added to the
1623 : // class as a work around for a bug in the generated code found with these
1624 : // versions of GCC. See V8 issue 122 for details.
// Scope object whose constructor and destructor are defined out of line
// (in isolate.cc); per the class name it saves the isolate's current
// context on entry and restores it on exit — confirm against the
// definitions there.
class SaveContext BASE_EMBEDDED {
 public:
  explicit SaveContext(Isolate* isolate);
  ~SaveContext();

  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(StandardFrame* frame) {
    // A zero c_entry_fp_ (no recorded C entry frame) counts as being
    // below any frame.
    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
  }

 private:
  Isolate* const isolate_;
  Handle<Context> context_;
  SaveContext* const prev_;
  Address c_entry_fp_;
};
1644 :
1645 :
// Debug-only scope: its destructor DCHECKs that the isolate's context
// is the same object it was at construction. Compiles to an empty
// no-op in release builds.
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  explicit AssertNoContextChange(Isolate* isolate);
  ~AssertNoContextChange() {
    DCHECK(isolate_->context() == *context_);
  }

 private:
  Isolate* isolate_;
  // Context captured at construction, compared against on destruction.
  Handle<Context> context_;
#else
 public:
  explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};
1662 :
1663 :
// RAII acquisition of the isolate's break_access() lock: locked for the
// lifetime of the scope. Static Lock/Unlock/TryLock are also exposed
// for callers that need manual control.
class ExecutionAccess BASE_EMBEDDED {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }

  // Non-blocking variant; returns whether the lock was acquired.
  static bool TryLock(Isolate* isolate) {
    return isolate->break_access()->TryLock();
  }

 private:
  Isolate* isolate_;
};
1681 :
1682 :
1683 : // Support for checking for stack-overflows.
// Support for checking for stack-overflows.
class StackLimitCheck BASE_EMBEDDED {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }

  // Use this to check for stack-overflows in C++ code.
  // True when the current stack position has crossed the real
  // (hardware) stack limit.
  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->real_climit();
  }

  // Use this to check for interrupt request in C++ code.
  // Compares against climit(), which the StackGuard lowers artificially
  // to signal pending interrupts.
  bool InterruptRequested() {
    StackGuard* stack_guard = isolate_->stack_guard();
    return GetCurrentStackPosition() < stack_guard->climit();
  }

  // Use this to check for stack-overflow when entering runtime from JS code.
  // |gap| reserves extra headroom below the limit.
  bool JsHasOverflowed(uintptr_t gap = 0) const;

 private:
  Isolate* isolate_;
};
1706 :
// Guard for runtime C++ code: if the stack has overflowed, schedule a
// StackOverflow exception on |isolate| and return |result_value| from
// the enclosing function.
#define STACK_CHECK(isolate, result_value) \
  do {                                     \
    StackLimitCheck stack_check(isolate);  \
    if (stack_check.HasOverflowed()) {     \
      isolate->StackOverflow();            \
      return result_value;                 \
    }                                      \
  } while (false)
1715 :
    1716 : // Support for temporarily postponing interrupts. When the outermost
    1717 : // postpone scope is left the interrupts will be re-enabled and any
    1718 : // interrupts that occurred while in the scope will be taken into
    1719 : // account.
    1720 : class PostponeInterruptsScope BASE_EMBEDDED {
    1721 :  public:
// Registers this scope with the isolate's StackGuard on construction and
// unregisters it on destruction. |intercept_mask| selects which
// StackGuard::InterruptFlag bits this scope postpones (all by default).
    1722 :   PostponeInterruptsScope(Isolate* isolate,
    1723 :                           int intercept_mask = StackGuard::ALL_INTERRUPTS)
    1724 4580157 :       : stack_guard_(isolate->stack_guard()),
    1725 :         intercept_mask_(intercept_mask),
    1726 4580157 :         intercepted_flags_(0) {
    1727 4580157 :     stack_guard_->PushPostponeInterruptsScope(this);
    1728 :   }
    1729 :
    1730 :   ~PostponeInterruptsScope() {
    1731 4580154 :     stack_guard_->PopPostponeInterruptsScope();
    1732 :   }
    1733 :
    1734 :   // Find the bottom-most scope that intercepts this interrupt.
    1735 :   // Return whether the interrupt has been intercepted.
    1736 :   bool Intercept(StackGuard::InterruptFlag flag);
    1737 :
    1738 :  private:
    1739 :   StackGuard* stack_guard_;
// Which interrupt flags this scope intercepts.
    1740 :   int intercept_mask_;
// Flags intercepted while this scope was active; starts at 0. Presumably
// updated by Intercept() (defined out of line) — the accumulated flags are
// what gets "taken into account" when the scope is popped.
    1741 :   int intercepted_flags_;
// Link to the next-outer scope; maintained by StackGuard (a friend), not here.
    1742 :   PostponeInterruptsScope* prev_;
    1743 :
    1744 :   friend class StackGuard;
    1745 : };
1746 :
1747 :
// Sink for code tracing output. When FLAG_redirect_code_traces is off, all
// output goes to stdout. When it is on, output is redirected to a file:
// either FLAG_redirect_code_traces_to, or a generated
// "code-<pid>-<isolate_id>.asm" name. Scope objects reference-count file
// open/close via scope_depth_ so nested tracing shares one FILE*.
    1748 : class CodeTracer final : public Malloced {
    1749 :  public:
    1750 0 :   explicit CodeTracer(int isolate_id)
    1751 :       : file_(NULL),
    1752 0 :         scope_depth_(0) {
    1753 0 :     if (!ShouldRedirect()) {
    1754 0 :       file_ = stdout;
    1755 0 :       return;
    1756 :     }
    1757 :
    1758 0 :     if (FLAG_redirect_code_traces_to == NULL) {
    1759 :       SNPrintF(filename_,
    1760 :                "code-%d-%d.asm",
    1761 :                base::OS::GetCurrentProcessId(),
    1762 0 :                isolate_id);
    1763 :     } else {
    1764 0 :       StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
    1765 :     }
    1766 :
// Writes zero characters to the target file — presumably to create/truncate
// it up front so later appends ("ab" mode in OpenFile) start clean; confirm
// against WriteChars.
    1767 0 :     WriteChars(filename_.start(), "", 0, false);
    1768 :   }
    1769 :
// RAII handle: opens the tracer's file on construction, closes (decrements
// the scope count) on destruction.
    1770 :   class Scope {
    1771 :    public:
    1772 0 :     explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
    1773 0 :     ~Scope() { tracer_->CloseFile(); }
    1774 :
    1775 0 :     FILE* file() const { return tracer_->file(); }
    1776 :
    1777 :    private:
    1778 :     CodeTracer* tracer_;
    1779 :   };
    1780 :
// Opens the redirect file (append mode) on the first nested open and bumps
// the scope depth. No-op when not redirecting (file_ is already stdout).
    1781 0 :   void OpenFile() {
    1782 0 :     if (!ShouldRedirect()) {
    1783 0 :       return;
    1784 :     }
    1785 :
    1786 0 :     if (file_ == NULL) {
// NOTE(review): if FOpen fails this leaves file_ == NULL, and the matching
// CloseFile() would then call fclose(NULL), which is undefined behavior —
// verify FOpen cannot fail here or guard the close.
    1787 0 :       file_ = base::OS::FOpen(filename_.start(), "ab");
    1788 :     }
    1789 :
    1790 0 :     scope_depth_++;
    1791 :   }
    1792 :
// Closes the redirect file when the outermost scope exits.
    1793 0 :   void CloseFile() {
    1794 0 :     if (!ShouldRedirect()) {
    1795 0 :       return;
    1796 :     }
    1797 :
    1798 0 :     if (--scope_depth_ == 0) {
    1799 0 :       fclose(file_);
    1800 0 :       file_ = NULL;
    1801 :     }
    1802 :   }
    1803 :
    1804 :   FILE* file() const { return file_; }
    1805 :
    1806 :  private:
    1807 :   static bool ShouldRedirect() {
    1808 0 :     return FLAG_redirect_code_traces;
    1809 :   }
    1810 :
// Target filename; only meaningful when redirecting.
    1811 :   EmbeddedVector<char, 128> filename_;
// stdout when not redirecting; otherwise NULL until the first OpenFile().
    1812 :   FILE* file_;
// Number of live Scope objects; the file is closed when this drops to 0.
    1813 :   int scope_depth_;
    1814 : };
1815 :
1816 : } // namespace internal
1817 : } // namespace v8
1818 :
1819 : #endif // V8_ISOLATE_H_
|