Line | Count | Source |
1 | | #include "env.h" |
2 | | #include "async_wrap.h" |
3 | | #include "base_object-inl.h" |
4 | | #include "debug_utils-inl.h" |
5 | | #include "diagnosticfilename-inl.h" |
6 | | #include "memory_tracker-inl.h" |
7 | | #include "module_wrap.h" |
8 | | #include "node_buffer.h" |
9 | | #include "node_context_data.h" |
10 | | #include "node_contextify.h" |
11 | | #include "node_errors.h" |
12 | | #include "node_internals.h" |
13 | | #include "node_options-inl.h" |
14 | | #include "node_process-inl.h" |
15 | | #include "node_shadow_realm.h" |
16 | | #include "node_snapshotable.h" |
17 | | #include "node_v8_platform-inl.h" |
18 | | #include "node_worker.h" |
19 | | #include "req_wrap-inl.h" |
20 | | #include "stream_base.h" |
21 | | #include "tracing/agent.h" |
22 | | #include "tracing/traced_value.h" |
23 | | #include "util-inl.h" |
24 | | #include "v8-cppgc.h" |
25 | | #include "v8-profiler.h" |
26 | | #include "v8-sandbox.h" // v8::Object::Wrap(), v8::Object::Unwrap() |
27 | | |
28 | | #include <algorithm> |
29 | | #include <atomic> |
30 | | #include <cinttypes> |
31 | | #include <cstdio> |
32 | | #include <iostream> |
33 | | #include <limits> |
34 | | #include <memory> |
35 | | #include <optional> |
36 | | #include <unordered_map> |
37 | | |
38 | | namespace node { |
39 | | |
40 | | using errors::TryCatchScope; |
41 | | using v8::Array; |
42 | | using v8::ArrayBuffer; |
43 | | using v8::BackingStore; |
44 | | using v8::BackingStoreInitializationMode; |
45 | | using v8::Boolean; |
46 | | using v8::Context; |
47 | | using v8::EmbedderGraph; |
48 | | using v8::EscapableHandleScope; |
49 | | using v8::ExternalMemoryAccounter; |
50 | | using v8::Function; |
51 | | using v8::Global; |
52 | | using v8::HandleScope; |
53 | | using v8::HeapProfiler; |
54 | | using v8::HeapSpaceStatistics; |
55 | | using v8::Integer; |
56 | | using v8::Isolate; |
57 | | using v8::JustVoid; |
58 | | using v8::Local; |
59 | | using v8::Maybe; |
60 | | using v8::MaybeLocal; |
61 | | using v8::NewStringType; |
62 | | using v8::Nothing; |
63 | | using v8::Number; |
64 | | using v8::Object; |
65 | | using v8::ObjectTemplate; |
66 | | using v8::Private; |
67 | | using v8::Promise; |
68 | | using v8::PromiseHookType; |
69 | | using v8::Script; |
70 | | using v8::SnapshotCreator; |
71 | | using v8::StackTrace; |
72 | | using v8::String; |
73 | | using v8::Symbol; |
74 | | using v8::TracingController; |
75 | | using v8::TryCatch; |
76 | | using v8::Uint32; |
77 | | using v8::Undefined; |
78 | | using v8::Value; |
79 | | using worker::Worker; |
80 | | |
81 | | int const ContextEmbedderTag::kNodeContextTag = 0x6e6f64; |
82 | | void* const ContextEmbedderTag::kNodeContextTagPtr = const_cast<void*>( |
83 | | static_cast<const void*>(&ContextEmbedderTag::kNodeContextTag)); |
84 | | |
85 | | void AsyncHooks::ResetPromiseHooks(Local<Function> init, |
86 | | Local<Function> before, |
87 | | Local<Function> after, |
88 | 0 | Local<Function> resolve) { |
89 | 0 | js_promise_hooks_[0].Reset(env()->isolate(), init); |
90 | 0 | js_promise_hooks_[1].Reset(env()->isolate(), before); |
91 | 0 | js_promise_hooks_[2].Reset(env()->isolate(), after); |
92 | 0 | js_promise_hooks_[3].Reset(env()->isolate(), resolve); |
93 | 0 | } |
94 | | |
95 | 0 | Local<Array> AsyncHooks::GetPromiseHooks(Isolate* isolate) const { |
96 | 0 | v8::LocalVector<Value> values(isolate, js_promise_hooks_.size()); |
97 | 0 | for (size_t i = 0; i < js_promise_hooks_.size(); ++i) { |
98 | 0 | if (js_promise_hooks_[i].IsEmpty()) { |
99 | 0 | values[i] = Undefined(isolate); |
100 | 0 | } else { |
101 | 0 | values[i] = js_promise_hooks_[i].Get(isolate); |
102 | 0 | } |
103 | 0 | } |
104 | 0 | return Array::New(isolate, values.data(), values.size()); |
105 | 0 | } |
106 | | |
107 | | void Environment::ResetPromiseHooks(Local<Function> init, |
108 | | Local<Function> before, |
109 | | Local<Function> after, |
110 | 0 | Local<Function> resolve) { |
111 | 0 | async_hooks()->ResetPromiseHooks(init, before, after, resolve); |
112 | |
|
113 | 0 | for (auto it = contexts_.begin(); it != contexts_.end(); it++) { |
114 | 0 | if (it->IsEmpty()) { |
115 | 0 | contexts_.erase(it--); |
116 | 0 | continue; |
117 | 0 | } |
118 | 0 | PersistentToLocal::Weak(isolate_, *it) |
119 | 0 | ->SetPromiseHooks(init, before, after, resolve); |
120 | 0 | } |
121 | 0 | } |
122 | | |
123 | | // Remember to keep this code aligned with pushAsyncContext() in JS. |
124 | | void AsyncHooks::push_async_context( |
125 | | double async_id, |
126 | | double trigger_async_id, |
127 | 760k | std::variant<Local<Object>*, Global<Object>*> resource) { |
128 | 760k | std::visit([](auto* ptr) { CHECK_IMPLIES(ptr != nullptr, !ptr->IsEmpty()); },env.cc:auto node::AsyncHooks::push_async_context(double, double, std::__1::variant<v8::Local<v8::Object>*, v8::Global<v8::Object>*>)::$_0::operator()<v8::Local<v8::Object> >(v8::Local<v8::Object>*) const Line | Count | Source | 128 | 760k | std::visit([](auto* ptr) { CHECK_IMPLIES(ptr != nullptr, !ptr->IsEmpty()); }, |
Unexecuted instantiation: env.cc:auto node::AsyncHooks::push_async_context(double, double, std::__1::variant<v8::Local<v8::Object>*, v8::Global<v8::Object>*>)::$_0::operator()<v8::Global<v8::Object> >(v8::Global<v8::Object>*) const |
129 | 760k | resource); |
130 | | |
131 | 760k | if (fields_[kCheck] > 0) { |
132 | 760k | CHECK_GE(async_id, -1); |
133 | 760k | CHECK_GE(trigger_async_id, -1); |
134 | 760k | } |
135 | | |
136 | 760k | uint32_t offset = fields_[kStackLength]; |
137 | 760k | if (offset * 2 >= async_ids_stack_.Length()) grow_async_ids_stack(); |
138 | 760k | async_ids_stack_[2 * offset] = async_id_fields_[kExecutionAsyncId]; |
139 | 760k | async_ids_stack_[2 * offset + 1] = async_id_fields_[kTriggerAsyncId]; |
140 | 760k | fields_[kStackLength] += 1; |
141 | 760k | async_id_fields_[kExecutionAsyncId] = async_id; |
142 | 760k | async_id_fields_[kTriggerAsyncId] = trigger_async_id; |
143 | | |
144 | | #ifdef DEBUG |
145 | | for (uint32_t i = offset; i < native_execution_async_resources_.size(); i++) |
146 | | std::visit([](auto* ptr) { CHECK_NULL(ptr); }, |
147 | | native_execution_async_resources_[i]); |
148 | | #endif |
149 | | |
150 | | // When this call comes from JS (as a way of increasing the stack size), |
151 | | // `resource` will be empty, because JS caches these values anyway. |
152 | | // False positive: https://github.com/cpplint/cpplint/issues/410 |
153 | | // NOLINTNEXTLINE(whitespace/newline) |
154 | 760k | if (std::visit([](auto* ptr) { return ptr != nullptr; }, resource)) {env.cc:auto node::AsyncHooks::push_async_context(double, double, std::__1::variant<v8::Local<v8::Object>*, v8::Global<v8::Object>*>)::$_1::operator()<v8::Local<v8::Object> >(v8::Local<v8::Object>*) const Line | Count | Source | 154 | 760k | if (std::visit([](auto* ptr) { return ptr != nullptr; }, resource)) { |
Unexecuted instantiation: env.cc:auto node::AsyncHooks::push_async_context(double, double, std::__1::variant<v8::Local<v8::Object>*, v8::Global<v8::Object>*>)::$_1::operator()<v8::Global<v8::Object> >(v8::Global<v8::Object>*) const |
155 | 760k | native_execution_async_resources_.resize(offset + 1); |
156 | | // Caveat: This is a v8::Local<>* assignment, we do not keep a v8::Global<>! |
157 | 760k | native_execution_async_resources_[offset] = resource; |
158 | 760k | } |
159 | 760k | } |
160 | | |
161 | | // Remember to keep this code aligned with popAsyncContext() in JS. |
162 | 760k | bool AsyncHooks::pop_async_context(double async_id) { |
163 | | // In case of an exception then this may have already been reset, if the |
164 | | // stack was multiple MakeCallback()'s deep. |
165 | 760k | if (fields_[kStackLength] == 0) [[unlikely]] |
166 | 0 | return false; |
167 | | |
168 | | // Ask for the async_id to be restored as a check that the stack |
169 | | // hasn't been corrupted. |
170 | 760k | if (fields_[kCheck] > 0 && async_id_fields_[kExecutionAsyncId] != async_id) |
171 | 0 | [[unlikely]] { |
172 | 0 | FailWithCorruptedAsyncStack(async_id); |
173 | 0 | } |
174 | | |
175 | 760k | uint32_t offset = fields_[kStackLength] - 1; |
176 | 760k | async_id_fields_[kExecutionAsyncId] = async_ids_stack_[2 * offset]; |
177 | 760k | async_id_fields_[kTriggerAsyncId] = async_ids_stack_[2 * offset + 1]; |
178 | 760k | fields_[kStackLength] = offset; |
179 | | |
180 | 760k | if (offset < native_execution_async_resources_.size() && |
181 | 760k | std::visit([](auto* ptr) { return ptr != nullptr; },env.cc:auto node::AsyncHooks::pop_async_context(double)::$_0::operator()<v8::Local<v8::Object> >(v8::Local<v8::Object>*) const Line | Count | Source | 181 | 760k | std::visit([](auto* ptr) { return ptr != nullptr; }, |
Unexecuted instantiation: env.cc:auto node::AsyncHooks::pop_async_context(double)::$_0::operator()<v8::Global<v8::Object> >(v8::Global<v8::Object>*) const |
182 | 760k | native_execution_async_resources_[offset])) [[likely]] { |
183 | | #ifdef DEBUG |
184 | | for (uint32_t i = offset + 1; i < native_execution_async_resources_.size(); |
185 | | i++) { |
186 | | std::visit([](auto* ptr) { CHECK_NULL(ptr); }, |
187 | | native_execution_async_resources_[i]); |
188 | | } |
189 | | #endif |
190 | 760k | native_execution_async_resources_.resize(offset); |
191 | 760k | native_execution_async_resources_.shrink_to_fit(); |
192 | 760k | } |
193 | | |
194 | 760k | if (js_execution_async_resources()->Length() > offset) [[unlikely]] { |
195 | 0 | HandleScope handle_scope(env()->isolate()); |
196 | 0 | USE(js_execution_async_resources()->Set( |
197 | 0 | env()->context(), |
198 | 0 | env()->length_string(), |
199 | 0 | Integer::NewFromUnsigned(env()->isolate(), offset))); |
200 | 0 | } |
201 | | |
202 | 760k | return fields_[kStackLength] > 0; |
203 | 760k | } |
204 | | |
205 | 179 | void AsyncHooks::clear_async_id_stack() { |
206 | 179 | if (!js_execution_async_resources_.IsEmpty() && env()->can_call_into_js()) { |
207 | 0 | Isolate* isolate = env()->isolate(); |
208 | 0 | HandleScope handle_scope(isolate); |
209 | 0 | USE(PersistentToLocal::Strong(js_execution_async_resources_) |
210 | 0 | ->Set(env()->context(), |
211 | 0 | env()->length_string(), |
212 | 0 | Integer::NewFromUnsigned(isolate, 0))); |
213 | 0 | } |
214 | | |
215 | 179 | native_execution_async_resources_.clear(); |
216 | 179 | native_execution_async_resources_.shrink_to_fit(); |
217 | | |
218 | 179 | async_id_fields_[kExecutionAsyncId] = 0; |
219 | 179 | async_id_fields_[kTriggerAsyncId] = 0; |
220 | 179 | fields_[kStackLength] = 0; |
221 | 179 | } |
222 | | |
223 | 35 | void AsyncHooks::InstallPromiseHooks(Local<Context> ctx) { |
224 | 35 | ctx->SetPromiseHooks(js_promise_hooks_[0].IsEmpty() |
225 | 35 | ? Local<Function>() |
226 | 35 | : PersistentToLocal::Strong(js_promise_hooks_[0]), |
227 | 35 | js_promise_hooks_[1].IsEmpty() |
228 | 35 | ? Local<Function>() |
229 | 35 | : PersistentToLocal::Strong(js_promise_hooks_[1]), |
230 | 35 | js_promise_hooks_[2].IsEmpty() |
231 | 35 | ? Local<Function>() |
232 | 35 | : PersistentToLocal::Strong(js_promise_hooks_[2]), |
233 | 35 | js_promise_hooks_[3].IsEmpty() |
234 | 35 | ? Local<Function>() |
235 | 35 | : PersistentToLocal::Strong(js_promise_hooks_[3])); |
236 | 35 | } |
237 | | |
238 | 70 | void Environment::PurgeTrackedEmptyContexts() { |
239 | 70 | std::erase_if(contexts_, [&](auto&& el) { return el.IsEmpty(); }); |
240 | 70 | } |
241 | | |
242 | 35 | void Environment::TrackContext(Local<Context> context) { |
243 | 35 | PurgeTrackedEmptyContexts(); |
244 | 35 | size_t id = contexts_.size(); |
245 | 35 | contexts_.resize(id + 1); |
246 | 35 | contexts_[id].Reset(isolate_, context); |
247 | 35 | contexts_[id].SetWeak(); |
248 | 35 | } |
249 | | |
250 | 35 | void Environment::UntrackContext(Local<Context> context) { |
251 | 35 | HandleScope handle_scope(isolate_); |
252 | 35 | PurgeTrackedEmptyContexts(); |
253 | 35 | for (auto it = contexts_.begin(); it != contexts_.end(); it++) { |
254 | 35 | if (Local<Context> saved_context = PersistentToLocal::Weak(isolate_, *it); |
255 | 35 | saved_context == context) { |
256 | 35 | it->Reset(); |
257 | 35 | contexts_.erase(it); |
258 | 35 | break; |
259 | 35 | } |
260 | 35 | } |
261 | 35 | } |
262 | | |
263 | 0 | void Environment::TrackShadowRealm(shadow_realm::ShadowRealm* realm) { |
264 | 0 | shadow_realms_.insert(realm); |
265 | 0 | } |
266 | | |
267 | 0 | void Environment::UntrackShadowRealm(shadow_realm::ShadowRealm* realm) { |
268 | 0 | shadow_realms_.erase(realm); |
269 | 0 | } |
270 | | |
271 | | AsyncHooks::DefaultTriggerAsyncIdScope::DefaultTriggerAsyncIdScope( |
272 | | Environment* env, double default_trigger_async_id) |
273 | 0 | : async_hooks_(env->async_hooks()) { |
274 | 0 | if (env->async_hooks()->fields()[AsyncHooks::kCheck] > 0) { |
275 | 0 | CHECK_GE(default_trigger_async_id, 0); |
276 | 0 | } |
277 | | |
278 | 0 | old_default_trigger_async_id_ = |
279 | 0 | async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId]; |
280 | 0 | async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId] = |
281 | 0 | default_trigger_async_id; |
282 | 0 | } |
283 | | |
284 | 0 | AsyncHooks::DefaultTriggerAsyncIdScope::~DefaultTriggerAsyncIdScope() { |
285 | 0 | async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId] = |
286 | 0 | old_default_trigger_async_id_; |
287 | 0 | } |
288 | | |
289 | | AsyncHooks::DefaultTriggerAsyncIdScope::DefaultTriggerAsyncIdScope( |
290 | | AsyncWrap* async_wrap) |
291 | 0 | : DefaultTriggerAsyncIdScope(async_wrap->env(), |
292 | 0 | async_wrap->get_async_id()) {} |
293 | | |
294 | | std::ostream& operator<<(std::ostream& output, |
295 | 0 | const std::vector<SnapshotIndex>& v) { |
296 | 0 | output << "{ "; |
297 | 0 | for (const SnapshotIndex i : v) { |
298 | 0 | output << i << ", "; |
299 | 0 | } |
300 | 0 | output << " }"; |
301 | 0 | return output; |
302 | 0 | } |
303 | | |
304 | | std::ostream& operator<<(std::ostream& output, |
305 | 0 | const IsolateDataSerializeInfo& i) { |
306 | 0 | output << "{\n" |
307 | 0 | << "// -- primitive begins --\n" |
308 | 0 | << i.primitive_values << ",\n" |
309 | 0 | << "// -- primitive ends --\n" |
310 | 0 | << "// -- template_values begins --\n" |
311 | 0 | << i.template_values << ",\n" |
312 | 0 | << "// -- template_values ends --\n" |
313 | 0 | << "}"; |
314 | 0 | return output; |
315 | 0 | } |
316 | | |
317 | 0 | std::ostream& operator<<(std::ostream& output, const SnapshotFlags& flags) { |
318 | 0 | output << "static_cast<SnapshotFlags>(" << static_cast<uint32_t>(flags) |
319 | 0 | << ")"; |
320 | 0 | return output; |
321 | 0 | } |
322 | | |
323 | 0 | std::ostream& operator<<(std::ostream& output, const SnapshotMetadata& i) { |
324 | 0 | output << "{\n" |
325 | 0 | << " " |
326 | 0 | << (i.type == SnapshotMetadata::Type::kDefault |
327 | 0 | ? "SnapshotMetadata::Type::kDefault" |
328 | 0 | : "SnapshotMetadata::Type::kFullyCustomized") |
329 | 0 | << ", // type\n" |
330 | 0 | << " \"" << i.node_version << "\", // node_version\n" |
331 | 0 | << " \"" << i.node_arch << "\", // node_arch\n" |
332 | 0 | << " \"" << i.node_platform << "\", // node_platform\n" |
333 | 0 | << " " << i.flags << ", // flags\n" |
334 | 0 | << "}"; |
335 | 0 | return output; |
336 | 0 | } |
337 | | |
338 | 0 | IsolateDataSerializeInfo IsolateData::Serialize(SnapshotCreator* creator) { |
339 | 0 | Isolate* isolate = creator->GetIsolate(); |
340 | 0 | IsolateDataSerializeInfo info; |
341 | 0 | HandleScope handle_scope(isolate); |
342 | | // XXX(joyeecheung): technically speaking, the indexes here should be |
343 | | // consecutive and we could just return a range instead of an array, |
344 | | // but that's not part of the V8 API contract so we use an array |
345 | | // just to be safe. |
346 | |
|
347 | 0 | #define VP(PropertyName, StringValue) V(Private, PropertyName) |
348 | 0 | #define VY(PropertyName, StringValue) V(Symbol, PropertyName) |
349 | 0 | #define VS(PropertyName, StringValue) V(String, PropertyName) |
350 | 0 | #define VR(PropertyName, TypeName) V(Private, per_realm_##PropertyName) |
351 | 0 | #define V(TypeName, PropertyName) \ |
352 | 0 | info.primitive_values.push_back( \ |
353 | 0 | creator->AddData(PropertyName##_.Get(isolate))); |
354 | 0 | PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) |
355 | 0 | PER_ISOLATE_SYMBOL_PROPERTIES(VY) |
356 | 0 | PER_ISOLATE_STRING_PROPERTIES(VS) |
357 | 0 | PER_REALM_STRONG_PERSISTENT_VALUES(VR) |
358 | 0 | #undef V |
359 | 0 | #undef VR |
360 | 0 | #undef VY |
361 | 0 | #undef VS |
362 | 0 | #undef VP |
363 | |
|
364 | 0 | info.primitive_values.reserve(info.primitive_values.size() + |
365 | 0 | AsyncWrap::PROVIDERS_LENGTH); |
366 | 0 | for (size_t i = 0; i < AsyncWrap::PROVIDERS_LENGTH; i++) { |
367 | 0 | info.primitive_values.push_back(creator->AddData(async_wrap_provider(i))); |
368 | 0 | } |
369 | 0 | uint32_t id = 0; |
370 | 0 | #define VM(PropertyName) V(PropertyName##_binding_template, ObjectTemplate) |
371 | 0 | #define V(PropertyName, TypeName) \ |
372 | 0 | do { \ |
373 | 0 | Local<TypeName> field = PropertyName(); \ |
374 | 0 | if (!field.IsEmpty()) { \ |
375 | 0 | size_t index = creator->AddData(field); \ |
376 | 0 | info.template_values.push_back({#PropertyName, id, index}); \ |
377 | 0 | } \ |
378 | 0 | id++; \ |
379 | 0 | } while (0); |
380 | 0 | PER_ISOLATE_TEMPLATE_PROPERTIES(V) |
381 | 0 | NODE_BINDINGS_WITH_PER_ISOLATE_INIT(VM) |
382 | 0 | #undef V |
383 | |
|
384 | 0 | return info; |
385 | 0 | } |
386 | | |
387 | 0 | void IsolateData::DeserializeProperties(const IsolateDataSerializeInfo* info) { |
388 | 0 | size_t i = 0; |
389 | |
|
390 | 0 | Isolate::Scope isolate_scope(isolate_); |
391 | 0 | HandleScope handle_scope(isolate_); |
392 | |
|
393 | 0 | if (per_process::enabled_debug_list.enabled(DebugCategory::MKSNAPSHOT)) { |
394 | 0 | fprintf(stderr, "deserializing IsolateDataSerializeInfo...\n"); |
395 | 0 | std::cerr << *info << "\n"; |
396 | 0 | } |
397 | |
|
398 | 0 | #define VP(PropertyName, StringValue) V(Private, PropertyName) |
399 | 0 | #define VY(PropertyName, StringValue) V(Symbol, PropertyName) |
400 | 0 | #define VS(PropertyName, StringValue) V(String, PropertyName) |
401 | 0 | #define VR(PropertyName, TypeName) V(Private, per_realm_##PropertyName) |
402 | 0 | #define V(TypeName, PropertyName) \ |
403 | 0 | do { \ |
404 | 0 | MaybeLocal<TypeName> maybe_field = \ |
405 | 0 | isolate_->GetDataFromSnapshotOnce<TypeName>( \ |
406 | 0 | info->primitive_values[i++]); \ |
407 | 0 | Local<TypeName> field; \ |
408 | 0 | if (!maybe_field.ToLocal(&field)) { \ |
409 | 0 | fprintf(stderr, "Failed to deserialize " #PropertyName "\n"); \ |
410 | 0 | } \ |
411 | 0 | PropertyName##_.Set(isolate_, field); \ |
412 | 0 | } while (0); |
413 | 0 | PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) |
414 | 0 | PER_ISOLATE_SYMBOL_PROPERTIES(VY) |
415 | 0 | PER_ISOLATE_STRING_PROPERTIES(VS) |
416 | 0 | PER_REALM_STRONG_PERSISTENT_VALUES(VR) |
417 | 0 | #undef V |
418 | 0 | #undef VR |
419 | 0 | #undef VY |
420 | 0 | #undef VS |
421 | 0 | #undef VP |
422 | |
|
423 | 0 | for (size_t j = 0; j < AsyncWrap::PROVIDERS_LENGTH; j++) { |
424 | 0 | MaybeLocal<String> maybe_field = |
425 | 0 | isolate_->GetDataFromSnapshotOnce<String>(info->primitive_values[i++]); |
426 | 0 | Local<String> field; |
427 | 0 | if (!maybe_field.ToLocal(&field)) { |
428 | 0 | fprintf(stderr, "Failed to deserialize AsyncWrap provider %zu\n", j); |
429 | 0 | } |
430 | 0 | async_wrap_providers_[j].Set(isolate_, field); |
431 | 0 | } |
432 | |
|
433 | 0 | const std::vector<PropInfo>& values = info->template_values; |
434 | 0 | i = 0; // index to the array |
435 | 0 | uint32_t id = 0; |
436 | 0 | #define VM(PropertyName) V(PropertyName##_binding_template, ObjectTemplate) |
437 | 0 | #define V(PropertyName, TypeName) \ |
438 | 0 | do { \ |
439 | 0 | if (values.size() > i && id == values[i].id) { \ |
440 | 0 | const PropInfo& d = values[i]; \ |
441 | 0 | DCHECK_EQ(d.name, #PropertyName); \ |
442 | 0 | MaybeLocal<TypeName> maybe_field = \ |
443 | 0 | isolate_->GetDataFromSnapshotOnce<TypeName>(d.index); \ |
444 | 0 | Local<TypeName> field; \ |
445 | 0 | if (!maybe_field.ToLocal(&field)) { \ |
446 | 0 | fprintf(stderr, \ |
447 | 0 | "Failed to deserialize isolate data template " #PropertyName \ |
448 | 0 | "\n"); \ |
449 | 0 | } \ |
450 | 0 | set_##PropertyName(field); \ |
451 | 0 | i++; \ |
452 | 0 | } \ |
453 | 0 | id++; \ |
454 | 0 | } while (0); |
455 | |
|
456 | 0 | PER_ISOLATE_TEMPLATE_PROPERTIES(V); |
457 | 0 | NODE_BINDINGS_WITH_PER_ISOLATE_INIT(VM); |
458 | 0 | #undef V |
459 | 0 | } |
460 | | |
461 | 35 | void IsolateData::CreateProperties() { |
462 | | // Create string and private symbol properties as internalized one byte |
463 | | // strings after the platform is properly initialized. |
464 | | // |
465 | | // Internalized because it makes property lookups a little faster and |
466 | | // because the string is created in the old space straight away. It's going |
467 | | // to end up in the old space sooner or later anyway but now it doesn't go |
468 | | // through v8::Eternal's new space handling first. |
469 | | // |
470 | | // One byte because our strings are ASCII and we can safely skip V8's UTF-8 |
471 | | // decoding step. |
472 | | |
473 | 35 | v8::Isolate::Scope isolate_scope(isolate_); |
474 | 35 | HandleScope handle_scope(isolate_); |
475 | | |
476 | 35 | #define V(PropertyName, StringValue) \ |
477 | 700 | PropertyName##_.Set( \ |
478 | 700 | isolate_, \ |
479 | 700 | Private::New(isolate_, \ |
480 | 700 | String::NewFromOneByte( \ |
481 | 700 | isolate_, \ |
482 | 700 | reinterpret_cast<const uint8_t*>(StringValue), \ |
483 | 700 | NewStringType::kInternalized, \ |
484 | 700 | sizeof(StringValue) - 1) \ |
485 | 700 | .ToLocalChecked())); |
486 | 700 | PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(V) |
487 | 35 | #undef V |
488 | 35 | #define V(PropertyName, TypeName) \ |
489 | 2.10k | per_realm_##PropertyName##_.Set( \ |
490 | 2.10k | isolate_, \ |
491 | 2.10k | Private::New( \ |
492 | 2.10k | isolate_, \ |
493 | 2.10k | String::NewFromOneByte( \ |
494 | 2.10k | isolate_, \ |
495 | 2.10k | reinterpret_cast<const uint8_t*>("per_realm_" #PropertyName), \ |
496 | 2.10k | NewStringType::kInternalized, \ |
497 | 2.10k | sizeof("per_realm_" #PropertyName) - 1) \ |
498 | 2.10k | .ToLocalChecked())); |
499 | 2.10k | PER_REALM_STRONG_PERSISTENT_VALUES(V) |
500 | 35 | #undef V |
501 | 35 | #define V(PropertyName, StringValue) \ |
502 | 770 | PropertyName##_.Set( \ |
503 | 770 | isolate_, \ |
504 | 770 | Symbol::New(isolate_, \ |
505 | 770 | String::NewFromOneByte( \ |
506 | 770 | isolate_, \ |
507 | 770 | reinterpret_cast<const uint8_t*>(StringValue), \ |
508 | 770 | NewStringType::kInternalized, \ |
509 | 770 | sizeof(StringValue) - 1) \ |
510 | 770 | .ToLocalChecked())); |
511 | 770 | PER_ISOLATE_SYMBOL_PROPERTIES(V) |
512 | 35 | #undef V |
513 | 35 | #define V(PropertyName, StringValue) \ |
514 | 10.5k | PropertyName##_.Set( \ |
515 | 10.5k | isolate_, \ |
516 | 10.5k | String::NewFromOneByte(isolate_, \ |
517 | 10.5k | reinterpret_cast<const uint8_t*>(StringValue), \ |
518 | 10.5k | NewStringType::kInternalized, \ |
519 | 10.5k | sizeof(StringValue) - 1) \ |
520 | 10.5k | .ToLocalChecked()); |
521 | 10.5k | PER_ISOLATE_STRING_PROPERTIES(V) |
522 | 35 | #undef V |
523 | | |
524 | | // Create all the provider strings that will be passed to JS. Place them in |
525 | | // an array so the array index matches the PROVIDER id offset. This way the |
526 | | // strings can be retrieved quickly. |
527 | 35 | #define V(Provider) \ |
528 | 2.41k | async_wrap_providers_[AsyncWrap::PROVIDER_ ## Provider].Set( \ |
529 | 2.41k | isolate_, \ |
530 | 2.41k | String::NewFromOneByte( \ |
531 | 2.41k | isolate_, \ |
532 | 2.41k | reinterpret_cast<const uint8_t*>(#Provider), \ |
533 | 2.41k | NewStringType::kInternalized, \ |
534 | 2.41k | sizeof(#Provider) - 1).ToLocalChecked()); |
535 | 2.41k | NODE_ASYNC_PROVIDER_TYPES(V) |
536 | 35 | #undef V |
537 | | |
538 | 35 | Local<ObjectTemplate> templ = ObjectTemplate::New(isolate()); |
539 | 35 | templ->SetInternalFieldCount(BaseObject::kInternalFieldCount); |
540 | 35 | set_binding_data_default_template(templ); |
541 | 35 | binding::CreateInternalBindingTemplates(this); |
542 | | |
543 | 35 | contextify::ContextifyContext::InitializeGlobalTemplates(this); |
544 | 35 | CreateEnvProxyTemplate(this); |
545 | 35 | } |
546 | | |
547 | | // Previously, the general convention of the wrappable layout for cppgc in |
548 | | // the ecosystem is: |
549 | | // [ 0 ] -> embedder id |
550 | | // [ 1 ] -> wrappable instance |
551 | | // Now V8 has deprecated this layout-based tracing enablement, embedders |
552 | | // should simply use v8::Object::Wrap() and v8::Object::Unwrap(). We preserve |
553 | | // this layout only to distinguish internally how the memory of a Node.js |
554 | | // wrapper is managed or whether a wrapper is managed by Node.js. |
555 | | constexpr uint16_t kDefaultCppGCEmbedderID = 0x90de; |
556 | | Mutex IsolateData::isolate_data_mutex_; |
557 | | std::unordered_map<uint16_t, std::unique_ptr<PerIsolateWrapperData>> |
558 | | IsolateData::wrapper_data_map_; |
559 | | |
560 | | IsolateData* IsolateData::CreateIsolateData( |
561 | | Isolate* isolate, |
562 | | uv_loop_t* loop, |
563 | | MultiIsolatePlatform* platform, |
564 | | ArrayBufferAllocator* allocator, |
565 | | const EmbedderSnapshotData* embedder_snapshot_data, |
566 | 35 | std::shared_ptr<PerIsolateOptions> options) { |
567 | 35 | const SnapshotData* snapshot_data = |
568 | 35 | SnapshotData::FromEmbedderWrapper(embedder_snapshot_data); |
569 | 35 | if (options == nullptr) { |
570 | 35 | options = per_process::cli_options->per_isolate->Clone(); |
571 | 35 | } |
572 | 35 | return new IsolateData( |
573 | 35 | isolate, loop, platform, allocator, snapshot_data, options); |
574 | 35 | } |
575 | | |
576 | | IsolateData::IsolateData(Isolate* isolate, |
577 | | uv_loop_t* event_loop, |
578 | | MultiIsolatePlatform* platform, |
579 | | ArrayBufferAllocator* node_allocator, |
580 | | const SnapshotData* snapshot_data, |
581 | | std::shared_ptr<PerIsolateOptions> options) |
582 | 35 | : isolate_(isolate), |
583 | 35 | event_loop_(event_loop), |
584 | 35 | node_allocator_(node_allocator == nullptr ? nullptr |
585 | 35 | : node_allocator->GetImpl()), |
586 | 35 | platform_(platform), |
587 | 35 | snapshot_data_(snapshot_data), |
588 | 35 | options_(std::move(options)) { |
589 | 35 | uint16_t cppgc_id = kDefaultCppGCEmbedderID; |
590 | | // We do not care about overflow since we just want this to be different |
591 | | // from the cppgc id. |
592 | 35 | uint16_t non_cppgc_id = cppgc_id + 1; |
593 | | |
594 | 35 | { |
595 | | // GC could still be run after the IsolateData is destroyed, so we store |
596 | | // the ids in a static map to ensure pointers to them are still valid |
597 | | // then. In practice there should be very few variants of the cppgc id |
598 | | // in one process so the size of this map should be very small. |
599 | 35 | node::Mutex::ScopedLock lock(isolate_data_mutex_); |
600 | 35 | auto it = wrapper_data_map_.find(cppgc_id); |
601 | 35 | if (it == wrapper_data_map_.end()) { |
602 | 35 | auto pair = wrapper_data_map_.emplace( |
603 | 35 | cppgc_id, new PerIsolateWrapperData{cppgc_id, non_cppgc_id}); |
604 | 35 | it = pair.first; |
605 | 35 | } |
606 | 35 | wrapper_data_ = it->second.get(); |
607 | 35 | } |
608 | | |
609 | 35 | if (snapshot_data == nullptr) { |
610 | 35 | CreateProperties(); |
611 | 35 | } else { |
612 | 0 | DeserializeProperties(&snapshot_data->isolate_data_info); |
613 | 0 | } |
614 | 35 | } |
615 | | |
616 | 35 | IsolateData::~IsolateData() {} |
617 | | |
618 | | // Deprecated API, embedders should use v8::Object::Wrap() directly instead. |
619 | | void SetCppgcReference(Isolate* isolate, |
620 | | Local<Object> object, |
621 | 0 | v8::Object::Wrappable* wrappable) { |
622 | 0 | v8::Object::Wrap<v8::CppHeapPointerTag::kDefaultTag>( |
623 | 0 | isolate, object, wrappable); |
624 | 0 | } |
625 | | |
626 | 0 | void IsolateData::MemoryInfo(MemoryTracker* tracker) const { |
627 | 0 | #define V(PropertyName, StringValue) \ |
628 | 0 | tracker->TrackField(#PropertyName, PropertyName()); |
629 | 0 | PER_ISOLATE_SYMBOL_PROPERTIES(V) |
630 | |
|
631 | 0 | PER_ISOLATE_STRING_PROPERTIES(V) |
632 | 0 | #undef V |
633 | |
|
634 | 0 | tracker->TrackField("async_wrap_providers", async_wrap_providers_); |
635 | |
|
636 | 0 | if (node_allocator_ != nullptr) { |
637 | 0 | tracker->TrackFieldWithSize( |
638 | 0 | "node_allocator", sizeof(*node_allocator_), "NodeArrayBufferAllocator"); |
639 | 0 | } |
640 | 0 | tracker->TrackFieldWithSize( |
641 | 0 | "platform", sizeof(*platform_), "MultiIsolatePlatform"); |
642 | | // TODO(joyeecheung): implement MemoryRetainer in the option classes. |
643 | 0 | } |
644 | | |
645 | 0 | void TrackingTraceStateObserver::UpdateTraceCategoryState() { |
646 | 0 | if (!env_->owns_process_state() || !env_->can_call_into_js()) { |
647 | | // Ideally, we’d have a consistent story that treats all threads/Environment |
648 | | // instances equally here. However, tracing is essentially global, and this |
649 | | // callback is called from whichever thread calls `StartTracing()` or |
650 | | // `StopTracing()`. The only way to do this in a threadsafe fashion |
651 | | // seems to be only tracking this from the main thread, and only allowing |
652 | | // these state modifications from the main thread. |
653 | 0 | return; |
654 | 0 | } |
655 | | |
656 | 0 | if (env_->principal_realm() == nullptr) { |
657 | 0 | return; |
658 | 0 | } |
659 | | |
660 | 0 | bool async_hooks_enabled = (*(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED( |
661 | 0 | TRACING_CATEGORY_NODE1(async_hooks)))) != 0; |
662 | |
|
663 | 0 | Isolate* isolate = env_->isolate(); |
664 | 0 | HandleScope handle_scope(isolate); |
665 | 0 | Local<Function> cb = env_->trace_category_state_function(); |
666 | 0 | if (cb.IsEmpty()) |
667 | 0 | return; |
668 | 0 | TryCatchScope try_catch(env_); |
669 | 0 | try_catch.SetVerbose(true); |
670 | 0 | Local<Value> args[] = {Boolean::New(isolate, async_hooks_enabled)}; |
671 | 0 | USE(cb->Call(env_->context(), Undefined(isolate), arraysize(args), args)); |
672 | 0 | } |
673 | | |
674 | | void Environment::AssignToContext(Local<v8::Context> context, |
675 | | Realm* realm, |
676 | 35 | const ContextInfo& info) { |
677 | 35 | context->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kEnvironment, |
678 | 35 | this, |
679 | 35 | EmbedderDataTag::kPerContextData); |
680 | 35 | context->SetAlignedPointerInEmbedderData( |
681 | 35 | ContextEmbedderIndex::kRealm, realm, EmbedderDataTag::kPerContextData); |
682 | | |
683 | | // ContextifyContexts will update this to a pointer to the native object. |
684 | 35 | context->SetAlignedPointerInEmbedderData( |
685 | 35 | ContextEmbedderIndex::kContextifyContext, |
686 | 35 | nullptr, |
687 | 35 | EmbedderDataTag::kPerContextData); |
688 | | |
689 | | // This must not be done before other context fields are initialized. |
690 | 35 | ContextEmbedderTag::TagNodeContext(context); |
691 | | |
692 | 35 | #if HAVE_INSPECTOR |
693 | 35 | inspector_agent()->ContextCreated(context, info); |
694 | 35 | #endif // HAVE_INSPECTOR |
695 | | |
696 | 35 | this->async_hooks()->InstallPromiseHooks(context); |
697 | 35 | TrackContext(context); |
698 | 35 | } |
699 | | |
700 | 35 | void Environment::UnassignFromContext(Local<v8::Context> context) { |
701 | 35 | if (!context.IsEmpty()) { |
702 | 35 | context->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kEnvironment, |
703 | 35 | nullptr, |
704 | 35 | EmbedderDataTag::kPerContextData); |
705 | 35 | context->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kRealm, |
706 | 35 | nullptr, |
707 | 35 | EmbedderDataTag::kPerContextData); |
708 | 35 | context->SetAlignedPointerInEmbedderData( |
709 | 35 | ContextEmbedderIndex::kContextifyContext, |
710 | 35 | nullptr, |
711 | 35 | EmbedderDataTag::kPerContextData); |
712 | 35 | } |
713 | 35 | UntrackContext(context); |
714 | 35 | } |
715 | | |
716 | | void Environment::TryLoadAddon( |
717 | | const char* filename, |
718 | | int flags, |
719 | 0 | const std::function<bool(binding::DLib*)>& was_loaded) { |
720 | 0 | loaded_addons_.emplace_back(filename, flags); |
721 | 0 | if (!was_loaded(&loaded_addons_.back())) { |
722 | 0 | loaded_addons_.pop_back(); |
723 | 0 | } |
724 | 0 | } |
725 | | |
726 | 0 | std::string Environment::GetCwd(const std::string& exec_path) { |
727 | 0 | char cwd[PATH_MAX_BYTES]; |
728 | 0 | size_t size = PATH_MAX_BYTES; |
729 | |
|
730 | 0 | if (uv_cwd(cwd, &size) == 0) { |
731 | 0 | CHECK_GT(size, 0); |
732 | 0 | return cwd; |
733 | 0 | } |
734 | | |
735 | | // This can fail if the cwd is deleted. In that case, fall back to |
736 | | // exec_path. |
737 | 0 | return exec_path.substr(0, exec_path.find_last_of(kPathSeparator)); |
738 | 0 | } |
739 | | |
740 | 0 | void Environment::add_refs(int64_t diff) { |
741 | 0 | task_queues_async_refs_ += diff; |
742 | 0 | CHECK_GE(task_queues_async_refs_, 0); |
743 | 0 | if (task_queues_async_refs_ == 0) |
744 | 0 | uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
745 | 0 | else |
746 | 0 | uv_ref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
747 | 0 | } |
748 | | |
749 | 0 | uv_buf_t Environment::allocate_managed_buffer(const size_t suggested_size) { |
750 | 0 | std::unique_ptr<BackingStore> bs = ArrayBuffer::NewBackingStore( |
751 | 0 | isolate(), |
752 | 0 | suggested_size, |
753 | 0 | BackingStoreInitializationMode::kUninitialized); |
754 | 0 | uv_buf_t buf = uv_buf_init(static_cast<char*>(bs->Data()), bs->ByteLength()); |
755 | 0 | released_allocated_buffers_.emplace(buf.base, std::move(bs)); |
756 | 0 | return buf; |
757 | 0 | } |
758 | | |
759 | | std::unique_ptr<BackingStore> Environment::release_managed_buffer( |
760 | 0 | const uv_buf_t& buf) { |
761 | 0 | std::unique_ptr<BackingStore> bs; |
762 | 0 | if (buf.base != nullptr) { |
763 | 0 | auto it = released_allocated_buffers_.find(buf.base); |
764 | 0 | CHECK_NE(it, released_allocated_buffers_.end()); |
765 | 0 | bs = std::move(it->second); |
766 | 0 | released_allocated_buffers_.erase(it); |
767 | 0 | } |
768 | 0 | return bs; |
769 | 0 | } |
770 | | |
771 | 35 | std::string Environment::GetExecPath(const std::vector<std::string>& argv) { |
772 | 35 | char exec_path_buf[2 * PATH_MAX]; |
773 | 35 | size_t exec_path_len = sizeof(exec_path_buf); |
774 | 35 | std::string exec_path; |
775 | 35 | if (uv_exepath(exec_path_buf, &exec_path_len) == 0) { |
776 | 35 | exec_path = std::string(exec_path_buf, exec_path_len); |
777 | 35 | } else if (!argv.empty()) { |
778 | 0 | exec_path = argv[0]; |
779 | 0 | } |
780 | | |
781 | | // On OpenBSD process.execPath will be relative unless we |
782 | | // get the full path before process.execPath is used. |
783 | | #if defined(__OpenBSD__) |
784 | | uv_fs_t req; |
785 | | req.ptr = nullptr; |
786 | | if (0 == |
787 | | uv_fs_realpath(nullptr, &req, exec_path.c_str(), nullptr)) { |
788 | | CHECK_NOT_NULL(req.ptr); |
789 | | exec_path = std::string(static_cast<char*>(req.ptr)); |
790 | | } |
791 | | uv_fs_req_cleanup(&req); |
792 | | #endif |
793 | | |
794 | 35 | return exec_path; |
795 | 35 | } |
796 | | |
797 | | Environment::Environment(IsolateData* isolate_data, |
798 | | Isolate* isolate, |
799 | | const std::vector<std::string>& args, |
800 | | const std::vector<std::string>& exec_args, |
801 | | const EnvSerializeInfo* env_info, |
802 | | EnvironmentFlags::Flags flags, |
803 | | ThreadId thread_id, |
804 | | std::string_view thread_name) |
805 | 35 | : isolate_(isolate), |
806 | 35 | external_memory_accounter_(new ExternalMemoryAccounter()), |
807 | 35 | isolate_data_(isolate_data), |
808 | 35 | async_hooks_(isolate, MAYBE_FIELD_PTR(env_info, async_hooks)), |
809 | 35 | immediate_info_(isolate, MAYBE_FIELD_PTR(env_info, immediate_info)), |
810 | 35 | timeout_info_(isolate_, 1, MAYBE_FIELD_PTR(env_info, timeout_info)), |
811 | 35 | tick_info_(isolate, MAYBE_FIELD_PTR(env_info, tick_info)), |
812 | 35 | timer_base_(uv_now(isolate_data->event_loop())), |
813 | 35 | exec_argv_(exec_args), |
814 | 35 | argv_(args), |
815 | 35 | exec_path_(Environment::GetExecPath(args)), |
816 | 35 | exit_info_( |
817 | 35 | isolate_, kExitInfoFieldCount, MAYBE_FIELD_PTR(env_info, exit_info)), |
818 | 35 | should_abort_on_uncaught_toggle_( |
819 | 35 | isolate_, |
820 | 35 | 1, |
821 | 35 | MAYBE_FIELD_PTR(env_info, should_abort_on_uncaught_toggle)), |
822 | 35 | stream_base_state_(isolate_, |
823 | 35 | StreamBase::kNumStreamBaseStateFields, |
824 | 35 | MAYBE_FIELD_PTR(env_info, stream_base_state)), |
825 | 35 | time_origin_(performance::performance_process_start), |
826 | 35 | time_origin_timestamp_(performance::performance_process_start_timestamp), |
827 | 35 | environment_start_(PERFORMANCE_NOW()), |
828 | 35 | flags_(flags), |
829 | 35 | thread_id_(thread_id.id == static_cast<uint64_t>(-1) |
830 | 35 | ? AllocateEnvironmentThreadId().id |
831 | 35 | : thread_id.id), |
832 | 35 | thread_name_(thread_name) { |
833 | 35 | if (!is_main_thread()) { |
834 | | // If this is a Worker thread, we can always safely use the parent's |
835 | | // Isolate's code cache because of the shared read-only heap. |
836 | 0 | CHECK_NOT_NULL(isolate_data->worker_context()); |
837 | 0 | builtin_loader()->CopySourceAndCodeCacheReferenceFrom( |
838 | 0 | isolate_data->worker_context()->env()->builtin_loader()); |
839 | 35 | } else if (isolate_data->snapshot_data() != nullptr) { |
840 | | // ... otherwise, if a snapshot was provided, use its code cache. |
841 | 0 | size_t cache_size = isolate_data->snapshot_data()->code_cache.size(); |
842 | 0 | per_process::Debug(DebugCategory::CODE_CACHE, |
843 | 0 | "snapshot contains %zu code cache\n", |
844 | 0 | cache_size); |
845 | 0 | if (cache_size > 0) { |
846 | 0 | builtin_loader()->RefreshCodeCache( |
847 | 0 | isolate_data->snapshot_data()->code_cache); |
848 | 0 | } |
849 | 0 | } |
850 | | |
851 | | // Compile builtins eagerly when building the snapshot so that inner functions |
852 | | // of essential builtins that are loaded in the snapshot can have faster first |
853 | | // invocation. |
854 | 35 | if (isolate_data->is_building_snapshot()) { |
855 | 0 | builtin_loader()->SetEagerCompile(); |
856 | 0 | } |
857 | | |
858 | | // We'll be creating new objects so make sure we've entered the context. |
859 | 35 | HandleScope handle_scope(isolate); |
860 | | |
861 | | // Set some flags if only kDefaultFlags was passed. This can make API version |
862 | | // transitions easier for embedders. |
863 | 35 | if (flags_ & EnvironmentFlags::kDefaultFlags) { |
864 | 35 | flags_ = flags_ | |
865 | 35 | EnvironmentFlags::kOwnsProcessState | |
866 | 35 | EnvironmentFlags::kOwnsInspector; |
867 | 35 | } |
868 | | |
869 | | // We create new copies of the per-Environment option sets, so that it is |
870 | | // easier to modify them after Environment creation. The defaults are |
871 | | // part of the per-Isolate option set, for which in turn the defaults are |
872 | | // part of the per-process option set. |
873 | 35 | options_ = std::make_shared<EnvironmentOptions>( |
874 | 35 | *isolate_data->options()->per_env); |
875 | 35 | inspector_host_port_ = std::make_shared<ExclusiveAccess<HostPort>>( |
876 | 35 | options_->debug_options().host_port); |
877 | | |
878 | 35 | set_env_vars(per_process::system_environment); |
879 | | // This should be done after options is created, so that --trace-env can be |
880 | | // checked when parsing NODE_DEBUG_NATIVE. It should also be done after |
881 | | // env_vars() is set so that the parser uses values from env->env_vars() |
882 | | // which may or may not be the system environment variable store. |
883 | 35 | enabled_debug_list_.Parse(this); |
884 | | |
885 | 35 | heap_snapshot_near_heap_limit_ = |
886 | 35 | static_cast<uint32_t>(options_->heap_snapshot_near_heap_limit); |
887 | | |
888 | 35 | if (!(flags_ & EnvironmentFlags::kOwnsProcessState)) { |
889 | 0 | set_abort_on_uncaught_exception(false); |
890 | 0 | } |
891 | | |
892 | 35 | #if HAVE_INSPECTOR |
893 | | // We can only create the inspector agent after having cloned the options. |
894 | 35 | inspector_agent_ = std::make_unique<inspector::Agent>(this); |
895 | 35 | #endif |
896 | | |
897 | 35 | if (tracing::AgentWriterHandle* writer = GetTracingAgentWriter()) { |
898 | 35 | trace_state_observer_ = std::make_unique<TrackingTraceStateObserver>(this); |
899 | 35 | if (TracingController* tracing_controller = writer->GetTracingController()) |
900 | 0 | tracing_controller->AddTraceStateObserver(trace_state_observer_.get()); |
901 | 35 | } |
902 | | |
903 | 35 | destroy_async_id_list_.reserve(512); |
904 | | |
905 | 35 | performance_state_ = std::make_unique<performance::PerformanceState>( |
906 | 35 | isolate, |
907 | 35 | time_origin_, |
908 | 35 | time_origin_timestamp_, |
909 | 35 | MAYBE_FIELD_PTR(env_info, performance_state)); |
910 | | |
911 | 35 | if (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED( |
912 | 35 | TRACING_CATEGORY_NODE1(environment)) != 0) { |
913 | 0 | tracing::EnvironmentArgs traced_value(args, exec_args); |
914 | 0 | TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(TRACING_CATEGORY_NODE1(environment), |
915 | 0 | "Environment", |
916 | 0 | this, |
917 | 0 | "args", |
918 | 0 | tracing::CastTracedValue(traced_value)); |
919 | 0 | } |
920 | | |
921 | 35 | if (options_->permission) { |
922 | 0 | permission()->EnablePermissions(); |
923 | | // The process shouldn't be able to neither |
924 | | // spawn/worker nor use addons or enable inspector |
925 | | // unless explicitly allowed by the user |
926 | 0 | if (!options_->allow_addons) { |
927 | 0 | options_->allow_native_addons = false; |
928 | 0 | permission()->Apply(this, {"*"}, permission::PermissionScope::kAddon); |
929 | 0 | } |
930 | 0 | if (!options_->allow_inspector) { |
931 | 0 | flags_ = flags_ | EnvironmentFlags::kNoCreateInspector; |
932 | 0 | permission()->Apply(this, {"*"}, permission::PermissionScope::kInspector); |
933 | 0 | } |
934 | 0 | if (!options_->allow_child_process) { |
935 | 0 | permission()->Apply( |
936 | 0 | this, {"*"}, permission::PermissionScope::kChildProcess); |
937 | 0 | } |
938 | 0 | if (!options_->allow_worker_threads) { |
939 | 0 | permission()->Apply( |
940 | 0 | this, {"*"}, permission::PermissionScope::kWorkerThreads); |
941 | 0 | } |
942 | 0 | if (!options_->allow_wasi) { |
943 | 0 | permission()->Apply(this, {"*"}, permission::PermissionScope::kWASI); |
944 | 0 | } |
945 | | |
946 | | // Implicit allow entrypoint to kFileSystemRead |
947 | 0 | if (!options_->has_eval_string && !options_->force_repl) { |
948 | 0 | std::string first_argv; |
949 | 0 | if (argv_.size() > 1) { |
950 | 0 | first_argv = argv_[1]; |
951 | 0 | } |
952 | | |
953 | | // Also implicit allow preloaded modules to kFileSystemRead |
954 | 0 | if (!options_->preload_cjs_modules.empty()) { |
955 | 0 | for (const std::string& mod : options_->preload_cjs_modules) { |
956 | 0 | options_->allow_fs_read.push_back(mod); |
957 | 0 | } |
958 | 0 | } |
959 | |
|
960 | 0 | if (first_argv != "inspect") { |
961 | 0 | options_->allow_fs_read.push_back(first_argv); |
962 | 0 | } |
963 | 0 | } |
964 | |
|
965 | 0 | if (!options_->allow_fs_read.empty()) { |
966 | 0 | permission()->Apply(this, |
967 | 0 | options_->allow_fs_read, |
968 | 0 | permission::PermissionScope::kFileSystemRead); |
969 | 0 | } |
970 | |
|
971 | 0 | if (!options_->allow_fs_write.empty()) { |
972 | 0 | permission()->Apply(this, |
973 | 0 | options_->allow_fs_write, |
974 | 0 | permission::PermissionScope::kFileSystemWrite); |
975 | 0 | } |
976 | |
|
977 | 0 | if (options_->allow_net) { |
978 | 0 | permission()->Apply(this, {"*"}, permission::PermissionScope::kNet); |
979 | 0 | } |
980 | 0 | } |
981 | 35 | } |
982 | | |
983 | | void Environment::InitializeMainContext(Local<Context> context, |
984 | 35 | const EnvSerializeInfo* env_info) { |
985 | 35 | principal_realm_ = std::make_unique<PrincipalRealm>( |
986 | 35 | this, context, MAYBE_FIELD_PTR(env_info, principal_realm)); |
987 | 35 | if (env_info != nullptr) { |
988 | 0 | DeserializeProperties(env_info); |
989 | 0 | } |
990 | | |
991 | 35 | if (!options_->force_async_hooks_checks) { |
992 | 0 | async_hooks_.no_force_checks(); |
993 | 0 | } |
994 | | |
995 | | // By default, always abort when --abort-on-uncaught-exception was passed. |
996 | 35 | should_abort_on_uncaught_toggle_[0] = 1; |
997 | | |
998 | | // The process is not exiting by default. |
999 | 35 | set_exiting(false); |
1000 | | |
1001 | 35 | performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT, |
1002 | 35 | environment_start_); |
1003 | 35 | performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_NODE_START, |
1004 | 35 | per_process::node_start_time); |
1005 | | |
1006 | 35 | if (per_process::v8_initialized) { |
1007 | 35 | performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_V8_START, |
1008 | 35 | performance::performance_v8_start); |
1009 | 35 | } |
1010 | 35 | } |
1011 | | |
1012 | 35 | Environment::~Environment() { |
1013 | 35 | HandleScope handle_scope(isolate()); |
1014 | 35 | Local<Context> ctx = context(); |
1015 | | |
1016 | 35 | if (Environment** interrupt_data = interrupt_data_.load()) { |
1017 | | // There are pending RequestInterrupt() callbacks. Tell them not to run, |
1018 | | // then force V8 to run interrupts by compiling and running an empty script |
1019 | | // so as not to leak memory. |
1020 | 0 | *interrupt_data = nullptr; |
1021 | |
|
1022 | 0 | Isolate::AllowJavascriptExecutionScope allow_js_here(isolate()); |
1023 | 0 | TryCatch try_catch(isolate()); |
1024 | 0 | Context::Scope context_scope(ctx); |
1025 | |
|
1026 | | #ifdef DEBUG |
1027 | | bool consistency_check = false; |
1028 | | isolate()->RequestInterrupt([](Isolate*, void* data) { |
1029 | | *static_cast<bool*>(data) = true; |
1030 | | }, &consistency_check); |
1031 | | #endif |
1032 | |
|
1033 | 0 | Local<Script> script; |
1034 | 0 | if (Script::Compile(ctx, String::Empty(isolate())).ToLocal(&script)) |
1035 | 0 | USE(script->Run(ctx)); |
1036 | |
|
1037 | 0 | DCHECK(consistency_check); |
1038 | 0 | } |
1039 | | |
1040 | | // FreeEnvironment() should have set this. |
1041 | 35 | CHECK(is_stopping()); |
1042 | | |
1043 | 35 | if (heapsnapshot_near_heap_limit_callback_added_) { |
1044 | 0 | RemoveHeapSnapshotNearHeapLimitCallback(0); |
1045 | 0 | } |
1046 | | |
1047 | 35 | isolate()->GetHeapProfiler()->RemoveBuildEmbedderGraphCallback( |
1048 | 35 | BuildEmbedderGraph, this); |
1049 | | |
1050 | 35 | #if HAVE_INSPECTOR |
1051 | | // Destroy inspector agent before erasing the context. The inspector |
1052 | | // destructor depends on the context still being accessible. |
1053 | 35 | inspector_agent_.reset(); |
1054 | 35 | #endif |
1055 | | |
1056 | | // Sub-realms should have been cleared with Environment's cleanup. |
1057 | 35 | DCHECK_EQ(shadow_realms_.size(), 0); |
1058 | 35 | principal_realm_.reset(); |
1059 | | |
1060 | 35 | if (trace_state_observer_) { |
1061 | 35 | tracing::AgentWriterHandle* writer = GetTracingAgentWriter(); |
1062 | 35 | CHECK_NOT_NULL(writer); |
1063 | 35 | if (TracingController* tracing_controller = writer->GetTracingController()) |
1064 | 0 | tracing_controller->RemoveTraceStateObserver(trace_state_observer_.get()); |
1065 | 35 | } |
1066 | | |
1067 | 35 | TRACE_EVENT_NESTABLE_ASYNC_END0( |
1068 | 35 | TRACING_CATEGORY_NODE1(environment), "Environment", this); |
1069 | | |
1070 | | // Do not unload addons on the main thread. Some addons need to retain memory |
1071 | | // beyond the Environment's lifetime, and unloading them early would break |
1072 | | // them; with Worker threads, we have the opportunity to be stricter. |
1073 | | // Also, since the main thread usually stops just before the process exits, |
1074 | | // this is far less relevant here. |
1075 | 35 | if (!is_main_thread()) { |
1076 | | // Dereference all addons that were loaded into this environment. |
1077 | 0 | for (binding::DLib& addon : loaded_addons_) { |
1078 | 0 | addon.Close(); |
1079 | 0 | } |
1080 | 0 | } |
1081 | | |
1082 | 35 | delete external_memory_accounter_; |
1083 | 35 | if (cpu_profiler_) { |
1084 | 0 | for (auto& it : pending_profiles_) { |
1085 | 0 | cpu_profiler_->Stop(it); |
1086 | 0 | } |
1087 | 0 | cpu_profiler_->Dispose(); |
1088 | 0 | cpu_profiler_ = nullptr; |
1089 | 0 | } |
1090 | 35 | } |
1091 | | |
1092 | 35 | void Environment::InitializeLibuv() { |
1093 | 35 | HandleScope handle_scope(isolate()); |
1094 | 35 | Context::Scope context_scope(context()); |
1095 | | |
1096 | 35 | CHECK_EQ(0, uv_timer_init(event_loop(), timer_handle())); |
1097 | 35 | uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
1098 | | |
1099 | 35 | CHECK_EQ(0, uv_check_init(event_loop(), immediate_check_handle())); |
1100 | 35 | uv_unref(reinterpret_cast<uv_handle_t*>(immediate_check_handle())); |
1101 | | |
1102 | 35 | CHECK_EQ(0, uv_idle_init(event_loop(), immediate_idle_handle())); |
1103 | | |
1104 | 35 | CHECK_EQ(0, uv_check_start(immediate_check_handle(), CheckImmediate)); |
1105 | | |
1106 | | // Inform V8's CPU profiler when we're idle. The profiler is sampling-based |
1107 | | // but not all samples are created equal; mark the wall clock time spent in |
1108 | | // epoll_wait() and friends so profiling tools can filter it out. The samples |
1109 | | // still end up in v8.log but with state=IDLE rather than state=EXTERNAL. |
1110 | 35 | CHECK_EQ(0, uv_prepare_init(event_loop(), &idle_prepare_handle_)); |
1111 | 35 | CHECK_EQ(0, uv_check_init(event_loop(), &idle_check_handle_)); |
1112 | | |
1113 | 35 | CHECK_EQ(0, uv_async_init( |
1114 | 35 | event_loop(), |
1115 | 35 | &task_queues_async_, |
1116 | 35 | [](uv_async_t* async) { |
1117 | 35 | Environment* env = ContainerOf( |
1118 | 35 | &Environment::task_queues_async_, async); |
1119 | 35 | HandleScope handle_scope(env->isolate()); |
1120 | 35 | Context::Scope context_scope(env->context()); |
1121 | 35 | env->RunAndClearNativeImmediates(); |
1122 | 35 | })); |
1123 | 35 | uv_unref(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_)); |
1124 | 35 | uv_unref(reinterpret_cast<uv_handle_t*>(&idle_check_handle_)); |
1125 | 35 | uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
1126 | | |
1127 | 35 | { |
1128 | 35 | Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
1129 | 35 | task_queues_async_initialized_ = true; |
1130 | 35 | if (native_immediates_threadsafe_.size() > 0 || |
1131 | 35 | native_immediates_interrupts_.size() > 0) { |
1132 | 0 | uv_async_send(&task_queues_async_); |
1133 | 0 | } |
1134 | 35 | } |
1135 | | |
1136 | 35 | StartProfilerIdleNotifier(); |
1137 | 35 | env_handle_initialized_ = true; |
1138 | 35 | } |
1139 | | |
1140 | 35 | void Environment::InitializeCompileCache() { |
1141 | 35 | std::string dir_from_env; |
1142 | 35 | if (!credentials::SafeGetenv("NODE_COMPILE_CACHE", &dir_from_env, this) || |
1143 | 35 | dir_from_env.empty()) { |
1144 | 35 | return; |
1145 | 35 | } |
1146 | 0 | std::string portable_env; |
1147 | 0 | bool portable = credentials::SafeGetenv( |
1148 | 0 | "NODE_COMPILE_CACHE_PORTABLE", &portable_env, this) && |
1149 | 0 | !portable_env.empty() && portable_env == "1"; |
1150 | 0 | if (portable) { |
1151 | 0 | Debug(this, |
1152 | 0 | DebugCategory::COMPILE_CACHE, |
1153 | 0 | "[compile cache] using relative path\n"); |
1154 | 0 | } |
1155 | 0 | EnableCompileCache(dir_from_env, |
1156 | 0 | portable ? EnableOption::PORTABLE : EnableOption::DEFAULT); |
1157 | 0 | } |
1158 | | |
1159 | | CompileCacheEnableResult Environment::EnableCompileCache( |
1160 | 0 | const std::string& cache_dir, EnableOption option) { |
1161 | 0 | CompileCacheEnableResult result; |
1162 | 0 | std::string disable_env; |
1163 | 0 | if (credentials::SafeGetenv( |
1164 | 0 | "NODE_DISABLE_COMPILE_CACHE", &disable_env, this)) { |
1165 | 0 | result.status = CompileCacheEnableStatus::DISABLED; |
1166 | 0 | result.message = "Disabled by NODE_DISABLE_COMPILE_CACHE"; |
1167 | 0 | Debug(this, |
1168 | 0 | DebugCategory::COMPILE_CACHE, |
1169 | 0 | "[compile cache] %s.\n", |
1170 | 0 | result.message); |
1171 | 0 | return result; |
1172 | 0 | } |
1173 | | |
1174 | 0 | if (!compile_cache_handler_) { |
1175 | 0 | std::unique_ptr<CompileCacheHandler> handler = |
1176 | 0 | std::make_unique<CompileCacheHandler>(this); |
1177 | 0 | result = handler->Enable(this, cache_dir, option); |
1178 | 0 | if (result.status == CompileCacheEnableStatus::ENABLED) { |
1179 | 0 | compile_cache_handler_ = std::move(handler); |
1180 | 0 | AtExit( |
1181 | 0 | [](void* env) { |
1182 | 0 | static_cast<Environment*>(env)->FlushCompileCache(); |
1183 | 0 | }, |
1184 | 0 | this); |
1185 | 0 | } |
1186 | 0 | if (!result.message.empty()) { |
1187 | 0 | Debug(this, |
1188 | 0 | DebugCategory::COMPILE_CACHE, |
1189 | 0 | "[compile cache] %s\n", |
1190 | 0 | result.message); |
1191 | 0 | } |
1192 | 0 | } else { |
1193 | 0 | result.status = CompileCacheEnableStatus::ALREADY_ENABLED; |
1194 | 0 | result.cache_directory = compile_cache_handler_->cache_dir(); |
1195 | 0 | } |
1196 | 0 | return result; |
1197 | 0 | } |
1198 | | |
1199 | 0 | void Environment::FlushCompileCache() { |
1200 | 0 | if (!compile_cache_handler_ || compile_cache_handler_->cache_dir().empty()) { |
1201 | 0 | return; |
1202 | 0 | } |
1203 | 0 | compile_cache_handler_->Persist(); |
1204 | 0 | } |
1205 | | |
1206 | 35 | void Environment::ExitEnv(StopFlags::Flags flags) { |
1207 | | // Should not access non-thread-safe methods here. |
1208 | 35 | set_stopping(true); |
1209 | | |
1210 | 35 | #if HAVE_INSPECTOR |
1211 | 35 | if (inspector_agent_) { |
1212 | 35 | inspector_agent_->StopIfWaitingForConnect(); |
1213 | 35 | } |
1214 | 35 | #endif |
1215 | | |
1216 | 35 | if ((flags & StopFlags::kDoNotTerminateIsolate) == 0) |
1217 | 35 | isolate_->TerminateExecution(); |
1218 | 35 | SetImmediateThreadsafe([](Environment* env) { |
1219 | 35 | env->set_can_call_into_js(false); |
1220 | 35 | uv_stop(env->event_loop()); |
1221 | 35 | }); |
1222 | 35 | } |
1223 | | |
1224 | 35 | void Environment::ClosePerEnvHandles() { |
1225 | | // If LoadEnvironment and InitializeLibuv are not called, like when building |
1226 | | // snapshots, skip closing the per environment handles. |
1227 | 35 | if (!env_handle_initialized_) { |
1228 | 0 | return; |
1229 | 0 | } |
1230 | | |
1231 | 210 | auto close_and_finish = [&](uv_handle_t* handle) { |
1232 | 210 | CloseHandle(handle, [](uv_handle_t* handle) { |
1233 | | #ifdef DEBUG |
1234 | | memset(handle, 0xab, uv_handle_size(handle->type)); |
1235 | | #endif |
1236 | 210 | }); |
1237 | 210 | }; |
1238 | | |
1239 | 35 | close_and_finish(reinterpret_cast<uv_handle_t*>(timer_handle())); |
1240 | 35 | close_and_finish(reinterpret_cast<uv_handle_t*>(immediate_check_handle())); |
1241 | 35 | close_and_finish(reinterpret_cast<uv_handle_t*>(immediate_idle_handle())); |
1242 | 35 | close_and_finish(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_)); |
1243 | 35 | close_and_finish(reinterpret_cast<uv_handle_t*>(&idle_check_handle_)); |
1244 | 35 | close_and_finish(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
1245 | 35 | } |
1246 | | |
1247 | 70 | void Environment::CleanupHandles() { |
1248 | 70 | { |
1249 | 70 | Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
1250 | 70 | task_queues_async_initialized_ = false; |
1251 | 70 | } |
1252 | | |
1253 | 70 | Isolate::DisallowJavascriptExecutionScope disallow_js(isolate(), |
1254 | 70 | Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE); |
1255 | | |
1256 | 70 | RunAndClearNativeImmediates(true /* skip unrefed SetImmediate()s */); |
1257 | | |
1258 | 70 | for (ReqWrapBase* request : req_wrap_queue_) |
1259 | 0 | request->Cancel(); |
1260 | | |
1261 | 70 | for (HandleWrap* handle : handle_wrap_queue_) |
1262 | 0 | handle->Close(); |
1263 | | |
1264 | 175 | while (handle_cleanup_waiting_ != 0 || |
1265 | 70 | request_waiting_ != 0 || |
1266 | 105 | !handle_wrap_queue_.IsEmpty()) { |
1267 | 105 | uv_run(event_loop(), UV_RUN_ONCE); |
1268 | 105 | } |
1269 | 70 | } |
1270 | | |
1271 | 35 | void Environment::StartProfilerIdleNotifier() { |
1272 | 35 | uv_prepare_start(&idle_prepare_handle_, [](uv_prepare_t* handle) { |
1273 | 0 | Environment* env = ContainerOf(&Environment::idle_prepare_handle_, handle); |
1274 | 0 | env->isolate()->SetIdle(true); |
1275 | 0 | }); |
1276 | 35 | uv_check_start(&idle_check_handle_, [](uv_check_t* handle) { |
1277 | 0 | Environment* env = ContainerOf(&Environment::idle_check_handle_, handle); |
1278 | 0 | env->isolate()->SetIdle(false); |
1279 | 0 | }); |
1280 | 35 | } |
1281 | | |
1282 | 0 | void Environment::PrintSyncTrace() const { |
1283 | 0 | if (!trace_sync_io_) [[likely]] |
1284 | 0 | return; |
1285 | | |
1286 | 0 | HandleScope handle_scope(isolate()); |
1287 | |
|
1288 | 0 | fprintf( |
1289 | 0 | stderr, "(node:%d) WARNING: Detected use of sync API\n", uv_os_getpid()); |
1290 | 0 | PrintStackTrace( |
1291 | 0 | isolate(), |
1292 | 0 | StackTrace::CurrentStackTrace(isolate(), |
1293 | 0 | static_cast<int>(stack_trace_limit()), |
1294 | 0 | StackTrace::kDetailed)); |
1295 | 0 | } |
1296 | | |
1297 | 0 | MaybeLocal<Value> Environment::RunSnapshotSerializeCallback() const { |
1298 | 0 | EscapableHandleScope handle_scope(isolate()); |
1299 | 0 | if (!snapshot_serialize_callback().IsEmpty()) { |
1300 | 0 | Context::Scope context_scope(context()); |
1301 | 0 | return handle_scope.EscapeMaybe(snapshot_serialize_callback()->Call( |
1302 | 0 | context(), v8::Undefined(isolate()), 0, nullptr)); |
1303 | 0 | } |
1304 | 0 | return handle_scope.Escape(Undefined(isolate())); |
1305 | 0 | } |
1306 | | |
1307 | 0 | MaybeLocal<Value> Environment::RunSnapshotDeserializeMain() const { |
1308 | 0 | EscapableHandleScope handle_scope(isolate()); |
1309 | 0 | if (!snapshot_deserialize_main().IsEmpty()) { |
1310 | 0 | Context::Scope context_scope(context()); |
1311 | 0 | return handle_scope.EscapeMaybe(snapshot_deserialize_main()->Call( |
1312 | 0 | context(), v8::Undefined(isolate()), 0, nullptr)); |
1313 | 0 | } |
1314 | 0 | return handle_scope.Escape(Undefined(isolate())); |
1315 | 0 | } |
1316 | | |
1317 | 35 | void Environment::RunCleanup() { |
1318 | 35 | started_cleanup_ = true; |
1319 | 35 | TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "RunCleanup"); |
1320 | 35 | ClosePerEnvHandles(); |
1321 | | // Only BaseObject's cleanups are registered as per-realm cleanup hooks now. |
1322 | | // Defer the BaseObject cleanup after handles are cleaned up. |
1323 | 35 | CleanupHandles(); |
1324 | | |
1325 | 35 | while (!cleanable_queue_.IsEmpty()) { |
1326 | 0 | Cleanable* cleanable = cleanable_queue_.PopFront(); |
1327 | 0 | cleanable->Clean(); |
1328 | 0 | } |
1329 | | |
1330 | 70 | while (!cleanup_queue_.empty() || principal_realm_->PendingCleanup() || |
1331 | 35 | native_immediates_.size() > 0 || |
1332 | 35 | native_immediates_threadsafe_.size() > 0 || |
1333 | 35 | native_immediates_interrupts_.size() > 0) { |
1334 | | // TODO(legendecas): cleanup handles in per-realm cleanup hooks as well. |
1335 | 35 | principal_realm_->RunCleanup(); |
1336 | 35 | cleanup_queue_.Drain(); |
1337 | 35 | CleanupHandles(); |
1338 | 35 | } |
1339 | | |
1340 | 35 | for (const int fd : unmanaged_fds_) { |
1341 | 0 | uv_fs_t close_req; |
1342 | 0 | uv_fs_close(nullptr, &close_req, fd, nullptr); |
1343 | 0 | uv_fs_req_cleanup(&close_req); |
1344 | 0 | } |
1345 | 35 | } |
1346 | | |
1347 | 70 | void Environment::RunAtExitCallbacks() { |
1348 | 70 | TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "AtExit"); |
1349 | 70 | for (ExitCallback at_exit : at_exit_functions_) { |
1350 | 70 | at_exit.cb_(at_exit.arg_); |
1351 | 70 | } |
1352 | 70 | at_exit_functions_.clear(); |
1353 | 70 | } |
1354 | | |
1355 | 70 | void Environment::AtExit(void (*cb)(void* arg), void* arg) { |
1356 | 70 | at_exit_functions_.push_front(ExitCallback{cb, arg}); |
1357 | 70 | } |
1358 | | |
1359 | 0 | Maybe<bool> Environment::CheckUnsettledTopLevelAwait() const { |
1360 | 0 | HandleScope scope(isolate_); |
1361 | 0 | Local<Context> ctx = context(); |
1362 | 0 | Local<Value> value; |
1363 | |
|
1364 | 0 | Local<Value> entry_point_promise; |
1365 | 0 | if (!ctx->Global() |
1366 | 0 | ->GetPrivate(ctx, entry_point_promise_private_symbol()) |
1367 | 0 | .ToLocal(&entry_point_promise)) { |
1368 | 0 | return v8::Nothing<bool>(); |
1369 | 0 | } |
1370 | 0 | if (!entry_point_promise->IsPromise()) { |
1371 | 0 | return v8::Just(true); |
1372 | 0 | } |
1373 | 0 | if (entry_point_promise.As<Promise>()->State() != |
1374 | 0 | Promise::PromiseState::kPending) { |
1375 | 0 | return v8::Just(true); |
1376 | 0 | } |
1377 | | |
1378 | 0 | if (!ctx->Global() |
1379 | 0 | ->GetPrivate(ctx, entry_point_module_private_symbol()) |
1380 | 0 | .ToLocal(&value)) { |
1381 | 0 | return v8::Nothing<bool>(); |
1382 | 0 | } |
1383 | 0 | if (!value->IsObject()) { |
1384 | 0 | return v8::Just(true); |
1385 | 0 | } |
1386 | 0 | Local<Object> object = value.As<Object>(); |
1387 | 0 | CHECK(BaseObject::IsBaseObject(isolate_data_, object)); |
1388 | 0 | CHECK_EQ(object->InternalFieldCount(), |
1389 | 0 | loader::ModuleWrap::kInternalFieldCount); |
1390 | 0 | auto* wrap = BaseObject::FromJSObject<loader::ModuleWrap>(object); |
1391 | 0 | return wrap->CheckUnsettledTopLevelAwait(); |
1392 | 0 | } |
1393 | | |
1394 | 70 | void Environment::RunAndClearInterrupts() { |
1395 | 70 | while (native_immediates_interrupts_.size() > 0) { |
1396 | 0 | NativeImmediateQueue queue; |
1397 | 0 | { |
1398 | 0 | Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
1399 | 0 | queue.ConcatMove(std::move(native_immediates_interrupts_)); |
1400 | 0 | } |
1401 | 0 | DebugSealHandleScope seal_handle_scope(isolate()); |
1402 | |
|
1403 | 0 | while (auto head = queue.Shift()) |
1404 | 0 | head->Call(this); |
1405 | 0 | } |
1406 | 70 | } |
1407 | | |
1408 | 70 | void Environment::RunAndClearNativeImmediates(bool only_refed) { |
1409 | 70 | TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), |
1410 | 70 | "RunAndClearNativeImmediates"); |
1411 | 70 | HandleScope handle_scope(isolate_); |
1412 | | // In case the Isolate is no longer accessible just use an empty Local. This |
1413 | | // is not an issue for InternalCallbackScope as this case is already handled |
1414 | | // in its constructor but we avoid calls into v8 which can crash the process |
1415 | | // in debug builds. |
1416 | 70 | Local<Object> obj = |
1417 | 70 | can_call_into_js() ? Object::New(isolate_) : Local<Object>(); |
1418 | 70 | InternalCallbackScope cb_scope(this, obj, {0, 0}); |
1419 | | |
1420 | 70 | size_t ref_count = 0; |
1421 | | |
1422 | | // Handle interrupts first. These functions are not allowed to throw |
1423 | | // exceptions, so we do not need to handle that. |
1424 | 70 | RunAndClearInterrupts(); |
1425 | | |
1426 | 140 | auto drain_list = [&](NativeImmediateQueue* queue) { |
1427 | 140 | TryCatchScope try_catch(this); |
1428 | 140 | DebugSealHandleScope seal_handle_scope(isolate()); |
1429 | 175 | while (auto head = queue->Shift()) { |
1430 | 35 | bool is_refed = head->flags() & CallbackFlags::kRefed; |
1431 | 35 | if (is_refed) |
1432 | 35 | ref_count++; |
1433 | | |
1434 | 35 | if (is_refed || !only_refed) |
1435 | 35 | head->Call(this); |
1436 | | |
1437 | 35 | head.reset(); // Destroy now so that this is also observed by try_catch. |
1438 | | |
1439 | 35 | if (try_catch.HasCaught()) [[unlikely]] { |
1440 | 0 | if (!try_catch.HasTerminated() && can_call_into_js()) |
1441 | 0 | errors::TriggerUncaughtException(isolate(), try_catch); |
1442 | |
|
1443 | 0 | return true; |
1444 | 0 | } |
1445 | 35 | } |
1446 | 140 | return false; |
1447 | 140 | }; |
1448 | 70 | while (drain_list(&native_immediates_)) {} |
1449 | | |
1450 | 70 | immediate_info()->ref_count_dec(ref_count); |
1451 | | |
1452 | 70 | if (immediate_info()->ref_count() == 0) |
1453 | 70 | ToggleImmediateRef(false); |
1454 | | |
1455 | | // It is safe to check .size() first, because there is a causal relationship |
1456 | | // between pushes to the threadsafe immediate list and this function being |
1457 | | // called. For the common case, it's worth checking the size first before |
1458 | | // establishing a mutex lock. |
1459 | | // This is intentionally placed after the `ref_count` handling, because when |
1460 | | // refed threadsafe immediates are created, they are not counted towards the |
1461 | | // count in immediate_info() either. |
1462 | 70 | NativeImmediateQueue threadsafe_immediates; |
1463 | 70 | if (native_immediates_threadsafe_.size() > 0) { |
1464 | 35 | Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
1465 | 35 | threadsafe_immediates.ConcatMove(std::move(native_immediates_threadsafe_)); |
1466 | 35 | } |
1467 | 70 | while (drain_list(&threadsafe_immediates)) {} |
1468 | 70 | } |
1469 | | |
1470 | 0 | void Environment::RequestInterruptFromV8() { |
1471 | | // The Isolate may outlive the Environment, so some logic to handle the |
1472 | | // situation in which the Environment is destroyed before the handler runs |
1473 | | // is required. |
1474 | | |
1475 | | // We allocate a new pointer to a pointer to this Environment instance, and |
1476 | | // try to set it as interrupt_data_. If interrupt_data_ was already set, then |
1477 | | // callbacks are already scheduled to run and we can delete our own pointer |
1478 | | // and just return. If it was nullptr previously, the Environment** is stored; |
1479 | | // ~Environment sets the Environment* contained in it to nullptr, so that |
1480 | | // the callback can check whether ~Environment has already run and it is thus |
1481 | | // not safe to access the Environment instance itself. |
1482 | 0 | Environment** interrupt_data = new Environment*(this); |
1483 | 0 | Environment** dummy = nullptr; |
1484 | 0 | if (!interrupt_data_.compare_exchange_strong(dummy, interrupt_data)) { |
1485 | 0 | delete interrupt_data; |
1486 | 0 | return; // Already scheduled. |
1487 | 0 | } |
1488 | | |
1489 | 0 | isolate()->RequestInterrupt([](Isolate* isolate, void* data) { |
1490 | 0 | std::unique_ptr<Environment*> env_ptr { static_cast<Environment**>(data) }; |
1491 | 0 | Environment* env = *env_ptr; |
1492 | 0 | if (env == nullptr) { |
1493 | | // The Environment has already been destroyed. That should be okay; any |
1494 | | // callback added before the Environment shuts down would have been |
1495 | | // handled during cleanup. |
1496 | 0 | return; |
1497 | 0 | } |
1498 | 0 | env->interrupt_data_.store(nullptr); |
1499 | 0 | env->RunAndClearInterrupts(); |
1500 | 0 | }, interrupt_data); |
1501 | 0 | } |
1502 | | |
1503 | 0 | void Environment::ScheduleTimer(int64_t duration_ms) { |
1504 | 0 | if (started_cleanup_) return; |
1505 | 0 | uv_timer_start(timer_handle(), RunTimers, duration_ms, 0); |
1506 | 0 | } |
1507 | | |
1508 | 0 | void Environment::ToggleTimerRef(bool ref) { |
1509 | 0 | if (started_cleanup_) return; |
1510 | | |
1511 | 0 | if (ref) { |
1512 | 0 | uv_ref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
1513 | 0 | } else { |
1514 | 0 | uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
1515 | 0 | } |
1516 | 0 | } |
1517 | | |
1518 | 0 | void Environment::RunTimers(uv_timer_t* handle) { |
1519 | 0 | Environment* env = Environment::from_timer_handle(handle); |
1520 | 0 | TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "RunTimers"); |
1521 | |
|
1522 | 0 | if (!env->can_call_into_js()) |
1523 | 0 | return; |
1524 | | |
1525 | 0 | HandleScope handle_scope(env->isolate()); |
1526 | 0 | Context::Scope context_scope(env->context()); |
1527 | |
|
1528 | 0 | Local<Object> process = env->process_object(); |
1529 | 0 | InternalCallbackScope scope(env, process, {0, 0}); |
1530 | |
|
1531 | 0 | Local<Function> cb = env->timers_callback_function(); |
1532 | 0 | MaybeLocal<Value> ret; |
1533 | 0 | Local<Value> arg = env->GetNow(); |
1534 | | // This code will loop until all currently due timers will process. It is |
1535 | | // impossible for us to end up in an infinite loop due to how the JS-side |
1536 | | // is structured. |
1537 | 0 | do { |
1538 | 0 | TryCatchScope try_catch(env); |
1539 | 0 | try_catch.SetVerbose(true); |
1540 | 0 | ret = cb->Call(env->context(), process, 1, &arg); |
1541 | 0 | } while (ret.IsEmpty() && env->can_call_into_js()); |
1542 | | |
1543 | | // NOTE(apapirovski): If it ever becomes possible that `call_into_js` above |
1544 | | // is reset back to `true` after being previously set to `false` then this |
1545 | | // code becomes invalid and needs to be rewritten. Otherwise catastrophic |
1546 | | // timers corruption will occur and all timers behaviour will become |
1547 | | // entirely unpredictable. |
1548 | 0 | if (ret.IsEmpty()) |
1549 | 0 | return; |
1550 | | |
1551 | | // To allow for less JS-C++ boundary crossing, the value returned from JS |
1552 | | // serves a few purposes: |
1553 | | // 1. If it's 0, no more timers exist and the handle should be unrefed |
1554 | | // 2. If it's > 0, the value represents the next timer's expiry and there |
1555 | | // is at least one timer remaining that is refed. |
1556 | | // 3. If it's < 0, the absolute value represents the next timer's expiry |
1557 | | // and there are no timers that are refed. |
1558 | 0 | int64_t expiry_ms = |
1559 | 0 | ret.ToLocalChecked()->IntegerValue(env->context()).FromJust(); |
1560 | |
|
1561 | 0 | auto* h = reinterpret_cast<uv_handle_t*>(handle); |
1562 | |
|
1563 | 0 | if (expiry_ms != 0) { |
1564 | 0 | int64_t duration_ms = |
1565 | 0 | llabs(expiry_ms) - (uv_now(env->event_loop()) - env->timer_base()); |
1566 | |
|
1567 | 0 | env->ScheduleTimer(duration_ms > 0 ? duration_ms : 1); |
1568 | |
|
1569 | 0 | if (expiry_ms > 0) |
1570 | 0 | uv_ref(h); |
1571 | 0 | else |
1572 | 0 | uv_unref(h); |
1573 | 0 | } else { |
1574 | 0 | uv_unref(h); |
1575 | 0 | } |
1576 | 0 | } |
1577 | | |
1578 | | |
1579 | 0 | void Environment::CheckImmediate(uv_check_t* handle) { |
1580 | 0 | Environment* env = Environment::from_immediate_check_handle(handle); |
1581 | 0 | TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "CheckImmediate"); |
1582 | |
|
1583 | 0 | HandleScope scope(env->isolate()); |
1584 | 0 | Context::Scope context_scope(env->context()); |
1585 | |
|
1586 | 0 | env->RunAndClearNativeImmediates(); |
1587 | |
|
1588 | 0 | if (env->immediate_info()->count() == 0 || !env->can_call_into_js()) |
1589 | 0 | return; |
1590 | | |
1591 | 0 | do { |
1592 | 0 | MakeCallback(env->isolate(), |
1593 | 0 | env->process_object(), |
1594 | 0 | env->immediate_callback_function(), |
1595 | 0 | 0, |
1596 | 0 | nullptr, |
1597 | 0 | {0, 0}).ToLocalChecked(); |
1598 | 0 | } while (env->immediate_info()->has_outstanding() && env->can_call_into_js()); |
1599 | |
|
1600 | 0 | if (env->immediate_info()->ref_count() == 0) |
1601 | 0 | env->ToggleImmediateRef(false); |
1602 | 0 | } |
1603 | | |
1604 | 70 | void Environment::ToggleImmediateRef(bool ref) { |
1605 | 70 | if (started_cleanup_) return; |
1606 | | |
1607 | 0 | if (ref) { |
1608 | | // Idle handle is needed only to stop the event loop from blocking in poll. |
1609 | 0 | uv_idle_start(immediate_idle_handle(), [](uv_idle_t*){ }); |
1610 | 0 | } else { |
1611 | 0 | uv_idle_stop(immediate_idle_handle()); |
1612 | 0 | } |
1613 | 0 | } |
1614 | | |
1615 | 0 | uint64_t Environment::GetNowUint64() { |
1616 | 0 | uv_update_time(event_loop()); |
1617 | 0 | uint64_t now = uv_now(event_loop()); |
1618 | 0 | CHECK_GE(now, timer_base()); |
1619 | 0 | now -= timer_base(); |
1620 | 0 | return now; |
1621 | 0 | } |
1622 | | |
1623 | 0 | Local<Value> Environment::GetNow() { |
1624 | 0 | uint64_t now = GetNowUint64(); |
1625 | 0 | if (now <= 0xffffffff) |
1626 | 0 | return Integer::NewFromUnsigned(isolate(), static_cast<uint32_t>(now)); |
1627 | 0 | return Number::New(isolate(), static_cast<double>(now)); |
1628 | 0 | } |
1629 | | |
1630 | | Maybe<void> CollectExceptionInfo(Environment* env, |
1631 | | Local<Object> obj, |
1632 | | int errorno, |
1633 | | const char* err_string, |
1634 | | const char* syscall, |
1635 | | const char* message, |
1636 | | const char* path, |
1637 | 0 | const char* dest) { |
1638 | 0 | if (obj->Set(env->context(), |
1639 | 0 | env->errno_string(), |
1640 | 0 | Integer::New(env->isolate(), errorno)) |
1641 | 0 | .IsNothing() || |
1642 | 0 | obj->Set(env->context(), |
1643 | 0 | env->code_string(), |
1644 | 0 | OneByteString(env->isolate(), err_string)) |
1645 | 0 | .IsNothing() || |
1646 | 0 | (message != nullptr && obj->Set(env->context(), |
1647 | 0 | env->message_string(), |
1648 | 0 | OneByteString(env->isolate(), message)) |
1649 | 0 | .IsNothing())) { |
1650 | 0 | return Nothing<void>(); |
1651 | 0 | } |
1652 | | |
1653 | 0 | Local<Value> path_buffer; |
1654 | 0 | if (path != nullptr) { |
1655 | 0 | if (!Buffer::Copy(env->isolate(), path, strlen(path)) |
1656 | 0 | .ToLocal(&path_buffer) || |
1657 | 0 | obj->Set(env->context(), env->path_string(), path_buffer).IsNothing()) { |
1658 | 0 | return Nothing<void>(); |
1659 | 0 | } |
1660 | 0 | } |
1661 | | |
1662 | 0 | Local<Value> dest_buffer; |
1663 | 0 | if (dest != nullptr) { |
1664 | 0 | if (!Buffer::Copy(env->isolate(), dest, strlen(dest)) |
1665 | 0 | .ToLocal(&dest_buffer) || |
1666 | 0 | obj->Set(env->context(), env->dest_string(), dest_buffer).IsNothing()) { |
1667 | 0 | return Nothing<void>(); |
1668 | 0 | } |
1669 | 0 | } |
1670 | | |
1671 | 0 | if (syscall != nullptr) { |
1672 | 0 | if (obj->Set(env->context(), |
1673 | 0 | env->syscall_string(), |
1674 | 0 | OneByteString(env->isolate(), syscall)) |
1675 | 0 | .IsNothing()) { |
1676 | 0 | return Nothing<void>(); |
1677 | 0 | } |
1678 | 0 | } |
1679 | | |
1680 | 0 | return JustVoid(); |
1681 | 0 | } |
1682 | | |
1683 | | Maybe<void> Environment::CollectUVExceptionInfo(Local<Value> object, |
1684 | | int errorno, |
1685 | | const char* syscall, |
1686 | | const char* message, |
1687 | | const char* path, |
1688 | 0 | const char* dest) { |
1689 | 0 | if (!object->IsObject() || errorno == 0) return JustVoid(); |
1690 | | |
1691 | 0 | Local<Object> obj = object.As<Object>(); |
1692 | 0 | const char* err_string = uv_err_name(errorno); |
1693 | |
|
1694 | 0 | if (message == nullptr || message[0] == '\0') { |
1695 | 0 | message = uv_strerror(errorno); |
1696 | 0 | } |
1697 | |
|
1698 | 0 | return CollectExceptionInfo( |
1699 | 0 | this, obj, errorno, err_string, syscall, message, path, dest); |
1700 | 0 | } |
1701 | | |
1702 | | ImmediateInfo::ImmediateInfo(Isolate* isolate, const SerializeInfo* info) |
1703 | 35 | : fields_(isolate, kFieldsCount, MAYBE_FIELD_PTR(info, fields)) {} |
1704 | | |
1705 | | ImmediateInfo::SerializeInfo ImmediateInfo::Serialize( |
1706 | 0 | Local<Context> context, SnapshotCreator* creator) { |
1707 | 0 | return {fields_.Serialize(context, creator)}; |
1708 | 0 | } |
1709 | | |
1710 | 0 | void ImmediateInfo::Deserialize(Local<Context> context) { |
1711 | 0 | fields_.Deserialize(context); |
1712 | 0 | } |
1713 | | |
1714 | | std::ostream& operator<<(std::ostream& output, |
1715 | 0 | const ImmediateInfo::SerializeInfo& i) { |
1716 | 0 | output << "{ " << i.fields << " }"; |
1717 | 0 | return output; |
1718 | 0 | } |
1719 | | |
1720 | 0 | void ImmediateInfo::MemoryInfo(MemoryTracker* tracker) const { |
1721 | 0 | tracker->TrackField("fields", fields_); |
1722 | 0 | } |
1723 | | |
1724 | | TickInfo::SerializeInfo TickInfo::Serialize(Local<Context> context, |
1725 | 0 | SnapshotCreator* creator) { |
1726 | 0 | return {fields_.Serialize(context, creator)}; |
1727 | 0 | } |
1728 | | |
1729 | 0 | void TickInfo::Deserialize(Local<Context> context) { |
1730 | 0 | fields_.Deserialize(context); |
1731 | 0 | } |
1732 | | |
1733 | | std::ostream& operator<<(std::ostream& output, |
1734 | 0 | const TickInfo::SerializeInfo& i) { |
1735 | 0 | output << "{ " << i.fields << " }"; |
1736 | 0 | return output; |
1737 | 0 | } |
1738 | | |
1739 | 0 | void TickInfo::MemoryInfo(MemoryTracker* tracker) const { |
1740 | 0 | tracker->TrackField("fields", fields_); |
1741 | 0 | } |
1742 | | |
1743 | | TickInfo::TickInfo(Isolate* isolate, const SerializeInfo* info) |
1744 | 35 | : fields_( |
1745 | 35 | isolate, kFieldsCount, info == nullptr ? nullptr : &(info->fields)) {} |
1746 | | |
1747 | | AsyncHooks::AsyncHooks(Isolate* isolate, const SerializeInfo* info) |
1748 | 35 | : async_ids_stack_(isolate, 16 * 2, MAYBE_FIELD_PTR(info, async_ids_stack)), |
1749 | 35 | fields_(isolate, kFieldsCount, MAYBE_FIELD_PTR(info, fields)), |
1750 | 35 | async_id_fields_( |
1751 | 35 | isolate, kUidFieldsCount, MAYBE_FIELD_PTR(info, async_id_fields)), |
1752 | 35 | info_(info) { |
1753 | 35 | HandleScope handle_scope(isolate); |
1754 | 35 | if (info == nullptr) { |
1755 | 35 | clear_async_id_stack(); |
1756 | | |
1757 | | // Always perform async_hooks checks, not just when async_hooks is enabled. |
1758 | | // Can be disabled via CLI option --no-force-async-hooks-checks |
1759 | | // See discussion in https://github.com/nodejs/node/pull/15454 |
1760 | | // When removing this, do it by reverting the commit. Otherwise the test |
1761 | | // and flag changes won't be included. |
1762 | 35 | fields_[kCheck] = 1; |
1763 | | |
1764 | | // kDefaultTriggerAsyncId should be -1, this indicates that there is no |
1765 | | // specified default value and it should fallback to the executionAsyncId. |
1766 | | // 0 is not used as the magic value, because that indicates a missing |
1767 | | // context which is different from a default context. |
1768 | 35 | async_id_fields_[AsyncHooks::kDefaultTriggerAsyncId] = -1; |
1769 | | |
1770 | | // kAsyncIdCounter should start at 1 because that'll be the id the execution |
1771 | | // context during bootstrap (code that runs before entering uv_run()). |
1772 | 35 | async_id_fields_[AsyncHooks::kAsyncIdCounter] = 1; |
1773 | 35 | } |
1774 | 35 | } |
1775 | | |
1776 | 0 | void AsyncHooks::Deserialize(Local<Context> context) { |
1777 | 0 | async_ids_stack_.Deserialize(context); |
1778 | 0 | fields_.Deserialize(context); |
1779 | 0 | async_id_fields_.Deserialize(context); |
1780 | |
|
1781 | 0 | Local<Array> js_execution_async_resources; |
1782 | 0 | if (info_->js_execution_async_resources != 0) { |
1783 | 0 | js_execution_async_resources = |
1784 | 0 | context->GetDataFromSnapshotOnce<Array>( |
1785 | 0 | info_->js_execution_async_resources).ToLocalChecked(); |
1786 | 0 | } else { |
1787 | 0 | js_execution_async_resources = Array::New(Isolate::GetCurrent()); |
1788 | 0 | } |
1789 | 0 | js_execution_async_resources_.Reset(Isolate::GetCurrent(), |
1790 | 0 | js_execution_async_resources); |
1791 | | |
1792 | | // The native_execution_async_resources_ field requires v8::Local<> instances |
1793 | | // for async calls whose resources were on the stack as JS objects when they |
1794 | | // were entered. We cannot recreate this here; however, storing these values |
1795 | | // on the JS equivalent gives the same result, so we do that instead. |
1796 | 0 | for (size_t i = 0; i < info_->native_execution_async_resources.size(); ++i) { |
1797 | 0 | if (info_->native_execution_async_resources[i] == SIZE_MAX) |
1798 | 0 | continue; |
1799 | 0 | Local<Object> obj = context->GetDataFromSnapshotOnce<Object>( |
1800 | 0 | info_->native_execution_async_resources[i]) |
1801 | 0 | .ToLocalChecked(); |
1802 | 0 | js_execution_async_resources->Set(context, i, obj).Check(); |
1803 | 0 | } |
1804 | 0 | info_ = nullptr; |
1805 | 0 | } |
1806 | | |
1807 | | std::ostream& operator<<(std::ostream& output, |
1808 | 0 | const AsyncHooks::SerializeInfo& i) { |
1809 | 0 | output << "{\n" |
1810 | 0 | << " " << i.async_ids_stack << ", // async_ids_stack\n" |
1811 | 0 | << " " << i.fields << ", // fields\n" |
1812 | 0 | << " " << i.async_id_fields << ", // async_id_fields\n" |
1813 | 0 | << " " << i.js_execution_async_resources |
1814 | 0 | << ", // js_execution_async_resources\n" |
1815 | 0 | << " " << i.native_execution_async_resources |
1816 | 0 | << ", // native_execution_async_resources\n" |
1817 | 0 | << "}"; |
1818 | 0 | return output; |
1819 | 0 | } |
1820 | | |
1821 | | AsyncHooks::SerializeInfo AsyncHooks::Serialize(Local<Context> context, |
1822 | 0 | SnapshotCreator* creator) { |
1823 | 0 | SerializeInfo info; |
1824 | | // TODO(joyeecheung): some of these probably don't need to be serialized. |
1825 | 0 | info.async_ids_stack = async_ids_stack_.Serialize(context, creator); |
1826 | 0 | info.fields = fields_.Serialize(context, creator); |
1827 | 0 | info.async_id_fields = async_id_fields_.Serialize(context, creator); |
1828 | 0 | if (!js_execution_async_resources_.IsEmpty()) { |
1829 | 0 | info.js_execution_async_resources = creator->AddData( |
1830 | 0 | context, js_execution_async_resources_.Get(Isolate::GetCurrent())); |
1831 | 0 | CHECK_NE(info.js_execution_async_resources, 0); |
1832 | 0 | } else { |
1833 | 0 | info.js_execution_async_resources = 0; |
1834 | 0 | } |
1835 | | |
1836 | 0 | info.native_execution_async_resources.resize( |
1837 | 0 | native_execution_async_resources_.size()); |
1838 | 0 | for (size_t i = 0; i < native_execution_async_resources_.size(); i++) { |
1839 | 0 | auto resource = native_execution_async_resource(i); |
1840 | 0 | info.native_execution_async_resources[i] = |
1841 | 0 | resource.IsEmpty() ? SIZE_MAX : creator->AddData(context, resource); |
1842 | 0 | } |
1843 | | |
1844 | | // At the moment, promise hooks are not supported in the startup snapshot. |
1845 | | // TODO(joyeecheung): support promise hooks in the startup snapshot. |
1846 | 0 | CHECK(js_promise_hooks_[0].IsEmpty()); |
1847 | 0 | CHECK(js_promise_hooks_[1].IsEmpty()); |
1848 | 0 | CHECK(js_promise_hooks_[2].IsEmpty()); |
1849 | 0 | CHECK(js_promise_hooks_[3].IsEmpty()); |
1850 | | |
1851 | 0 | return info; |
1852 | 0 | } |
1853 | | |
1854 | 0 | void AsyncHooks::MemoryInfo(MemoryTracker* tracker) const { |
1855 | 0 | tracker->TrackField("async_ids_stack", async_ids_stack_); |
1856 | 0 | tracker->TrackField("fields", fields_); |
1857 | 0 | tracker->TrackField("async_id_fields", async_id_fields_); |
1858 | 0 | tracker->TrackField("js_promise_hooks", js_promise_hooks_); |
1859 | 0 | } |
1860 | | |
1861 | 0 | void AsyncHooks::grow_async_ids_stack() { |
1862 | 0 | async_ids_stack_.reserve(async_ids_stack_.Length() * 3); |
1863 | |
|
1864 | 0 | env() |
1865 | 0 | ->principal_realm() |
1866 | 0 | ->async_hooks_binding() |
1867 | 0 | ->Set(env()->context(), |
1868 | 0 | env()->async_ids_stack_string(), |
1869 | 0 | async_ids_stack_.GetJSArray()) |
1870 | 0 | .Check(); |
1871 | 0 | } |
1872 | | |
1873 | 0 | void AsyncHooks::FailWithCorruptedAsyncStack(double expected_async_id) { |
1874 | 0 | fprintf(stderr, |
1875 | 0 | "Error: async hook stack has become corrupted (" |
1876 | 0 | "actual: %.f, expected: %.f)\n", |
1877 | 0 | async_id_fields_.GetValue(kExecutionAsyncId), |
1878 | 0 | expected_async_id); |
1879 | 0 | DumpNativeBacktrace(stderr); |
1880 | 0 | DumpJavaScriptBacktrace(stderr); |
1881 | 0 | fflush(stderr); |
1882 | | // TODO(joyeecheung): should this exit code be more specific? |
1883 | 0 | if (!env()->abort_on_uncaught_exception()) Exit(ExitCode::kGenericUserError); |
1884 | 0 | fprintf(stderr, "\n"); |
1885 | 0 | fflush(stderr); |
1886 | 0 | ABORT_NO_BACKTRACE(); |
1887 | 0 | } |
1888 | | |
1889 | 0 | void Environment::Exit(ExitCode exit_code) { |
1890 | 0 | if (options()->trace_exit) { |
1891 | 0 | HandleScope handle_scope(isolate()); |
1892 | 0 | Isolate::DisallowJavascriptExecutionScope disallow_js( |
1893 | 0 | isolate(), Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE); |
1894 | |
|
1895 | 0 | if (is_main_thread()) { |
1896 | 0 | fprintf(stderr, "(node:%d) ", uv_os_getpid()); |
1897 | 0 | } else { |
1898 | 0 | fprintf(stderr, "(node:%d, thread:%" PRIu64 ") ", |
1899 | 0 | uv_os_getpid(), thread_id()); |
1900 | 0 | } |
1901 | |
|
1902 | 0 | fprintf(stderr, |
1903 | 0 | "WARNING: Exited the environment with code %d\n", |
1904 | 0 | static_cast<int>(exit_code)); |
1905 | 0 | PrintStackTrace( |
1906 | 0 | isolate(), |
1907 | 0 | StackTrace::CurrentStackTrace(isolate(), |
1908 | 0 | static_cast<int>(stack_trace_limit()), |
1909 | 0 | StackTrace::kDetailed)); |
1910 | 0 | } |
1911 | 0 | process_exit_handler_(this, exit_code); |
1912 | 0 | } |
1913 | | |
1914 | 35 | void Environment::stop_sub_worker_contexts() { |
1915 | 35 | DCHECK_EQ(Isolate::GetCurrent(), isolate()); |
1916 | | |
1917 | 35 | while (!sub_worker_contexts_.empty()) { |
1918 | 0 | Worker* w = *sub_worker_contexts_.begin(); |
1919 | 0 | remove_sub_worker_context(w); |
1920 | 0 | w->Exit(ExitCode::kGenericUserError); |
1921 | 0 | w->JoinThread(); |
1922 | 0 | } |
1923 | 35 | } |
1924 | | |
1925 | 0 | Environment* Environment::worker_parent_env() const { |
1926 | 0 | if (worker_context() == nullptr) return nullptr; |
1927 | 0 | return worker_context()->env(); |
1928 | 0 | } |
1929 | | |
1930 | 0 | void Environment::AddUnmanagedFd(int fd) { |
1931 | 0 | if (!tracks_unmanaged_fds()) return; |
1932 | 0 | auto result = unmanaged_fds_.insert(fd); |
1933 | 0 | if (!result.second) { |
1934 | 0 | ProcessEmitWarning( |
1935 | 0 | this, "File descriptor %d opened in unmanaged mode twice", fd); |
1936 | 0 | } |
1937 | 0 | } |
1938 | | |
1939 | 0 | void Environment::RemoveUnmanagedFd(int fd) { |
1940 | 0 | if (!tracks_unmanaged_fds()) return; |
1941 | 0 | size_t removed_count = unmanaged_fds_.erase(fd); |
1942 | 0 | if (removed_count == 0) { |
1943 | 0 | ProcessEmitWarning( |
1944 | 0 | this, "File descriptor %d closed but not opened in unmanaged mode", fd); |
1945 | 0 | } |
1946 | 0 | } |
1947 | | |
1948 | 0 | void Environment::PrintInfoForSnapshotIfDebug() { |
1949 | 0 | if (enabled_debug_list()->enabled(DebugCategory::MKSNAPSHOT)) { |
1950 | 0 | fprintf(stderr, "At the exit of the Environment:\n"); |
1951 | 0 | principal_realm()->PrintInfoForSnapshot(); |
1952 | 0 | } |
1953 | 0 | } |
1954 | | |
1955 | 0 | EnvSerializeInfo Environment::Serialize(SnapshotCreator* creator) { |
1956 | 0 | EnvSerializeInfo info; |
1957 | 0 | Local<Context> ctx = context(); |
1958 | |
|
1959 | 0 | info.async_hooks = async_hooks_.Serialize(ctx, creator); |
1960 | 0 | info.immediate_info = immediate_info_.Serialize(ctx, creator); |
1961 | 0 | info.timeout_info = timeout_info_.Serialize(ctx, creator); |
1962 | 0 | info.tick_info = tick_info_.Serialize(ctx, creator); |
1963 | 0 | info.performance_state = performance_state_->Serialize(ctx, creator); |
1964 | 0 | info.exit_info = exit_info_.Serialize(ctx, creator); |
1965 | 0 | info.stream_base_state = stream_base_state_.Serialize(ctx, creator); |
1966 | 0 | info.should_abort_on_uncaught_toggle = |
1967 | 0 | should_abort_on_uncaught_toggle_.Serialize(ctx, creator); |
1968 | |
|
1969 | 0 | info.principal_realm = principal_realm_->Serialize(creator); |
1970 | | // For now we only support serialization of the main context. |
1971 | | // TODO(joyeecheung): support de/serialization of vm contexts. |
1972 | 0 | CHECK_EQ(contexts_.size(), 1); |
1973 | 0 | CHECK_EQ(contexts_[0], context()); |
1974 | 0 | return info; |
1975 | 0 | } |
1976 | | |
1977 | | void Environment::EnqueueDeserializeRequest(DeserializeRequestCallback cb, |
1978 | | Local<Object> holder, |
1979 | | int index, |
1980 | 0 | InternalFieldInfoBase* info) { |
1981 | 0 | DCHECK_IS_SNAPSHOT_SLOT(index); |
1982 | 0 | DeserializeRequest request{cb, {isolate(), holder}, index, info}; |
1983 | 0 | deserialize_requests_.push_back(std::move(request)); |
1984 | 0 | } |
1985 | | |
1986 | 0 | void Environment::RunDeserializeRequests() { |
1987 | 0 | HandleScope scope(isolate()); |
1988 | 0 | Local<Context> ctx = context(); |
1989 | 0 | Isolate* is = isolate(); |
1990 | 0 | while (!deserialize_requests_.empty()) { |
1991 | 0 | DeserializeRequest request(std::move(deserialize_requests_.front())); |
1992 | 0 | deserialize_requests_.pop_front(); |
1993 | 0 | Local<Object> holder = request.holder.Get(is); |
1994 | 0 | request.cb(ctx, holder, request.index, request.info); |
1995 | 0 | request.holder.Reset(); |
1996 | 0 | request.info->Delete(); |
1997 | 0 | } |
1998 | 0 | } |
1999 | | |
2000 | 0 | void Environment::DeserializeProperties(const EnvSerializeInfo* info) { |
2001 | 0 | Local<Context> ctx = context(); |
2002 | |
|
2003 | 0 | if (enabled_debug_list_.enabled(DebugCategory::MKSNAPSHOT)) { |
2004 | 0 | fprintf(stderr, "deserializing EnvSerializeInfo...\n"); |
2005 | 0 | std::cerr << *info << "\n"; |
2006 | 0 | } |
2007 | | |
2008 | | // Deserialize the realm's properties before running the deserialize |
2009 | | // requests as the requests may need to access the realm's properties. |
2010 | 0 | principal_realm_->DeserializeProperties(&info->principal_realm); |
2011 | 0 | RunDeserializeRequests(); |
2012 | |
|
2013 | 0 | async_hooks_.Deserialize(ctx); |
2014 | 0 | immediate_info_.Deserialize(ctx); |
2015 | 0 | timeout_info_.Deserialize(ctx); |
2016 | 0 | tick_info_.Deserialize(ctx); |
2017 | 0 | performance_state_->Deserialize(ctx, time_origin_, time_origin_timestamp_); |
2018 | 0 | exit_info_.Deserialize(ctx); |
2019 | 0 | stream_base_state_.Deserialize(ctx); |
2020 | 0 | should_abort_on_uncaught_toggle_.Deserialize(ctx); |
2021 | 0 | } |
2022 | | |
2023 | | void Environment::BuildEmbedderGraph(Isolate* isolate, |
2024 | | EmbedderGraph* graph, |
2025 | 0 | void* data) { |
2026 | 0 | MemoryTracker tracker(isolate, graph); |
2027 | 0 | auto* env = static_cast<Environment*>(data); |
2028 | | // Start traversing embedder objects from the root Environment object. |
2029 | 0 | tracker.Track(env); |
2030 | 0 | } |
2031 | | |
2032 | 0 | std::optional<uint32_t> GetPromiseId(Environment* env, Local<Promise> promise) { |
2033 | 0 | Local<Value> id_val; |
2034 | 0 | if (!promise->GetPrivate(env->context(), env->promise_trace_id()) |
2035 | 0 | .ToLocal(&id_val) || |
2036 | 0 | !id_val->IsUint32()) { |
2037 | 0 | return std::nullopt; |
2038 | 0 | } |
2039 | 0 | return id_val.As<Uint32>()->Value(); |
2040 | 0 | } |
2041 | | |
2042 | | void Environment::TracePromises(PromiseHookType type, |
2043 | | Local<Promise> promise, |
2044 | 0 | Local<Value> parent) { |
2045 | | // We don't care about the execution of promises, just the |
2046 | | // creation/resolution. |
2047 | 0 | if (type == PromiseHookType::kBefore || type == PromiseHookType::kAfter) { |
2048 | 0 | return; |
2049 | 0 | } |
2050 | 0 | Isolate* isolate = Isolate::GetCurrent(); |
2051 | 0 | Local<Context> context = isolate->GetCurrentContext(); |
2052 | 0 | Environment* env = Environment::GetCurrent(context); |
2053 | 0 | if (env == nullptr) return; |
2054 | | |
2055 | 0 | std::optional<uint32_t> parent_id; |
2056 | 0 | if (!parent.IsEmpty() && parent->IsPromise()) { |
2057 | 0 | parent_id = GetPromiseId(env, parent.As<Promise>()); |
2058 | 0 | } |
2059 | |
|
2060 | 0 | uint32_t id = 0; |
2061 | 0 | std::string action; |
2062 | 0 | if (type == PromiseHookType::kInit) { |
2063 | 0 | id = env->trace_promise_id_counter_++; |
2064 | 0 | promise->SetPrivate( |
2065 | 0 | context, env->promise_trace_id(), Uint32::New(isolate, id)); |
2066 | 0 | action = "created"; |
2067 | 0 | } else if (type == PromiseHookType::kResolve) { |
2068 | 0 | auto opt = GetPromiseId(env, promise); |
2069 | 0 | if (!opt.has_value()) return; |
2070 | 0 | id = opt.value(); |
2071 | 0 | action = "resolved"; |
2072 | 0 | } else { |
2073 | 0 | UNREACHABLE(); |
2074 | 0 | } |
2075 | | |
2076 | 0 | FPrintF(stderr, "[--trace-promises] "); |
2077 | 0 | if (parent_id.has_value()) { |
2078 | 0 | FPrintF(stderr, "promise #%d ", parent_id.value()); |
2079 | 0 | } |
2080 | 0 | FPrintF(stderr, "%s promise #%d\n", action, id); |
2081 | | // TODO(joyeecheung): we can dump the native stack trace too if the |
2082 | | // JS stack trace is empty i.e. it may be resolved on the native side. |
2083 | 0 | PrintCurrentStackTrace(isolate); |
2084 | 0 | } |
2085 | | |
2086 | | size_t Environment::NearHeapLimitCallback(void* data, |
2087 | | size_t current_heap_limit, |
2088 | 0 | size_t initial_heap_limit) { |
2089 | 0 | auto* env = static_cast<Environment*>(data); |
2090 | |
|
2091 | 0 | Debug(env, |
2092 | 0 | DebugCategory::DIAGNOSTICS, |
2093 | 0 | "Invoked NearHeapLimitCallback, processing=%d, " |
2094 | 0 | "current_limit=%" PRIu64 ", " |
2095 | 0 | "initial_limit=%" PRIu64 "\n", |
2096 | 0 | env->is_in_heapsnapshot_heap_limit_callback_, |
2097 | 0 | static_cast<uint64_t>(current_heap_limit), |
2098 | 0 | static_cast<uint64_t>(initial_heap_limit)); |
2099 | |
|
2100 | 0 | size_t max_young_gen_size = env->isolate_data()->max_young_gen_size; |
2101 | 0 | size_t young_gen_size = 0; |
2102 | 0 | size_t old_gen_size = 0; |
2103 | |
|
2104 | 0 | HeapSpaceStatistics stats; |
2105 | 0 | size_t num_heap_spaces = env->isolate()->NumberOfHeapSpaces(); |
2106 | 0 | for (size_t i = 0; i < num_heap_spaces; ++i) { |
2107 | 0 | env->isolate()->GetHeapSpaceStatistics(&stats, i); |
2108 | 0 | if (strcmp(stats.space_name(), "new_space") == 0 || |
2109 | 0 | strcmp(stats.space_name(), "new_large_object_space") == 0) { |
2110 | 0 | young_gen_size += stats.space_used_size(); |
2111 | 0 | } else { |
2112 | 0 | old_gen_size += stats.space_used_size(); |
2113 | 0 | } |
2114 | 0 | } |
2115 | |
|
2116 | 0 | Debug(env, |
2117 | 0 | DebugCategory::DIAGNOSTICS, |
2118 | 0 | "max_young_gen_size=%" PRIu64 ", " |
2119 | 0 | "young_gen_size=%" PRIu64 ", " |
2120 | 0 | "old_gen_size=%" PRIu64 ", " |
2121 | 0 | "total_size=%" PRIu64 "\n", |
2122 | 0 | static_cast<uint64_t>(max_young_gen_size), |
2123 | 0 | static_cast<uint64_t>(young_gen_size), |
2124 | 0 | static_cast<uint64_t>(old_gen_size), |
2125 | 0 | static_cast<uint64_t>(young_gen_size + old_gen_size)); |
2126 | |
|
2127 | 0 | uint64_t available = uv_get_available_memory(); |
2128 | | // TODO(joyeecheung): get a better estimate about the native memory |
2129 | | // usage into the overhead, e.g. based on the count of objects. |
2130 | 0 | uint64_t estimated_overhead = max_young_gen_size; |
2131 | 0 | Debug(env, |
2132 | 0 | DebugCategory::DIAGNOSTICS, |
2133 | 0 | "Estimated available memory=%" PRIu64 ", " |
2134 | 0 | "estimated overhead=%" PRIu64 "\n", |
2135 | 0 | available, |
2136 | 0 | estimated_overhead); |
2137 | | |
2138 | | // This might be hit when the snapshot is being taken in another |
2139 | | // NearHeapLimitCallback invocation. |
2140 | | // When taking the snapshot, objects in the young generation may be |
2141 | | // promoted to the old generation, result in increased heap usage, |
2142 | | // but it should be no more than the young generation size. |
2143 | | // Ideally, this should be as small as possible - the heap limit |
2144 | | // can only be restored when the heap usage falls down below the |
2145 | | // new limit, so in a heap with unbounded growth the isolate |
2146 | | // may eventually crash with this new limit - effectively raising |
2147 | | // the heap limit to the new one. |
2148 | 0 | size_t new_limit = current_heap_limit + max_young_gen_size; |
2149 | 0 | if (env->is_in_heapsnapshot_heap_limit_callback_) { |
2150 | 0 | Debug(env, |
2151 | 0 | DebugCategory::DIAGNOSTICS, |
2152 | 0 | "Not generating snapshots in nested callback. " |
2153 | 0 | "new_limit=%" PRIu64 "\n", |
2154 | 0 | static_cast<uint64_t>(new_limit)); |
2155 | 0 | return new_limit; |
2156 | 0 | } |
2157 | | |
2158 | | // Estimate whether the snapshot is going to use up all the memory |
2159 | | // available to the process. If so, just give up to prevent the system |
2160 | | // from killing the process for a system OOM. |
2161 | 0 | if (estimated_overhead > available) { |
2162 | 0 | Debug(env, |
2163 | 0 | DebugCategory::DIAGNOSTICS, |
2164 | 0 | "Not generating snapshots because it's too risky.\n"); |
2165 | 0 | env->RemoveHeapSnapshotNearHeapLimitCallback(0); |
2166 | | // The new limit must be higher than current_heap_limit or V8 might |
2167 | | // crash. |
2168 | 0 | return new_limit; |
2169 | 0 | } |
2170 | | |
2171 | | // Take the snapshot synchronously. |
2172 | 0 | env->is_in_heapsnapshot_heap_limit_callback_ = true; |
2173 | |
|
2174 | 0 | std::string dir = env->options()->diagnostic_dir; |
2175 | 0 | if (dir.empty()) { |
2176 | 0 | dir = Environment::GetCwd(env->exec_path_); |
2177 | 0 | } |
2178 | 0 | DiagnosticFilename name(env, "Heap", "heapsnapshot"); |
2179 | 0 | std::string filename = dir + kPathSeparator + (*name); |
2180 | |
|
2181 | 0 | Debug(env, DebugCategory::DIAGNOSTICS, "Start generating %s...\n", *name); |
2182 | |
|
2183 | 0 | HeapProfiler::HeapSnapshotOptions options; |
2184 | 0 | options.numerics_mode = HeapProfiler::NumericsMode::kExposeNumericValues; |
2185 | 0 | options.snapshot_mode = HeapProfiler::HeapSnapshotMode::kExposeInternals; |
2186 | 0 | heap::WriteSnapshot(env, filename.c_str(), options); |
2187 | 0 | env->heap_limit_snapshot_taken_ += 1; |
2188 | |
|
2189 | 0 | Debug(env, |
2190 | 0 | DebugCategory::DIAGNOSTICS, |
2191 | 0 | "%" PRIu32 "/%" PRIu32 " snapshots taken.\n", |
2192 | 0 | env->heap_limit_snapshot_taken_, |
2193 | 0 | env->heap_snapshot_near_heap_limit_); |
2194 | | |
2195 | | // Don't take more snapshots than the limit specified. |
2196 | 0 | if (env->heap_limit_snapshot_taken_ == env->heap_snapshot_near_heap_limit_) { |
2197 | 0 | Debug(env, |
2198 | 0 | DebugCategory::DIAGNOSTICS, |
2199 | 0 | "Removing the near heap limit callback"); |
2200 | 0 | env->RemoveHeapSnapshotNearHeapLimitCallback(0); |
2201 | 0 | } |
2202 | |
|
2203 | 0 | FPrintF(stderr, "Wrote snapshot to %s\n", filename.c_str()); |
2204 | | // Tell V8 to reset the heap limit once the heap usage falls down to |
2205 | | // 95% of the initial limit. |
2206 | 0 | env->isolate()->AutomaticallyRestoreInitialHeapLimit(0.95); |
2207 | |
|
2208 | 0 | env->is_in_heapsnapshot_heap_limit_callback_ = false; |
2209 | | |
2210 | | // The new limit must be higher than current_heap_limit or V8 might |
2211 | | // crash. |
2212 | 0 | return new_limit; |
2213 | 0 | } |
2214 | | |
2215 | 0 | inline size_t Environment::SelfSize() const { |
2216 | 0 | size_t size = sizeof(*this); |
2217 | | // Remove non pointer fields that will be tracked in MemoryInfo() |
2218 | | // TODO(joyeecheung): refactor the MemoryTracker interface so |
2219 | | // this can be done for common types within the Track* calls automatically |
2220 | | // if a certain scope is entered. |
2221 | 0 | size -= sizeof(async_hooks_); |
2222 | 0 | size -= sizeof(cleanup_queue_); |
2223 | 0 | size -= sizeof(tick_info_); |
2224 | 0 | size -= sizeof(immediate_info_); |
2225 | 0 | return size; |
2226 | 0 | } |
2227 | | |
2228 | 0 | void Environment::MemoryInfo(MemoryTracker* tracker) const { |
2229 | | // Iterable STLs have their own sizes subtracted from the parent |
2230 | | // by default. |
2231 | 0 | tracker->TrackField("isolate_data", isolate_data_); |
2232 | 0 | tracker->TrackField("destroy_async_id_list", destroy_async_id_list_); |
2233 | 0 | tracker->TrackField("exec_argv", exec_argv_); |
2234 | 0 | tracker->TrackField("exit_info", exit_info_); |
2235 | 0 | tracker->TrackField("should_abort_on_uncaught_toggle", |
2236 | 0 | should_abort_on_uncaught_toggle_); |
2237 | 0 | tracker->TrackField("stream_base_state", stream_base_state_); |
2238 | 0 | tracker->TrackField("cleanup_queue", cleanup_queue_); |
2239 | 0 | tracker->TrackField("async_hooks", async_hooks_); |
2240 | 0 | tracker->TrackField("immediate_info", immediate_info_); |
2241 | 0 | tracker->TrackField("timeout_info", timeout_info_); |
2242 | 0 | tracker->TrackField("tick_info", tick_info_); |
2243 | 0 | tracker->TrackField("principal_realm", principal_realm_); |
2244 | 0 | tracker->TrackField("shadow_realms", shadow_realms_); |
2245 | | |
2246 | | // FIXME(joyeecheung): track other fields in Environment. |
2247 | | // Currently MemoryTracker is unable to track these |
2248 | | // correctly: |
2249 | | // - Internal types that do not implement MemoryRetainer yet |
2250 | | // - STL containers with MemoryRetainer* inside |
2251 | | // - STL containers with numeric types inside that should not have their |
2252 | | // nodes elided e.g. numeric keys in maps. |
2253 | | // We also need to make sure that when we add a non-pointer field as its own |
2254 | | // node, we shift its sizeof() size out of the Environment node. |
2255 | 0 | } |
2256 | | |
2257 | 760k | void Environment::RunWeakRefCleanup() { |
2258 | 760k | isolate()->ClearKeptObjects(); |
2259 | 760k | } |
2260 | | |
2261 | 0 | v8::CpuProfilingResult Environment::StartCpuProfile() { |
2262 | 0 | HandleScope handle_scope(isolate()); |
2263 | 0 | if (!cpu_profiler_) { |
2264 | 0 | cpu_profiler_ = v8::CpuProfiler::New(isolate()); |
2265 | 0 | } |
2266 | 0 | v8::CpuProfilingResult result = cpu_profiler_->Start( |
2267 | 0 | v8::CpuProfilingOptions{v8::CpuProfilingMode::kLeafNodeLineNumbers, |
2268 | 0 | v8::CpuProfilingOptions::kNoSampleLimit}); |
2269 | 0 | if (result.status == v8::CpuProfilingStatus::kStarted) { |
2270 | 0 | pending_profiles_.push_back(result.id); |
2271 | 0 | } |
2272 | 0 | return result; |
2273 | 0 | } |
2274 | | |
2275 | 0 | v8::CpuProfile* Environment::StopCpuProfile(v8::ProfilerId profile_id) { |
2276 | 0 | if (!cpu_profiler_) { |
2277 | 0 | return nullptr; |
2278 | 0 | } |
2279 | 0 | auto it = |
2280 | 0 | std::find(pending_profiles_.begin(), pending_profiles_.end(), profile_id); |
2281 | 0 | if (it == pending_profiles_.end()) { |
2282 | 0 | return nullptr; |
2283 | 0 | } |
2284 | 0 | v8::CpuProfile* profile = cpu_profiler_->Stop(*it); |
2285 | 0 | pending_profiles_.erase(it); |
2286 | 0 | return profile; |
2287 | 0 | } |
2288 | | |
2289 | | } // namespace node |