/src/node/src/inspector_agent.cc
Line | Count | Source (jump to first uncovered line) |
1 | | #include "inspector_agent.h" |
2 | | |
3 | | #include "env-inl.h" |
4 | | #include "inspector/main_thread_interface.h" |
5 | | #include "inspector/node_string.h" |
6 | | #include "inspector/runtime_agent.h" |
7 | | #include "inspector/tracing_agent.h" |
8 | | #include "inspector/worker_agent.h" |
9 | | #include "inspector/worker_inspector.h" |
10 | | #include "inspector_io.h" |
11 | | #include "node/inspector/protocol/Protocol.h" |
12 | | #include "node_errors.h" |
13 | | #include "node_internals.h" |
14 | | #include "node_options-inl.h" |
15 | | #include "node_process-inl.h" |
16 | | #include "node_url.h" |
17 | | #include "permission/permission.h" |
18 | | #include "timer_wrap-inl.h" |
19 | | #include "util-inl.h" |
20 | | #include "v8-inspector.h" |
21 | | #include "v8-platform.h" |
22 | | |
23 | | #include "libplatform/libplatform.h" |
24 | | |
25 | | #ifdef __POSIX__ |
26 | | #include <pthread.h> |
27 | | #include <climits> // PTHREAD_STACK_MIN |
28 | | #endif // __POSIX__ |
29 | | |
30 | | #include <algorithm> |
31 | | #include <cstring> |
32 | | #include <sstream> |
33 | | #include <unordered_map> |
34 | | #include <vector> |
35 | | |
36 | | namespace node { |
37 | | namespace inspector { |
38 | | namespace { |
39 | | using v8::Context; |
40 | | using v8::Function; |
41 | | using v8::HandleScope; |
42 | | using v8::Isolate; |
43 | | using v8::Local; |
44 | | using v8::Message; |
45 | | using v8::Object; |
46 | | using v8::Value; |
47 | | |
48 | | using v8_inspector::StringBuffer; |
49 | | using v8_inspector::StringView; |
50 | | using v8_inspector::V8Inspector; |
51 | | using v8_inspector::V8InspectorClient; |
52 | | |
#ifdef __POSIX__
// Posted by the SIGUSR1 handler; the watchdog thread below sleeps on it.
static uv_sem_t start_io_thread_semaphore;
#endif  // __POSIX__
// Async handle used to hop back onto the main event loop when an external
// trigger (signal / remote thread) asks for the inspector IO thread.
static uv_async_t start_io_thread_async;
// This is just an additional check to make sure start_io_thread_async
// is not accidentally re-used or used when uninitialized.
static std::atomic_bool start_io_thread_async_initialized { false };
// Protects the Agent* stored in start_io_thread_async.data.
static Mutex start_io_thread_async_mutex;
62 | | |
63 | | std::unique_ptr<StringBuffer> ToProtocolString(Isolate* isolate, |
64 | 0 | Local<Value> value) { |
65 | 0 | TwoByteValue buffer(isolate, value); |
66 | 0 | return StringBuffer::create(StringView(*buffer, buffer.length())); |
67 | 0 | } |
68 | | |
69 | | // Called on the main thread. |
70 | 0 | void StartIoThreadAsyncCallback(uv_async_t* handle) { |
71 | 0 | static_cast<Agent*>(handle->data)->StartIoThread(); |
72 | 0 | } |
73 | | |
74 | | |
75 | | #ifdef __POSIX__ |
// SIGUSR1 handler. Only async-signal-safe work happens here: post the
// semaphore so StartIoThreadMain() can do the real work on its own thread.
static void StartIoThreadWakeup(int signo, siginfo_t* info, void* ucontext) {
  uv_sem_post(&start_io_thread_semaphore);
}
79 | | |
// Watchdog thread body: sleeps on the semaphore posted by the SIGUSR1
// handler and, for every post, asks the currently registered Agent (if any)
// to start its IO thread. Runs for the lifetime of the process; never
// returns. The mutex guards the Agent* stashed in start_io_thread_async.data.
inline void* StartIoThreadMain(void* unused) {
  for (;;) {
    uv_sem_wait(&start_io_thread_semaphore);
    Mutex::ScopedLock lock(start_io_thread_async_mutex);

    CHECK(start_io_thread_async_initialized);
    Agent* agent = static_cast<Agent*>(start_io_thread_async.data);
    if (agent != nullptr)
      agent->RequestIoThreadStart();
  }
}
91 | | |
// Installs the SIGUSR1 handler and spawns the detached watchdog thread it
// wakes. Returns 0 on success, or -errno when thread creation fails (in
// which case SIGUSR1 stays blocked).
static int StartDebugSignalHandler() {
  // Start a watchdog thread for calling v8::Debug::DebugBreak() because
  // it's not safe to call directly from the signal handler, it can
  // deadlock with the thread it interrupts.
  CHECK_EQ(0, uv_sem_init(&start_io_thread_semaphore, 0));
  pthread_attr_t attr;
  CHECK_EQ(0, pthread_attr_init(&attr));
#if defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__)
  // PTHREAD_STACK_MIN is 2 KiB with musl libc, which is too small to safely
  // receive signals. PTHREAD_STACK_MIN + MINSIGSTKSZ is 8 KiB on arm64, which
  // is the musl architecture with the biggest MINSIGSTKSZ so let's use that
  // as a lower bound and let's quadruple it just in case. The goal is to avoid
  // creating a big 2 or 4 MiB address space gap (problematic on 32 bits
  // because of fragmentation), not squeeze out every last byte.
  // Omitted on FreeBSD because it doesn't seem to like small stacks.
  const size_t stack_size = std::max(static_cast<size_t>(4 * 8192),
                                     static_cast<size_t>(PTHREAD_STACK_MIN));
  CHECK_EQ(0, pthread_attr_setstacksize(&attr, stack_size));
#endif  // defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__)
  CHECK_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  sigset_t sigmask;
  // Mask all signals.
  sigfillset(&sigmask);
  sigset_t savemask;
  // Block everything while creating the thread so the watchdog inherits a
  // fully-blocked mask and never runs signal handlers itself.
  CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, &savemask));
  sigmask = savemask;
  pthread_t thread;
  const int err = pthread_create(&thread, &attr,
                                 StartIoThreadMain, nullptr);
  // Restore original mask
  CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, nullptr));
  CHECK_EQ(0, pthread_attr_destroy(&attr));
  if (err != 0) {
    fprintf(stderr, "node[%u]: pthread_create: %s\n",
            uv_os_getpid(), strerror(err));
    fflush(stderr);
    // Leave SIGUSR1 blocked. We don't install a signal handler,
    // receiving the signal would terminate the process.
    return -err;
  }
  RegisterSignalHandler(SIGUSR1, StartIoThreadWakeup);
  // Unblock SIGUSR1. A pending SIGUSR1 signal will now be delivered.
  sigemptyset(&sigmask);
  sigaddset(&sigmask, SIGUSR1);
  CHECK_EQ(0, pthread_sigmask(SIG_UNBLOCK, &sigmask, nullptr));
  return 0;
}
139 | | #endif // __POSIX__ |
140 | | |
141 | | |
142 | | #ifdef _WIN32 |
// Thread entry point whose address is published via the shared-memory
// mapping below — presumably invoked by an external process through
// CreateRemoteThread (confirm against the client tooling). Mirrors the POSIX
// watchdog: asks the registered Agent (if any) to start the IO thread.
DWORD WINAPI StartIoThreadProc(void* arg) {
  Mutex::ScopedLock lock(start_io_thread_async_mutex);
  CHECK(start_io_thread_async_initialized);
  Agent* agent = static_cast<Agent*>(start_io_thread_async.data);
  if (agent != nullptr)
    agent->RequestIoThreadStart();
  return 0;
}
151 | | |
// Formats the per-process name of the shared-memory mapping used to expose
// the debug-signal entry point ("node-debug-handler-<pid>"). Returns the
// _snwprintf result (< 0 on truncation/error).
static int GetDebugSignalHandlerMappingName(DWORD pid, wchar_t* buf,
                                            size_t buf_len) {
  return _snwprintf(buf, buf_len, L"node-debug-handler-%u", pid);
}
156 | | |
// Windows counterpart of the SIGUSR1 mechanism: publishes the address of
// StartIoThreadProc in a named shared-memory mapping so an external process
// can find and invoke it in this process. Returns 0 on success, -1 on
// failure.
static int StartDebugSignalHandler() {
  wchar_t mapping_name[32];
  HANDLE mapping_handle;
  DWORD pid;
  LPTHREAD_START_ROUTINE* handler;

  pid = uv_os_getpid();

  if (GetDebugSignalHandlerMappingName(pid,
                                       mapping_name,
                                       arraysize(mapping_name)) < 0) {
    return -1;
  }

  mapping_handle = CreateFileMappingW(INVALID_HANDLE_VALUE,
                                      nullptr,
                                      PAGE_READWRITE,
                                      0,
                                      sizeof *handler,
                                      mapping_name);
  if (mapping_handle == nullptr) {
    return -1;
  }

  handler = reinterpret_cast<LPTHREAD_START_ROUTINE*>(
      MapViewOfFile(mapping_handle,
                    FILE_MAP_ALL_ACCESS,
                    0,
                    0,
                    sizeof *handler));
  if (handler == nullptr) {
    CloseHandle(mapping_handle);
    return -1;
  }

  *handler = StartIoThreadProc;

  UnmapViewOfFile(static_cast<void*>(handler));

  // NOTE(review): mapping_handle is left open on the success path. This
  // looks deliberate — closing the last handle would destroy the named
  // mapping other processes need to look up — but confirm before treating
  // it as a leak.
  return 0;
}
198 | | #endif // _WIN32 |
199 | | |
200 | | |
// All contexts created by this client go into a single V8 inspector context
// group.
const int CONTEXT_GROUP_ID = 1;

// Human-readable inspector target label for a worker, e.g. "Worker[1]".
std::string GetWorkerLabel(node::Environment* env) {
  std::ostringstream result;
  result << "Worker[" << env->thread_id() << "]";
  return result.str();
}
208 | | |
// One inspector session. Bridges a single frontend connection to both the
// V8 inspector session (Debugger/Runtime/... domains) and the Node-specific
// protocol dispatchers (Tracing, Worker, Runtime agents) wired through an
// UberDispatcher.
class ChannelImpl final : public v8_inspector::V8Inspector::Channel,
                          public protocol::FrontendChannel {
 public:
  explicit ChannelImpl(Environment* env,
                       const std::unique_ptr<V8Inspector>& inspector,
                       std::shared_ptr<WorkerManager> worker_manager,
                       std::unique_ptr<InspectorSessionDelegate> delegate,
                       std::shared_ptr<MainThreadHandle> main_thread_,
                       bool prevent_shutdown)
      : delegate_(std::move(delegate)), prevent_shutdown_(prevent_shutdown),
        retaining_context_(false) {
    session_ = inspector->connect(CONTEXT_GROUP_ID,
                                  this,
                                  StringView(),
                                  V8Inspector::ClientTrustLevel::kFullyTrusted);
    node_dispatcher_ = std::make_unique<protocol::UberDispatcher>(this);
    tracing_agent_ =
        std::make_unique<protocol::TracingAgent>(env, main_thread_);
    tracing_agent_->Wire(node_dispatcher_.get());
    // worker_manager is only supplied for main-thread clients (see
    // getWorkerManager()); workers do not expose the worker agent.
    if (worker_manager) {
      worker_agent_ = std::make_unique<protocol::WorkerAgent>(worker_manager);
      worker_agent_->Wire(node_dispatcher_.get());
    }
    runtime_agent_ = std::make_unique<protocol::RuntimeAgent>();
    runtime_agent_->Wire(node_dispatcher_.get());
  }

  ~ChannelImpl() override {
    tracing_agent_->disable();
    tracing_agent_.reset();  // Dispose before the dispatchers
    if (worker_agent_) {
      worker_agent_->disable();
      worker_agent_.reset();  // Dispose before the dispatchers
    }
    runtime_agent_->disable();
    runtime_agent_.reset();  // Dispose before the dispatchers
  }

  // Routes an incoming protocol message: methods V8 can handle go to the V8
  // session, everything else to the Node dispatchers.
  void dispatchProtocolMessage(const StringView& message) {
    std::string raw_message = protocol::StringUtil::StringViewToUtf8(message);
    per_process::Debug(DebugCategory::INSPECTOR_SERVER,
                       "[inspector received] %s\n",
                       raw_message);
    std::unique_ptr<protocol::DictionaryValue> value =
        protocol::DictionaryValue::cast(
            protocol::StringUtil::parseJSON(message));
    int call_id;
    std::string method;
    node_dispatcher_->parseCommand(value.get(), &call_id, &method);
    if (v8_inspector::V8InspectorSession::canDispatchMethod(
            Utf8ToStringView(method)->string())) {
      session_->dispatchProtocolMessage(message);
    } else {
      node_dispatcher_->dispatch(call_id, method, std::move(value),
                                 raw_message);
    }
  }

  void schedulePauseOnNextStatement(const std::string& reason) {
    std::unique_ptr<StringBuffer> buffer = Utf8ToStringView(reason);
    session_->schedulePauseOnNextStatement(buffer->string(), buffer->string());
  }

  // True when this session should keep the process alive until it
  // disconnects (set at connect time).
  bool preventShutdown() {
    return prevent_shutdown_;
  }

  bool notifyWaitingForDisconnect() {
    retaining_context_ = runtime_agent_->notifyWaitingForDisconnect();
    return retaining_context_;
  }

  void setWaitingForDebugger() { runtime_agent_->setWaitingForDebugger(); }

  void unsetWaitingForDebugger() { runtime_agent_->unsetWaitingForDebugger(); }

  // True when this session asked to keep the context alive until it
  // disconnects (see notifyWaitingForDisconnect()).
  bool retainingContext() {
    return retaining_context_;
  }

 private:
  void sendResponse(
      int callId,
      std::unique_ptr<v8_inspector::StringBuffer> message) override {
    sendMessageToFrontend(message->string());
  }

  void sendNotification(
      std::unique_ptr<v8_inspector::StringBuffer> message) override {
    sendMessageToFrontend(message->string());
  }

  void flushProtocolNotifications() override { }

  void sendMessageToFrontend(const StringView& message) {
    // UTF-8 conversion is only done when the debug category is on.
    if (per_process::enabled_debug_list.enabled(
            DebugCategory::INSPECTOR_SERVER)) {
      std::string raw_message = protocol::StringUtil::StringViewToUtf8(message);
      per_process::Debug(DebugCategory::INSPECTOR_SERVER,
                         "[inspector send] %s\n",
                         raw_message);
    }
    delegate_->SendMessageToFrontend(message);
  }

  void sendMessageToFrontend(const std::string& message) {
    sendMessageToFrontend(Utf8ToStringView(message)->string());
  }

  using Serializable = protocol::Serializable;

  void sendProtocolResponse(int callId,
                            std::unique_ptr<Serializable> message) override {
    sendMessageToFrontend(message->serializeToJSON());
  }
  void sendProtocolNotification(
      std::unique_ptr<Serializable> message) override {
    sendMessageToFrontend(message->serializeToJSON());
  }

  // Should be unreachable: every method is dispatched either by V8 or by one
  // of the wired Node agents.
  void fallThrough(int callId,
                   const std::string& method,
                   const std::string& message) override {
    DCHECK(false);
  }

  std::unique_ptr<protocol::RuntimeAgent> runtime_agent_;
  std::unique_ptr<protocol::TracingAgent> tracing_agent_;
  std::unique_ptr<protocol::WorkerAgent> worker_agent_;
  std::unique_ptr<InspectorSessionDelegate> delegate_;
  std::unique_ptr<v8_inspector::V8InspectorSession> session_;
  std::unique_ptr<protocol::UberDispatcher> node_dispatcher_;
  bool prevent_shutdown_;
  bool retaining_context_;
};
344 | | |
// InspectorSession handed out by Agent::Connect() when the delegate lives on
// the same thread as the inspected isolate. Holds the client weakly so a
// session outliving the client degrades to a no-op instead of dangling.
class SameThreadInspectorSession : public InspectorSession {
 public:
  SameThreadInspectorSession(
      int session_id, std::shared_ptr<NodeInspectorClient> client)
      : session_id_(session_id), client_(client) {}
  ~SameThreadInspectorSession() override;
  void Dispatch(const v8_inspector::StringView& message) override;

 private:
  int session_id_;
  std::weak_ptr<NodeInspectorClient> client_;
};
357 | | |
// Emits an internal process message ({cmd: "NODE_DEBUG_ENABLED"}) so cluster
// workers can react to the debugger being enabled in this process.
void NotifyClusterWorkersDebugEnabled(Environment* env) {
  Isolate* isolate = env->isolate();
  HandleScope handle_scope(isolate);
  Local<Context> context = env->context();

  // Send message to enable debug in cluster workers
  Local<Object> message = Object::New(isolate);
  message->Set(context, FIXED_ONE_BYTE_STRING(isolate, "cmd"),
               FIXED_ONE_BYTE_STRING(isolate, "NODE_DEBUG_ENABLED")).Check();
  ProcessEmit(env, "internalMessage", message);
}
369 | | |
370 | | #ifdef _WIN32 |
// Returns true when `path` looks like an absolute Windows file path:
// either a UNC path ("\\server\...") or a drive path ("C:\..." / "C:/...").
bool IsFilePath(const std::string& path) {
  // UNC prefix: two leading backslashes followed by at least one character.
  if (path.size() > 2 && path.compare(0, 2, "\\\\") == 0)
    return true;
  // Drive path needs at least "X:\".
  if (path.size() < 3)
    return false;
  const char drive = path[0];
  const bool is_drive_letter =
      (drive >= 'A' && drive <= 'Z') || (drive >= 'a' && drive <= 'z');
  return is_drive_letter && path[1] == ':' &&
         (path[2] == '/' || path[2] == '\\');
}
382 | | #else |
// Returns true when `path` is an absolute POSIX file path ("/...").
bool IsFilePath(const std::string& path) {
  if (path.empty())
    return false;
  return path.front() == '/';
}
386 | | #endif // _WIN32
387 | | |
388 | 0 | void ThrowUninitializedInspectorError(Environment* env) { |
389 | 0 | HandleScope scope(env->isolate()); |
390 | |
|
391 | 0 | const char* msg = "This Environment was initialized without a V8::Inspector"; |
392 | 0 | Local<Value> exception = |
393 | 0 | v8::String::NewFromUtf8(env->isolate(), msg).ToLocalChecked(); |
394 | |
|
395 | 0 | env->isolate()->ThrowException(exception); |
396 | 0 | } |
397 | | |
398 | | } // namespace |
399 | | |
// Implements V8InspectorClient for one Node Environment: owns the
// V8Inspector, the per-session ChannelImpls, inspector-driven timers, and
// the nested message loop used while paused or waiting for a frontend.
class NodeInspectorClient : public V8InspectorClient {
 public:
  explicit NodeInspectorClient(node::Environment* env, bool is_main)
      : env_(env), is_main_(is_main) {
    client_ = V8Inspector::create(env->isolate(), this);
    // TODO(bnoordhuis) Make name configurable from src/node.cc.
    std::string name =
        is_main_ ? GetHumanReadableProcessName() : GetWorkerLabel(env);
    ContextInfo info(name);
    info.is_default = true;
    contextCreated(env->context(), info);
  }

  // Entered when the debugger pauses execution; spins the nested loop until
  // quitMessageLoopOnPause() clears the flag.
  void runMessageLoopOnPause(int context_group_id) override {
    waiting_for_resume_ = true;
    runMessageLoop();
  }

  // Spins the nested loop until all shutdown-preventing sessions are gone.
  void waitForSessionsDisconnect() {
    waiting_for_sessions_disconnect_ = true;
    runMessageLoop();
  }

  // Blocks startup until a frontend connects and resumes execution
  // (cleared in runIfWaitingForDebugger()).
  void waitForFrontend() {
    waiting_for_frontend_ = true;
    for (const auto& id_channel : channels_) {
      id_channel.second->setWaitingForDebugger();
    }
    runMessageLoop();
  }

  void maxAsyncCallStackDepthChanged(int depth) override {
    if (waiting_for_sessions_disconnect_) {
      // V8 isolate is mostly done and is only letting Inspector protocol
      // clients gather data.
      return;
    }
    // Depth 0 means no session wants async stack traces anymore.
    if (auto agent = env_->inspector_agent()) {
      if (depth == 0) {
        agent->DisableAsyncHook();
      } else {
        agent->EnableAsyncHook();
      }
    }
  }

  void contextCreated(Local<Context> context, const ContextInfo& info) {
    auto name_buffer = Utf8ToStringView(info.name);
    auto origin_buffer = Utf8ToStringView(info.origin);
    std::unique_ptr<StringBuffer> aux_data_buffer;

    v8_inspector::V8ContextInfo v8info(
        context, CONTEXT_GROUP_ID, name_buffer->string());
    v8info.origin = origin_buffer->string();

    // auxData tells the frontend whether this is the environment's main
    // context.
    if (info.is_default) {
      aux_data_buffer = Utf8ToStringView("{\"isDefault\":true}");
    } else {
      aux_data_buffer = Utf8ToStringView("{\"isDefault\":false}");
    }
    v8info.auxData = aux_data_buffer->string();

    client_->contextCreated(v8info);
  }

  void contextDestroyed(Local<Context> context) {
    client_->contextDestroyed(context);
  }

  void quitMessageLoopOnPause() override {
    waiting_for_resume_ = false;
  }

  void runIfWaitingForDebugger(int context_group_id) override {
    waiting_for_frontend_ = false;
    for (const auto& id_channel : channels_) {
      id_channel.second->unsetWaitingForDebugger();
    }
  }

  // Creates a new session (channel) and returns its id.
  int connectFrontend(std::unique_ptr<InspectorSessionDelegate> delegate,
                      bool prevent_shutdown) {
    int session_id = next_session_id_++;
    channels_[session_id] = std::make_unique<ChannelImpl>(env_,
                                                          client_,
                                                          getWorkerManager(),
                                                          std::move(delegate),
                                                          getThreadHandle(),
                                                          prevent_shutdown);
    if (waiting_for_frontend_) {
      channels_[session_id]->setWaitingForDebugger();
    }
    return session_id;
  }

  void disconnectFrontend(int session_id) {
    auto it = channels_.find(session_id);
    if (it == channels_.end())
      return;
    bool retaining_context = it->second->retainingContext();
    channels_.erase(it);
    // When the last context-retaining session goes away, the context can
    // finally be reported destroyed.
    if (retaining_context) {
      for (const auto& id_channel : channels_) {
        if (id_channel.second->retainingContext())
          return;
      }
      contextDestroyed(env_->context());
    }
    if (waiting_for_sessions_disconnect_ && !is_main_)
      waiting_for_sessions_disconnect_ = false;
  }

  void dispatchMessageFromFrontend(int session_id, const StringView& message) {
    channels_[session_id]->dispatchProtocolMessage(message);
  }

  Local<Context> ensureDefaultContextInGroup(int contextGroupId) override {
    return env_->context();
  }

  void installAdditionalCommandLineAPI(Local<Context> context,
                                       Local<Object> target) override {
    Local<Function> installer = env_->inspector_console_extension_installer();
    if (!installer.IsEmpty()) {
      Local<Value> argv[] = {target};
      // If there is an exception, proceed in JS land
      USE(installer->Call(context, target, arraysize(argv), argv));
    }
  }

  void ReportUncaughtException(Local<Value> error, Local<Message> message) {
    Isolate* isolate = env_->isolate();
    Local<Context> context = env_->context();

    int script_id = message->GetScriptOrigin().ScriptId();

    Local<v8::StackTrace> stack_trace = message->GetStackTrace();

    // Presumably drops the script id when the top stack frame already
    // carries it, to avoid duplicated location info — confirm against the
    // V8 exceptionThrown contract.
    if (!stack_trace.IsEmpty() && stack_trace->GetFrameCount() > 0 &&
        script_id == stack_trace->GetFrame(isolate, 0)->GetScriptId()) {
      script_id = 0;
    }

    const uint8_t DETAILS[] = "Uncaught";

    client_->exceptionThrown(
        context,
        StringView(DETAILS, sizeof(DETAILS) - 1),
        error,
        ToProtocolString(isolate, message->Get())->string(),
        ToProtocolString(isolate, message->GetScriptResourceName())->string(),
        message->GetLineNumber(context).FromMaybe(0),
        message->GetStartColumn(context).FromMaybe(0),
        client_->createStackTrace(stack_trace),
        script_id);
  }

  // Keyed by `data` so cancelTimer() can find the right timer.
  void startRepeatingTimer(double interval_s,
                           TimerCallback callback,
                           void* data) override {
    auto result =
        timers_.emplace(std::piecewise_construct, std::make_tuple(data),
                        std::make_tuple(env_, [=]() { callback(data); }));
    CHECK(result.second);
    uint64_t interval = static_cast<uint64_t>(1000 * interval_s);
    result.first->second.Update(interval, interval);
  }

  void cancelTimer(void* data) override {
    timers_.erase(data);
  }

  // Async stack traces instrumentation.
  void AsyncTaskScheduled(const StringView& task_name, void* task,
                          bool recurring) {
    client_->asyncTaskScheduled(task_name, task, recurring);
  }

  void AsyncTaskCanceled(void* task) {
    client_->asyncTaskCanceled(task);
  }

  void AsyncTaskStarted(void* task) {
    client_->asyncTaskStarted(task);
  }

  void AsyncTaskFinished(void* task) {
    client_->asyncTaskFinished(task);
  }

  void AllAsyncTasksCanceled() {
    client_->allAsyncTasksCanceled();
  }

  void schedulePauseOnNextStatement(const std::string& reason) {
    for (const auto& id_channel : channels_) {
      id_channel.second->schedulePauseOnNextStatement(reason);
    }
  }

  bool hasConnectedSessions() {
    for (const auto& id_channel : channels_) {
      // Other sessions are "invisible" for most purposes
      if (id_channel.second->preventShutdown())
        return true;
    }
    return false;
  }

  bool notifyWaitingForDisconnect() {
    bool retaining_context = false;
    for (const auto& id_channel : channels_) {
      if (id_channel.second->notifyWaitingForDisconnect())
        retaining_context = true;
    }
    return retaining_context;
  }

  // Lazily creates the MainThreadInterface that lets other threads talk to
  // this inspector.
  std::shared_ptr<MainThreadHandle> getThreadHandle() {
    if (!interface_) {
      interface_ = std::make_shared<MainThreadInterface>(
          env_->inspector_agent());
    }
    return interface_->GetHandle();
  }

  // Only the main thread gets a WorkerManager; worker clients return null.
  std::shared_ptr<WorkerManager> getWorkerManager() {
    if (!is_main_) {
      return nullptr;
    }
    if (worker_manager_ == nullptr) {
      worker_manager_ =
          std::make_shared<WorkerManager>(getThreadHandle());
    }
    return worker_manager_;
  }

  bool IsActive() {
    return !channels_.empty();
  }

 private:
  bool shouldRunMessageLoop() {
    if (waiting_for_frontend_)
      return true;
    if (waiting_for_sessions_disconnect_ || waiting_for_resume_) {
      return hasConnectedSessions();
    }
    return false;
  }

  void runMessageLoop() {
    // Guard against re-entrancy: pausing while already paused must not spin
    // up a second nested loop.
    if (running_nested_loop_)
      return;

    running_nested_loop_ = true;

    while (shouldRunMessageLoop()) {
      if (interface_) interface_->WaitForFrontendEvent();
      env_->RunAndClearInterrupts();
    }
    running_nested_loop_ = false;
  }

  double currentTimeMS() override {
    return env_->isolate_data()->platform()->CurrentClockTimeMillis();
  }

  // Maps an on-disk script path to a file:// URL; non-file resource names
  // are passed through unchanged (nullptr).
  std::unique_ptr<StringBuffer> resourceNameToUrl(
      const StringView& resource_name_view) override {
    std::string resource_name =
        protocol::StringUtil::StringViewToUtf8(resource_name_view);
    if (!IsFilePath(resource_name))
      return nullptr;

    std::string url = node::url::FromFilePath(resource_name);
    return Utf8ToStringView(url);
  }

  node::Environment* env_;
  bool is_main_;
  bool running_nested_loop_ = false;
  std::unique_ptr<V8Inspector> client_;
  // Note: ~ChannelImpl may access timers_ so timers_ has to come first.
  std::unordered_map<void*, TimerWrapHandle> timers_;
  std::unordered_map<int, std::unique_ptr<ChannelImpl>> channels_;
  int next_session_id_ = 1;
  bool waiting_for_resume_ = false;
  bool waiting_for_frontend_ = false;
  bool waiting_for_sessions_disconnect_ = false;
  // Allows accessing Inspector from non-main threads
  std::shared_ptr<MainThreadInterface> interface_;
  std::shared_ptr<WorkerManager> worker_manager_;
};
694 | | |
// One Agent per Environment; real inspector state is created later in
// Agent::Start().
Agent::Agent(Environment* env)
    : parent_env_(env),
      debug_options_(env->options()->debug_options()),
      host_port_(env->inspector_host_port()) {}

Agent::~Agent() = default;
701 | | |
// Sets up the inspector client for this environment and, depending on
// options, starts the IO thread and/or waits for a frontend. Returns true
// when a debugger connection path exists (IO thread running, or a parent
// worker handle relays sessions); false when the inspector stays passive.
bool Agent::Start(const std::string& path,
                  const DebugOptions& options,
                  std::shared_ptr<ExclusiveAccess<HostPort>> host_port,
                  bool is_main) {
  path_ = path;
  debug_options_ = options;
  CHECK_NOT_NULL(host_port);
  host_port_ = host_port;

  client_ = std::make_shared<NodeInspectorClient>(parent_env_, is_main);
  // Only the environment that owns the inspector wires up the global
  // signal/async machinery for external activation.
  if (parent_env_->owns_inspector()) {
    Mutex::ScopedLock lock(start_io_thread_async_mutex);
    CHECK_EQ(start_io_thread_async_initialized.exchange(true), false);
    CHECK_EQ(0, uv_async_init(parent_env_->event_loop(),
                              &start_io_thread_async,
                              StartIoThreadAsyncCallback));
    // Don't let the async handle keep the loop alive by itself.
    uv_unref(reinterpret_cast<uv_handle_t*>(&start_io_thread_async));
    start_io_thread_async.data = this;
    // Ignore failure, SIGUSR1 won't work, but that should not block node start.
    StartDebugSignalHandler();

    parent_env_->AddCleanupHook([](void* data) {
      Environment* env = static_cast<Environment*>(data);

      // Detach the Agent* first so the watchdog thread cannot observe a
      // dangling pointer while the handle is being closed.
      {
        Mutex::ScopedLock lock(start_io_thread_async_mutex);
        start_io_thread_async.data = nullptr;
      }

      // This is global, will never get freed
      env->CloseHandle(&start_io_thread_async, [](uv_async_t*) {
        CHECK(start_io_thread_async_initialized.exchange(false));
      });
    }, parent_env_);
  }

  AtExit(parent_env_, [](void* env) {
    Agent* agent = static_cast<Environment*>(env)->inspector_agent();
    if (agent->IsActive()) {
      agent->WaitForDisconnect();
    }
  }, parent_env_);

  bool wait_for_connect = options.wait_for_connect();
  if (parent_handle_) {
    // Worker thread: sessions are relayed through the parent.
    wait_for_connect = parent_handle_->WaitForConnect();
    parent_handle_->WorkerStarted(client_->getThreadHandle(), wait_for_connect);
  } else if (!options.inspector_enabled || !options.allow_attaching_debugger ||
             !StartIoThread()) {
    return false;
  }

  // Patch the debug options to implement waitForDebuggerOnStart for
  // the NodeWorker.enable method.
  if (wait_for_connect) {
    CHECK(!parent_env_->has_serialized_options());
    debug_options_.EnableBreakFirstLine();
    parent_env_->options()->get_debug_options()->EnableBreakFirstLine();
    client_->waitForFrontend();
  }
  return true;
}
764 | | |
// Starts the inspector server IO thread on demand. Idempotent: returns true
// immediately when it is already running. May throw (permission denied /
// uninitialized inspector) and return false.
bool Agent::StartIoThread() {
  if (io_ != nullptr)
    return true;

  THROW_IF_INSUFFICIENT_PERMISSIONS(parent_env_,
                                    permission::PermissionScope::kInspector,
                                    "StartIoThread",
                                    false);
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return false;
  }

  CHECK_NOT_NULL(client_);

  io_ = InspectorIo::Start(client_->getThreadHandle(),
                           path_,
                           host_port_,
                           debug_options_.inspect_publish_uid);
  if (io_ == nullptr) {
    return false;
  }
  // Tell cluster workers the debugger is now enabled in this process.
  NotifyClusterWorkersDebugEnabled(parent_env_);
  return true;
}
790 | | |
791 | 0 | void Agent::Stop() { |
792 | 0 | io_.reset(); |
793 | 0 | } |
794 | | |
// Creates an in-process inspector session bound to this thread's client.
// Returns an empty pointer (with a pending JS exception) when permissions
// are missing or no inspector was created for this environment.
std::unique_ptr<InspectorSession> Agent::Connect(
    std::unique_ptr<InspectorSessionDelegate> delegate,
    bool prevent_shutdown) {
  THROW_IF_INSUFFICIENT_PERMISSIONS(parent_env_,
                                    permission::PermissionScope::kInspector,
                                    "Connect",
                                    std::unique_ptr<InspectorSession>{});
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return std::unique_ptr<InspectorSession>{};
  }

  CHECK_NOT_NULL(client_);

  int session_id = client_->connectFrontend(std::move(delegate),
                                            prevent_shutdown);
  return std::unique_ptr<InspectorSession>(
      new SameThreadInspectorSession(session_id, client_));
}
814 | | |
815 | | std::unique_ptr<InspectorSession> Agent::ConnectToMainThread( |
816 | | std::unique_ptr<InspectorSessionDelegate> delegate, |
817 | 0 | bool prevent_shutdown) { |
818 | 0 | THROW_IF_INSUFFICIENT_PERMISSIONS(parent_env_, |
819 | 0 | permission::PermissionScope::kInspector, |
820 | 0 | "ConnectToMainThread", |
821 | 0 | std::unique_ptr<InspectorSession>{}); |
822 | 0 | if (!parent_env_->should_create_inspector() && !client_) { |
823 | 0 | ThrowUninitializedInspectorError(parent_env_); |
824 | 0 | return std::unique_ptr<InspectorSession>{}; |
825 | 0 | } |
826 | | |
827 | 0 | CHECK_NOT_NULL(parent_handle_); |
828 | 0 | CHECK_NOT_NULL(client_); |
829 | 0 | auto thread_safe_delegate = |
830 | 0 | client_->getThreadHandle()->MakeDelegateThreadSafe(std::move(delegate)); |
831 | 0 | return parent_handle_->Connect(std::move(thread_safe_delegate), |
832 | 0 | prevent_shutdown); |
833 | 0 | } |
834 | | |
// Blocks process/worker teardown until connected inspector sessions have
// disconnected (or notifies them that the process is waiting). Called during
// shutdown so a debugger gets a chance to finish its work.
void Agent::WaitForDisconnect() {
  THROW_IF_INSUFFICIENT_PERMISSIONS(parent_env_,
                                    permission::PermissionScope::kInspector,
                                    "WaitForDisconnect");
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return;
  }

  CHECK_NOT_NULL(client_);
  // A non-null parent handle means this agent belongs to a worker thread.
  bool is_worker = parent_handle_ != nullptr;
  parent_handle_.reset();
  // Only the main thread prints the hint; workers disconnect silently.
  if (client_->hasConnectedSessions() && !is_worker) {
    fprintf(stderr, "Waiting for the debugger to disconnect...\n");
    fflush(stderr);
  }
  if (!client_->notifyWaitingForDisconnect()) {
    // No session asked to be waited on; tear the context down immediately.
    client_->contextDestroyed(parent_env_->context());
  } else if (is_worker) {
    client_->waitForSessionsDisconnect();
  }
  if (io_ != nullptr) {
    // Stop accepting new frontends, then wait for existing ones to go away.
    io_->StopAcceptingNewConnections();
    client_->waitForSessionsDisconnect();
  }
}
861 | | |
862 | | void Agent::ReportUncaughtException(Local<Value> error, |
863 | 88.3k | Local<Message> message) { |
864 | 88.3k | if (!IsListening()) |
865 | 88.3k | return; |
866 | 0 | client_->ReportUncaughtException(error, message); |
867 | 0 | WaitForDisconnect(); |
868 | 0 | } |
869 | | |
// Asks the inspector client to break at the next JavaScript statement,
// reporting `reason` to the frontend as the pause cause.
void Agent::PauseOnNextJavascriptStatement(const std::string& reason) {
  client_->schedulePauseOnNextStatement(reason);
}
873 | | |
// Stores the JS enable/disable async-hook callbacks on the environment.
// If Enable/DisableAsyncHook was requested before these callbacks existed,
// the corresponding pending flag is consumed and the request replayed now.
void Agent::RegisterAsyncHook(Isolate* isolate,
                              Local<Function> enable_function,
                              Local<Function> disable_function) {
  parent_env_->set_inspector_enable_async_hooks(enable_function);
  parent_env_->set_inspector_disable_async_hooks(disable_function);
  if (pending_enable_async_hook_) {
    // The two pending flags are mutually exclusive by construction.
    CHECK(!pending_disable_async_hook_);
    pending_enable_async_hook_ = false;
    EnableAsyncHook();
  } else if (pending_disable_async_hook_) {
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
    DisableAsyncHook();
  }
}
889 | | |
// Enables inspector async tracking by invoking the JS enable callback.
// If the callback has not been registered yet, the request is recorded in
// pending_enable_async_hook_ (or cancels a pending disable) and replayed
// later by RegisterAsyncHook().
void Agent::EnableAsyncHook() {
  HandleScope scope(parent_env_->isolate());
  Local<Function> enable = parent_env_->inspector_enable_async_hooks();
  if (!enable.IsEmpty()) {
    ToggleAsyncHook(parent_env_->isolate(), enable);
  } else if (pending_disable_async_hook_) {
    // An enable immediately after a pending disable cancels it out.
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
  } else {
    pending_enable_async_hook_ = true;
  }
}
902 | | |
903 | 0 | void Agent::DisableAsyncHook() { |
904 | 0 | HandleScope scope(parent_env_->isolate()); |
905 | 0 | Local<Function> disable = parent_env_->inspector_enable_async_hooks(); |
906 | 0 | if (!disable.IsEmpty()) { |
907 | 0 | ToggleAsyncHook(parent_env_->isolate(), disable); |
908 | 0 | } else if (pending_enable_async_hook_) { |
909 | 0 | CHECK(!pending_disable_async_hook_); |
910 | 0 | pending_enable_async_hook_ = false; |
911 | 0 | } else { |
912 | 0 | pending_disable_async_hook_ = true; |
913 | 0 | } |
914 | 0 | } |
915 | | |
// Invokes one of the registered JS async-hook callbacks (enable or disable)
// with no arguments. Any non-termination exception thrown by the callback is
// printed and treated as fatal, since the hooks are internal and must not
// fail.
void Agent::ToggleAsyncHook(Isolate* isolate, Local<Function> fn) {
  // Guard against running this during cleanup -- no async events will be
  // emitted anyway at that point anymore, and calling into JS is not possible.
  // This should probably not be something we're attempting in the first place,
  // Refs: https://github.com/nodejs/node/pull/34362#discussion_r456006039
  if (!parent_env_->can_call_into_js()) return;
  CHECK(parent_env_->has_run_bootstrapping_code());
  HandleScope handle_scope(isolate);
  CHECK(!fn.IsEmpty());
  auto context = parent_env_->context();
  v8::TryCatch try_catch(isolate);
  // Return value is intentionally ignored; failure surfaces via try_catch.
  USE(fn->Call(context, Undefined(isolate), 0, nullptr));
  if (try_catch.HasCaught() && !try_catch.HasTerminated()) {
    PrintCaughtException(isolate, context, try_catch);
    UNREACHABLE("Cannot toggle Inspector's AsyncHook, please report this.");
  }
}
933 | | |
// Forwards an async-task-scheduled notification to the inspector client so
// the frontend can correlate async stack traces. `task` is an opaque id;
// `recurring` marks tasks that may run more than once.
void Agent::AsyncTaskScheduled(const StringView& task_name, void* task,
                               bool recurring) {
  client_->AsyncTaskScheduled(task_name, task, recurring);
}
938 | | |
// Forwards cancellation of a previously scheduled async task (by opaque id)
// to the inspector client.
void Agent::AsyncTaskCanceled(void* task) {
  client_->AsyncTaskCanceled(task);
}
942 | | |
// Notifies the inspector client that execution of the given async task (by
// opaque id) has begun.
void Agent::AsyncTaskStarted(void* task) {
  client_->AsyncTaskStarted(task);
}
946 | | |
// Notifies the inspector client that execution of the given async task (by
// opaque id) has completed.
void Agent::AsyncTaskFinished(void* task) {
  client_->AsyncTaskFinished(task);
}
950 | | |
// Tells the inspector client to drop all pending async task bookkeeping.
void Agent::AllAsyncTasksCanceled() {
  client_->AllAsyncTasksCanceled();
}
954 | | |
// Requests that the inspector I/O thread be started from the main thread,
// typically in response to an external signal. No-op when attaching a
// debugger at runtime is disallowed by options.
void Agent::RequestIoThreadStart() {
  // We need to attempt to interrupt V8 flow (in case Node is running
  // continuous JS code) and to wake up libuv thread (in case Node is waiting
  // for IO events)
  if (!options().allow_attaching_debugger) {
    return;
  }
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
  // Schedule StartIoThread() on the main thread via a V8 interrupt.
  parent_env_->RequestInterrupt([this](Environment*) {
    StartIoThread();
  });

  // NOTE(review): the async handle is deliberately signalled a second time
  // after scheduling the interrupt — presumably to cover a race with the
  // first send; confirm before simplifying.
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
}
971 | | |
972 | 135k | void Agent::ContextCreated(Local<Context> context, const ContextInfo& info) { |
973 | 135k | if (client_ == nullptr) // This happens for a main context |
974 | 134k | return; |
975 | 1.13k | client_->contextCreated(context, info); |
976 | 1.13k | } |
977 | | |
978 | 136k | bool Agent::IsActive() { |
979 | 136k | if (client_ == nullptr) |
980 | 0 | return false; |
981 | 136k | return io_ != nullptr || client_->IsActive(); |
982 | 136k | } |
983 | | |
// Takes ownership of the handle connecting this (worker) agent to its
// parent thread's inspector.
void Agent::SetParentHandle(
    std::unique_ptr<ParentInspectorHandle> parent_handle) {
  parent_handle_ = std::move(parent_handle);
}
988 | | |
989 | | std::unique_ptr<ParentInspectorHandle> Agent::GetParentHandle( |
990 | 0 | uint64_t thread_id, const std::string& url, const std::string& name) { |
991 | 0 | THROW_IF_INSUFFICIENT_PERMISSIONS(parent_env_, |
992 | 0 | permission::PermissionScope::kInspector, |
993 | 0 | "GetParentHandle", |
994 | 0 | std::unique_ptr<ParentInspectorHandle>{}); |
995 | 0 | if (!parent_env_->should_create_inspector() && !client_) { |
996 | 0 | ThrowUninitializedInspectorError(parent_env_); |
997 | 0 | return std::unique_ptr<ParentInspectorHandle>{}; |
998 | 0 | } |
999 | | |
1000 | 0 | CHECK_NOT_NULL(client_); |
1001 | 0 | if (!parent_handle_) { |
1002 | 0 | return client_->getWorkerManager()->NewParentHandle(thread_id, url, name); |
1003 | 0 | } else { |
1004 | 0 | return parent_handle_->NewParentInspectorHandle(thread_id, url, name); |
1005 | 0 | } |
1006 | 0 | } |
1007 | | |
// Blocks until an inspector frontend connects (used for --inspect-brk-style
// startup). Throws and returns early on permission failure or when the
// inspector was never initialized.
void Agent::WaitForConnect() {
  THROW_IF_INSUFFICIENT_PERMISSIONS(
      parent_env_, permission::PermissionScope::kInspector, "WaitForConnect");
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return;
  }

  CHECK_NOT_NULL(client_);
  client_->waitForFrontend();
}
1019 | | |
// Returns the client's WorkerManager, which tracks inspectable worker
// threads. Returns an empty pointer (after throwing) on permission failure
// or when the inspector was never initialized.
std::shared_ptr<WorkerManager> Agent::GetWorkerManager() {
  // The empty unique_ptr below implicitly converts to an empty shared_ptr.
  THROW_IF_INSUFFICIENT_PERMISSIONS(parent_env_,
                                    permission::PermissionScope::kInspector,
                                    "GetWorkerManager",
                                    std::unique_ptr<WorkerManager>{});
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return std::unique_ptr<WorkerManager>{};
  }

  CHECK_NOT_NULL(client_);
  return client_->getWorkerManager();
}
1033 | | |
1034 | 0 | std::string Agent::GetWsUrl() const { |
1035 | 0 | if (io_ == nullptr) |
1036 | 0 | return ""; |
1037 | 0 | return io_->GetWsUrl(); |
1038 | 0 | } |
1039 | | |
1040 | 0 | SameThreadInspectorSession::~SameThreadInspectorSession() { |
1041 | 0 | auto client = client_.lock(); |
1042 | 0 | if (client) |
1043 | 0 | client->disconnectFrontend(session_id_); |
1044 | 0 | } |
1045 | | |
1046 | | void SameThreadInspectorSession::Dispatch( |
1047 | 0 | const v8_inspector::StringView& message) { |
1048 | 0 | auto client = client_.lock(); |
1049 | 0 | if (client) |
1050 | 0 | client->dispatchMessageFromFrontend(session_id_, message); |
1051 | 0 | } |
1052 | | |
1053 | | } // namespace inspector |
1054 | | } // namespace node |