/src/mozilla-central/xpcom/threads/nsThread.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "nsThread.h" |
8 | | |
9 | | #include "base/message_loop.h" |
10 | | #include "base/platform_thread.h" |
11 | | |
12 | | // Chromium's logging can sometimes leak through... |
13 | | #ifdef LOG |
14 | | #undef LOG |
15 | | #endif |
16 | | |
17 | | #include "mozilla/ReentrantMonitor.h" |
18 | | #include "nsMemoryPressure.h" |
19 | | #include "nsThreadManager.h" |
20 | | #include "nsIClassInfoImpl.h" |
21 | | #include "nsAutoPtr.h" |
22 | | #include "nsCOMPtr.h" |
23 | | #include "nsQueryObject.h" |
24 | | #include "pratom.h" |
25 | | #include "mozilla/BackgroundHangMonitor.h" |
26 | | #include "mozilla/CycleCollectedJSContext.h" |
27 | | #include "mozilla/Logging.h" |
28 | | #include "nsIObserverService.h" |
29 | | #include "mozilla/IOInterposer.h" |
30 | | #include "mozilla/ipc/MessageChannel.h" |
31 | | #include "mozilla/ipc/BackgroundChild.h" |
32 | | #include "mozilla/Preferences.h" |
33 | | #include "mozilla/Scheduler.h" |
34 | | #include "mozilla/SchedulerGroup.h" |
35 | | #include "mozilla/Services.h" |
36 | | #include "mozilla/StaticPrefs.h" |
37 | | #include "mozilla/SystemGroup.h" |
38 | | #include "nsXPCOMPrivate.h" |
39 | | #include "mozilla/ChaosMode.h" |
40 | | #include "mozilla/Telemetry.h" |
41 | | #include "mozilla/TimeStamp.h" |
42 | | #include "mozilla/Unused.h" |
43 | | #include "mozilla/dom/ScriptSettings.h" |
44 | | #include "nsThreadSyncDispatch.h" |
45 | | #include "nsServiceManagerUtils.h" |
46 | | #include "GeckoProfiler.h" |
47 | | #include "InputEventStatistics.h" |
48 | | #include "ThreadEventTarget.h" |
49 | | #include "ThreadDelay.h" |
50 | | |
51 | | #ifdef XP_LINUX |
52 | | #ifdef __GLIBC__ |
53 | | #include <gnu/libc-version.h> |
54 | | #endif |
55 | | #include <sys/mman.h> |
56 | | #include <sys/time.h> |
57 | | #include <sys/resource.h> |
58 | | #include <sched.h> |
59 | | #include <stdio.h> |
60 | | #endif |
61 | | |
62 | | #ifdef XP_WIN |
63 | | #include "mozilla/DynamicallyLinkedFunctionPtr.h" |
64 | | |
65 | | #include <winbase.h> |
66 | | |
67 | | using GetCurrentThreadStackLimitsFn = void (WINAPI*)( |
68 | | PULONG_PTR LowLimit, PULONG_PTR HighLimit); |
69 | | #endif |
70 | | |
71 | | #define HAVE_UALARM _BSD_SOURCE || (_XOPEN_SOURCE >= 500 || \ |
72 | | _XOPEN_SOURCE && _XOPEN_SOURCE_EXTENDED) && \ |
73 | | !(_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) |
74 | | |
75 | | #if defined(XP_LINUX) && !defined(ANDROID) && defined(_GNU_SOURCE) |
76 | | #define HAVE_SCHED_SETAFFINITY |
77 | | #endif |
78 | | |
79 | | #ifdef XP_MACOSX |
80 | | #include <mach/mach.h> |
81 | | #include <mach/thread_policy.h> |
82 | | #endif |
83 | | |
84 | | #ifdef MOZ_CANARY |
85 | | # include <unistd.h> |
86 | | # include <execinfo.h> |
87 | | # include <signal.h> |
88 | | # include <fcntl.h> |
89 | | # include "nsXULAppAPI.h" |
90 | | #endif |
91 | | |
92 | | #if defined(NS_FUNCTION_TIMER) && defined(_MSC_VER) |
93 | | #include "nsTimerImpl.h" |
94 | | #include "mozilla/StackWalk.h" |
95 | | #endif |
96 | | #ifdef NS_FUNCTION_TIMER |
97 | | #include "nsCRT.h" |
98 | | #endif |
99 | | |
100 | | #ifdef MOZ_TASK_TRACER |
101 | | #include "GeckoTaskTracer.h" |
102 | | #include "TracedTaskCommon.h" |
103 | | using namespace mozilla::tasktracer; |
104 | | #endif |
105 | | |
106 | | using namespace mozilla; |
107 | | |
108 | | static LazyLogModule sThreadLog("nsThread"); |
109 | | #ifdef LOG |
110 | | #undef LOG |
111 | | #endif |
112 | 196 | #define LOG(args) MOZ_LOG(sThreadLog, mozilla::LogLevel::Debug, args) |
113 | | |
114 | | NS_DECL_CI_INTERFACE_GETTER(nsThread) |
115 | | |
116 | | Array<char, nsThread::kRunnableNameBufSize> nsThread::sMainThreadRunnableName; |
117 | | |
118 | | //----------------------------------------------------------------------------- |
119 | | // Because we do not have our own nsIFactory, we have to implement nsIClassInfo |
120 | | // somewhat manually. |
121 | | |
122 | | class nsThreadClassInfo : public nsIClassInfo |
123 | | { |
124 | | public: |
125 | | NS_DECL_ISUPPORTS_INHERITED // no mRefCnt |
126 | | NS_DECL_NSICLASSINFO |
127 | | |
128 | | nsThreadClassInfo() |
129 | 0 | { |
130 | 0 | } |
131 | | }; |
132 | | |
133 | | NS_IMETHODIMP_(MozExternalRefCountType) |
134 | | nsThreadClassInfo::AddRef() |
135 | 0 | { |
136 | 0 | return 2; |
137 | 0 | } |
138 | | NS_IMETHODIMP_(MozExternalRefCountType) |
139 | | nsThreadClassInfo::Release() |
140 | 0 | { |
141 | 0 | return 1; |
142 | 0 | } |
143 | | NS_IMPL_QUERY_INTERFACE(nsThreadClassInfo, nsIClassInfo) |
144 | | |
145 | | NS_IMETHODIMP |
146 | | nsThreadClassInfo::GetInterfaces(uint32_t* aCount, nsIID*** aArray) |
147 | 0 | { |
148 | 0 | return NS_CI_INTERFACE_GETTER_NAME(nsThread)(aCount, aArray); |
149 | 0 | } |
150 | | |
151 | | NS_IMETHODIMP |
152 | | nsThreadClassInfo::GetScriptableHelper(nsIXPCScriptable** aResult) |
153 | 0 | { |
154 | 0 | *aResult = nullptr; |
155 | 0 | return NS_OK; |
156 | 0 | } |
157 | | |
158 | | NS_IMETHODIMP |
159 | | nsThreadClassInfo::GetContractID(nsACString& aResult) |
160 | 0 | { |
161 | 0 | aResult.SetIsVoid(true); |
162 | 0 | return NS_OK; |
163 | 0 | } |
164 | | |
165 | | NS_IMETHODIMP |
166 | | nsThreadClassInfo::GetClassDescription(nsACString& aResult) |
167 | 0 | { |
168 | 0 | aResult.SetIsVoid(true); |
169 | 0 | return NS_OK; |
170 | 0 | } |
171 | | |
172 | | NS_IMETHODIMP |
173 | | nsThreadClassInfo::GetClassID(nsCID** aResult) |
174 | 0 | { |
175 | 0 | *aResult = nullptr; |
176 | 0 | return NS_OK; |
177 | 0 | } |
178 | | |
179 | | NS_IMETHODIMP |
180 | | nsThreadClassInfo::GetFlags(uint32_t* aResult) |
181 | 0 | { |
182 | 0 | *aResult = THREADSAFE; |
183 | 0 | return NS_OK; |
184 | 0 | } |
185 | | |
186 | | NS_IMETHODIMP |
187 | | nsThreadClassInfo::GetClassIDNoAlloc(nsCID* aResult) |
188 | 0 | { |
189 | 0 | return NS_ERROR_NOT_AVAILABLE; |
190 | 0 | } |
191 | | |
192 | | //----------------------------------------------------------------------------- |
193 | | |
194 | | NS_IMPL_ADDREF(nsThread) |
195 | | NS_IMPL_RELEASE(nsThread) |
196 | 3 | NS_INTERFACE_MAP_BEGIN(nsThread) |
197 | 3 | NS_INTERFACE_MAP_ENTRY(nsIThread) |
198 | 3 | NS_INTERFACE_MAP_ENTRY(nsIThreadInternal) |
199 | 3 | NS_INTERFACE_MAP_ENTRY(nsIEventTarget) |
200 | 0 | NS_INTERFACE_MAP_ENTRY(nsISerialEventTarget) |
201 | 0 | NS_INTERFACE_MAP_ENTRY(nsISupportsPriority) |
202 | 0 | NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIThread) |
203 | 0 | if (aIID.Equals(NS_GET_IID(nsIClassInfo))) { |
204 | 0 | static nsThreadClassInfo sThreadClassInfo; |
205 | 0 | foundInterface = static_cast<nsIClassInfo*>(&sThreadClassInfo); |
206 | 0 | } else |
207 | 0 | NS_INTERFACE_MAP_END |
208 | | NS_IMPL_CI_INTERFACE_GETTER(nsThread, nsIThread, nsIThreadInternal, |
209 | | nsIEventTarget, nsISupportsPriority) |
210 | | |
211 | | //----------------------------------------------------------------------------- |
212 | | |
213 | | class nsThreadStartupEvent final : public Runnable |
214 | | { |
215 | | public: |
216 | | nsThreadStartupEvent() |
217 | | : Runnable("nsThreadStartupEvent") |
218 | | , mMon("nsThreadStartupEvent.mMon") |
219 | | , mInitialized(false) |
220 | 13 | { |
221 | 13 | } |
222 | | |
223 | | // This method does not return until the thread startup object is in the |
224 | | // completion state. |
225 | | void Wait() |
226 | 13 | { |
227 | 13 | ReentrantMonitorAutoEnter mon(mMon); |
228 | 26 | while (!mInitialized) { |
229 | 13 | mon.Wait(); |
230 | 13 | } |
231 | 13 | } |
232 | | |
233 | | private: |
234 | 13 | ~nsThreadStartupEvent() = default; |
235 | | |
236 | | NS_IMETHOD Run() override |
237 | 13 | { |
238 | 13 | ReentrantMonitorAutoEnter mon(mMon); |
239 | 13 | mInitialized = true; |
240 | 13 | mon.Notify(); |
241 | 13 | return NS_OK; |
242 | 13 | } |
243 | | |
244 | | ReentrantMonitor mMon; |
245 | | bool mInitialized; |
246 | | }; |
247 | | //----------------------------------------------------------------------------- |
248 | | |
249 | | struct nsThreadShutdownContext |
250 | | { |
251 | | nsThreadShutdownContext(NotNull<nsThread*> aTerminatingThread, |
252 | | NotNull<nsThread*> aJoiningThread, |
253 | | bool aAwaitingShutdownAck) |
254 | | : mTerminatingThread(aTerminatingThread) |
255 | | , mJoiningThread(aJoiningThread) |
256 | | , mAwaitingShutdownAck(aAwaitingShutdownAck) |
257 | | , mIsMainThreadJoining(NS_IsMainThread()) |
258 | 0 | { |
259 | 0 | MOZ_COUNT_CTOR(nsThreadShutdownContext); |
260 | 0 | } |
261 | | ~nsThreadShutdownContext() |
262 | 0 | { |
263 | 0 | MOZ_COUNT_DTOR(nsThreadShutdownContext); |
264 | 0 | } |
265 | | |
266 | | // NB: This will be the last reference. |
267 | | NotNull<RefPtr<nsThread>> mTerminatingThread; |
268 | | NotNull<nsThread*> MOZ_UNSAFE_REF("Thread manager is holding reference to joining thread") |
269 | | mJoiningThread; |
270 | | bool mAwaitingShutdownAck; |
271 | | bool mIsMainThreadJoining; |
272 | | }; |
273 | | |
274 | | // This event is responsible for notifying nsThread::Shutdown that it is time |
275 | | // to call PR_JoinThread. It implements nsICancelableRunnable so that it can |
276 | | // run on a DOM Worker thread (where all events must implement |
277 | | // nsICancelableRunnable.) |
278 | | class nsThreadShutdownAckEvent : public CancelableRunnable |
279 | | { |
280 | | public: |
281 | | explicit nsThreadShutdownAckEvent(NotNull<nsThreadShutdownContext*> aCtx) |
282 | | : CancelableRunnable("nsThreadShutdownAckEvent") |
283 | | , mShutdownContext(aCtx) |
284 | 0 | { |
285 | 0 | } |
286 | | NS_IMETHOD Run() override |
287 | 0 | { |
288 | 0 | mShutdownContext->mTerminatingThread->ShutdownComplete(mShutdownContext); |
289 | 0 | return NS_OK; |
290 | 0 | } |
291 | | nsresult Cancel() override |
292 | 0 | { |
293 | 0 | return Run(); |
294 | 0 | } |
295 | | private: |
296 | 0 | virtual ~nsThreadShutdownAckEvent() { } |
297 | | |
298 | | NotNull<nsThreadShutdownContext*> mShutdownContext; |
299 | | }; |
300 | | |
301 | | // This event is responsible for setting mShutdownContext |
302 | | class nsThreadShutdownEvent : public Runnable |
303 | | { |
304 | | public: |
305 | | nsThreadShutdownEvent(NotNull<nsThread*> aThr, |
306 | | NotNull<nsThreadShutdownContext*> aCtx) |
307 | | : Runnable("nsThreadShutdownEvent") |
308 | | , mThread(aThr) |
309 | | , mShutdownContext(aCtx) |
310 | 0 | { |
311 | 0 | } |
312 | | NS_IMETHOD Run() override |
313 | 0 | { |
314 | 0 | mThread->mShutdownContext = mShutdownContext; |
315 | 0 | MessageLoop::current()->Quit(); |
316 | 0 | return NS_OK; |
317 | 0 | } |
318 | | private: |
319 | | NotNull<RefPtr<nsThread>> mThread; |
320 | | NotNull<nsThreadShutdownContext*> mShutdownContext; |
321 | | }; |
322 | | |
323 | | //----------------------------------------------------------------------------- |
324 | | |
325 | | static void |
326 | | SetThreadAffinity(unsigned int cpu) |
327 | 0 | { |
328 | 0 | #ifdef HAVE_SCHED_SETAFFINITY |
329 | 0 | cpu_set_t cpus; |
330 | 0 | CPU_ZERO(&cpus); |
331 | 0 | CPU_SET(cpu, &cpus); |
332 | 0 | sched_setaffinity(0, sizeof(cpus), &cpus); |
333 | 0 | // Don't assert sched_setaffinity's return value because it intermittently (?) |
334 | 0 | // fails with EINVAL on Linux x64 try runs. |
335 | | #elif defined(XP_MACOSX) |
336 | | // OS X does not provide APIs to pin threads to specific processors, but you |
337 | | // can tag threads as belonging to the same "affinity set" and the OS will try |
338 | | // to run them on the same processor. To run threads on different processors, |
339 | | // tag them as belonging to different affinity sets. Tag 0, the default, means |
340 | | // "no affinity" so let's pretend each CPU has its own tag `cpu+1`. |
341 | | thread_affinity_policy_data_t policy; |
342 | | policy.affinity_tag = cpu + 1; |
343 | | MOZ_ALWAYS_TRUE(thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY, |
344 | | &policy.affinity_tag, 1) == KERN_SUCCESS); |
345 | | #elif defined(XP_WIN) |
346 | | MOZ_ALWAYS_TRUE(SetThreadIdealProcessor(GetCurrentThread(), cpu) != (DWORD)-1); |
347 | | #endif |
348 | | } |
349 | | |
350 | | static void |
351 | | SetupCurrentThreadForChaosMode() |
352 | 46 | { |
353 | 46 | if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) { |
354 | 46 | return; |
355 | 46 | } |
356 | 0 | |
357 | 0 | #ifdef XP_LINUX |
358 | 0 | // PR_SetThreadPriority doesn't really work since priorities > |
359 | 0 | // PR_PRIORITY_NORMAL can't be set by non-root users. Instead we'll just use |
360 | 0 | // setpriority(2) to set random 'nice values'. In regular Linux this is only |
361 | 0 | // a dynamic adjustment so it still doesn't really do what we want, but tools |
362 | 0 | // like 'rr' can be more aggressive about honoring these values. |
363 | 0 | // Some of these calls may fail due to trying to lower the priority |
364 | 0 | // (e.g. something may have already called setpriority() for this thread). |
365 | 0 | // This makes it hard to have non-main threads with higher priority than the |
366 | 0 | // main thread, but that's hard to fix. Tools like rr can choose to honor the |
367 | 0 | // requested values anyway. |
368 | 0 | // Use just 4 priorities so there's a reasonable chance of any two threads |
369 | 0 | // having equal priority. |
370 | 0 | setpriority(PRIO_PROCESS, 0, ChaosMode::randomUint32LessThan(4)); |
371 | | #else |
372 | | // We should set the affinity here but NSPR doesn't provide a way to expose it. |
373 | | uint32_t priority = ChaosMode::randomUint32LessThan(PR_PRIORITY_LAST + 1); |
374 | | PR_SetThreadPriority(PR_GetCurrentThread(), PRThreadPriority(priority)); |
375 | | #endif |
376 | |
377 | 0 | // Force half the threads to CPU 0 so they compete for CPU |
378 | 0 | if (ChaosMode::randomUint32LessThan(2)) { |
379 | 0 | SetThreadAffinity(0); |
380 | 0 | } |
381 | 0 | } |
382 | | |
383 | | namespace { |
384 | | |
385 | | struct ThreadInitData { |
386 | | nsThread* thread; |
387 | | const nsACString& name; |
388 | | }; |
389 | | |
390 | | } |
391 | | |
392 | | /* static */ mozilla::OffTheBooksMutex& |
393 | | nsThread::ThreadListMutex() |
394 | 46 | { |
395 | 46 | static OffTheBooksMutex sMutex("nsThread::ThreadListMutex"); |
396 | 46 | return sMutex; |
397 | 46 | } |
398 | | |
399 | | /* static */ LinkedList<nsThread>& |
400 | | nsThread::ThreadList() |
401 | 46 | { |
402 | 46 | static LinkedList<nsThread> sList; |
403 | 46 | return sList; |
404 | 46 | } |
405 | | |
406 | | /* static */ void |
407 | | nsThread::ClearThreadList() |
408 | 0 | { |
409 | 0 | OffTheBooksMutexAutoLock mal(ThreadListMutex()); |
410 | 0 | while (ThreadList().popFirst()) {} |
411 | 0 | } |
412 | | |
413 | | /* static */ nsThreadEnumerator |
414 | | nsThread::Enumerate() |
415 | 0 | { |
416 | 0 | return {}; |
417 | 0 | } |
418 | | |
419 | | /*static*/ void |
420 | | nsThread::ThreadFunc(void* aArg) |
421 | 13 | { |
422 | 13 | using mozilla::ipc::BackgroundChild; |
423 | 13 | |
424 | 13 | ThreadInitData* initData = static_cast<ThreadInitData*>(aArg); |
425 | 13 | nsThread* self = initData->thread; // strong reference |
426 | 13 | |
427 | 13 | self->mThread = PR_GetCurrentThread(); |
428 | 13 | self->mVirtualThread = GetCurrentVirtualThread(); |
429 | 13 | self->mEventTarget->SetCurrentThread(); |
430 | 13 | SetupCurrentThreadForChaosMode(); |
431 | 13 | |
432 | 13 | if (!initData->name.IsEmpty()) { |
433 | 13 | NS_SetCurrentThreadName(initData->name.BeginReading()); |
434 | 13 | } |
435 | 13 | |
436 | 13 | self->InitCommon(); |
437 | 13 | |
438 | 13 | // Inform the ThreadManager |
439 | 13 | nsThreadManager::get().RegisterCurrentThread(*self); |
440 | 13 | |
441 | 13 | mozilla::IOInterposer::RegisterCurrentThread(); |
442 | 13 | |
443 | 13 | // This must come after the call to nsThreadManager::RegisterCurrentThread(), |
444 | 13 | // because that call is needed to properly set up this thread as an nsThread, |
445 | 13 | // which profiler_register_thread() requires. See bug 1347007. |
446 | 13 | if (!initData->name.IsEmpty()) { |
447 | 13 | PROFILER_REGISTER_THREAD(initData->name.BeginReading()); |
448 | 13 | } |
449 | 13 | |
450 | 13 | // Wait for and process startup event |
451 | 13 | nsCOMPtr<nsIRunnable> event = self->mEvents->GetEvent(true, nullptr); |
452 | 13 | MOZ_ASSERT(event); |
453 | 13 | |
454 | 13 | initData = nullptr; // clear before unblocking nsThread::Init |
455 | 13 | |
456 | 13 | event->Run(); // unblocks nsThread::Init |
457 | 13 | event = nullptr; |
458 | 13 | |
459 | 13 | { |
460 | 13 | // Scope for MessageLoop. |
461 | 13 | nsAutoPtr<MessageLoop> loop( |
462 | 13 | new MessageLoop(MessageLoop::TYPE_MOZILLA_NONMAINTHREAD, self)); |
463 | 13 | |
464 | 13 | // Now, process incoming events... |
465 | 13 | loop->Run(); |
466 | 13 | |
467 | 13 | BackgroundChild::CloseForCurrentThread(); |
468 | 13 | |
469 | 13 | // NB: The main thread does not shut down here! It shuts down via |
470 | 13 | // nsThreadManager::Shutdown. |
471 | 13 | |
472 | 13 | // Do NS_ProcessPendingEvents but with special handling to set |
473 | 13 | // mEventsAreDoomed atomically with the removal of the last event. The key |
474 | 13 | // invariant here is that we will never permit PutEvent to succeed if the |
475 | 13 | // event would be left in the queue after our final call to |
476 | 13 | // NS_ProcessPendingEvents. We also have to keep processing events as long |
477 | 13 | // as we have outstanding mRequestedShutdownContexts. |
478 | 13 | while (true) { |
479 | 0 | // Check and see if we're waiting on any threads. |
480 | 0 | self->WaitForAllAsynchronousShutdowns(); |
481 | 0 | |
482 | 0 | if (self->mEvents->ShutdownIfNoPendingEvents()) { |
483 | 0 | break; |
484 | 0 | } |
485 | 0 | NS_ProcessPendingEvents(self); |
486 | 0 | } |
487 | 13 | } |
488 | 13 | |
489 | 13 | mozilla::IOInterposer::UnregisterCurrentThread(); |
490 | 13 | |
491 | 13 | // Inform the threadmanager that this thread is going away |
492 | 13 | nsThreadManager::get().UnregisterCurrentThread(*self); |
493 | 13 | |
494 | 13 | PROFILER_UNREGISTER_THREAD(); |
495 | 13 | |
496 | 13 | // Dispatch shutdown ACK |
497 | 13 | NotNull<nsThreadShutdownContext*> context = |
498 | 13 | WrapNotNull(self->mShutdownContext); |
499 | 13 | MOZ_ASSERT(context->mTerminatingThread == self); |
500 | 13 | event = do_QueryObject(new nsThreadShutdownAckEvent(context)); |
501 | 13 | if (context->mIsMainThreadJoining) { |
502 | 0 | SystemGroup::Dispatch(TaskCategory::Other, event.forget()); |
503 | 13 | } else { |
504 | 13 | context->mJoiningThread->Dispatch(event, NS_DISPATCH_NORMAL); |
505 | 13 | } |
506 | 13 | |
507 | 13 | // Release any observer of the thread here. |
508 | 13 | self->SetObserver(nullptr); |
509 | 13 | |
510 | | #ifdef MOZ_TASK_TRACER |
511 | | FreeTraceInfo(); |
512 | | #endif |
513 | | |
514 | 13 | NS_RELEASE(self); |
515 | 13 | } |
516 | | |
517 | | void |
518 | | nsThread::InitCommon() |
519 | 46 | { |
520 | 46 | mThreadId = uint32_t(PlatformThread::CurrentId()); |
521 | 46 | |
522 | 46 | { |
523 | 46 | #if defined(XP_LINUX) |
524 | 46 | pthread_attr_t attr; |
525 | 46 | pthread_attr_init(&attr); |
526 | 46 | pthread_getattr_np(pthread_self(), &attr); |
527 | 46 | |
528 | 46 | size_t stackSize; |
529 | 46 | pthread_attr_getstack(&attr, &mStackBase, &stackSize); |
530 | 46 | |
531 | 46 | // Glibc prior to 2.27 reports the stack size and base including the guard |
532 | 46 | // region, so we need to compensate for it to get accurate accounting. |
533 | 46 | // Also, this behavior difference isn't guarded by a versioned symbol, so we |
534 | 46 | // actually need to check the runtime glibc version, not the version we were |
535 | 46 | // compiled against. |
536 | 46 | static bool sAdjustForGuardSize = ({ |
537 | 46 | #ifdef __GLIBC__ |
538 | 46 | unsigned major, minor; |
539 | 46 | sscanf(gnu_get_libc_version(), "%u.%u", &major, &minor) < 2 || |
540 | 46 | major < 2 || (major == 2 && minor < 27); |
541 | | #else |
542 | | false; |
543 | | #endif |
544 | | }); |
545 | 46 | if (sAdjustForGuardSize) { |
546 | 46 | size_t guardSize; |
547 | 46 | pthread_attr_getguardsize(&attr, &guardSize); |
548 | 46 | |
549 | 46 | // Note: This assumes that the stack grows down, as is the case on all of |
550 | 46 | // our tier 1 platforms. On platforms where the stack grows up, the |
551 | 46 | // mStackBase adjustment is unnecessary, but doesn't cause any harm other |
552 | 46 | // than under-counting stack memory usage by one page. |
553 | 46 | mStackBase = reinterpret_cast<char*>(mStackBase) + guardSize; |
554 | 46 | stackSize -= guardSize; |
555 | 46 | } |
556 | 46 | |
557 | 46 | mStackSize = stackSize; |
558 | 46 | |
559 | 46 | // This is a bit of a hack. |
560 | 46 | // |
561 | 46 | // We really do want the NOHUGEPAGE flag on our thread stacks, since we |
562 | 46 | // don't expect any of them to need anywhere near 2MB of space. But setting |
563 | 46 | // it here is too late to have an effect, since the first stack page has |
564 | 46 | // already been faulted in existence, and NSPR doesn't give us a way to set |
565 | 46 | // it beforehand. |
566 | 46 | // |
567 | 46 | // What this does get us, however, is a different set of VM flags on our |
568 | 46 | // thread stacks compared to normal heap memory. Which makes the Linux |
569 | 46 | // kernel report them as separate regions, even when they are adjacent to |
570 | 46 | // heap memory. This allows us to accurately track the actual memory |
571 | 46 | // consumption of our allocated stacks. |
572 | 46 | madvise(mStackBase, stackSize, MADV_NOHUGEPAGE); |
573 | 46 | |
574 | 46 | pthread_attr_destroy(&attr); |
575 | | #elif defined(XP_WIN) |
576 | | static const DynamicallyLinkedFunctionPtr<GetCurrentThreadStackLimitsFn> |
577 | | sGetStackLimits(L"kernel32.dll", "GetCurrentThreadStackLimits"); |
578 | | |
579 | | if (sGetStackLimits) { |
580 | | ULONG_PTR stackBottom, stackTop; |
581 | | sGetStackLimits(&stackBottom, &stackTop); |
582 | | mStackBase = reinterpret_cast<void*>(stackBottom); |
583 | | mStackSize = stackTop - stackBottom; |
584 | | } |
585 | | #endif |
586 | | } |
587 | 46 | |
588 | 46 | OffTheBooksMutexAutoLock mal(ThreadListMutex()); |
589 | 46 | ThreadList().insertBack(this); |
590 | 46 | } |
591 | | |
592 | | //----------------------------------------------------------------------------- |
593 | | |
594 | | #ifdef MOZ_CANARY |
595 | | int sCanaryOutputFD = -1; |
596 | | #endif |
597 | | |
598 | | nsThread::nsThread(NotNull<SynchronizedEventQueue*> aQueue, |
599 | | MainThreadFlag aMainThread, |
600 | | uint32_t aStackSize) |
601 | | : mEvents(aQueue.get()) |
602 | | , mEventTarget(new ThreadEventTarget(mEvents.get(), aMainThread == MAIN_THREAD)) |
603 | | , mShutdownContext(nullptr) |
604 | | , mScriptObserver(nullptr) |
605 | | , mThread(nullptr) |
606 | | , mStackSize(aStackSize) |
607 | | , mNestedEventLoopDepth(0) |
608 | | , mCurrentEventLoopDepth(-1) |
609 | | , mShutdownRequired(false) |
610 | | , mPriority(PRIORITY_NORMAL) |
611 | | , mIsMainThread(uint8_t(aMainThread)) |
612 | | , mCanInvokeJS(false) |
613 | | , mCurrentEvent(nullptr) |
614 | | , mCurrentEventStart(TimeStamp::Now()) |
615 | | , mCurrentPerformanceCounter(nullptr) |
616 | 46 | { |
617 | 46 | } |
618 | | |
619 | | nsThread::~nsThread() |
620 | 0 | { |
621 | 0 | NS_ASSERTION(mRequestedShutdownContexts.IsEmpty(), |
622 | 0 | "shouldn't be waiting on other threads to shutdown"); |
623 | 0 | |
624 | 0 | // We shouldn't need to lock before checking isInList at this point. We're |
625 | 0 | // destroying the last reference to this object, so there's no way for anyone |
626 | 0 | // else to remove it in the middle of our check. And the not-in-list state is |
627 | 0 | // determined by the element's next and previous members pointing to itself, so a |
628 | 0 | // non-atomic update to an adjacent member won't affect the outcome either. |
629 | 0 | if (isInList()) { |
630 | 0 | OffTheBooksMutexAutoLock mal(ThreadListMutex()); |
631 | 0 | removeFrom(ThreadList()); |
632 | 0 | } |
633 | 0 | |
634 | | #ifdef DEBUG |
635 | | // We deliberately leak these so they can be tracked by the leak checker. |
636 | | // If you're having nsThreadShutdownContext leaks, you can set: |
637 | | // XPCOM_MEM_LOG_CLASSES=nsThreadShutdownContext |
638 | | // during a test run and that will at least tell you what thread is |
639 | | // requesting shutdown on another, which can be helpful for diagnosing |
640 | | // the leak. |
641 | | for (size_t i = 0; i < mRequestedShutdownContexts.Length(); ++i) { |
642 | | Unused << mRequestedShutdownContexts[i].forget(); |
643 | | } |
644 | | #endif |
645 | | } |
646 | | |
647 | | nsresult |
648 | | nsThread::Init(const nsACString& aName) |
649 | 13 | { |
650 | 13 | // spawn thread and wait until it is fully setup |
651 | 13 | RefPtr<nsThreadStartupEvent> startup = new nsThreadStartupEvent(); |
652 | 13 | |
653 | 13 | NS_ADDREF_THIS(); |
654 | 13 | |
655 | 13 | mShutdownRequired = true; |
656 | 13 | |
657 | 13 | ThreadInitData initData = { this, aName }; |
658 | 13 | |
659 | 13 | // ThreadFunc is responsible for setting mThread |
660 | 13 | if (!PR_CreateThread(PR_USER_THREAD, ThreadFunc, &initData, |
661 | 13 | PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, |
662 | 13 | PR_JOINABLE_THREAD, mStackSize)) { |
663 | 0 | NS_RELEASE_THIS(); |
664 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
665 | 0 | } |
666 | 13 | |
667 | 13 | // ThreadFunc will wait for this event to be run before it tries to access |
668 | 13 | // mThread. By delaying insertion of this event into the queue, we ensure |
669 | 13 | // that mThread is set properly. |
670 | 13 | { |
671 | 13 | mEvents->PutEvent(do_AddRef(startup), EventPriority::Normal); // retain a reference |
672 | 13 | } |
673 | 13 | |
674 | 13 | // Wait for thread to call ThreadManager::SetupCurrentThread, which completes |
675 | 13 | // initialization of ThreadFunc. |
676 | 13 | startup->Wait(); |
677 | 13 | return NS_OK; |
678 | 13 | } |
679 | | |
680 | | nsresult |
681 | | nsThread::InitCurrentThread() |
682 | 33 | { |
683 | 33 | mThread = PR_GetCurrentThread(); |
684 | 33 | mVirtualThread = GetCurrentVirtualThread(); |
685 | 33 | SetupCurrentThreadForChaosMode(); |
686 | 33 | InitCommon(); |
687 | 33 | |
688 | 33 | nsThreadManager::get().RegisterCurrentThread(*this); |
689 | 33 | return NS_OK; |
690 | 33 | } |
691 | | |
692 | | //----------------------------------------------------------------------------- |
693 | | // nsIEventTarget |
694 | | |
695 | | NS_IMETHODIMP |
696 | | nsThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) |
697 | 0 | { |
698 | 0 | nsCOMPtr<nsIRunnable> event(aEvent); |
699 | 0 | return mEventTarget->Dispatch(event.forget(), aFlags); |
700 | 0 | } |
701 | | |
702 | | NS_IMETHODIMP |
703 | | nsThread::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) |
704 | 138 | { |
705 | 138 | LOG(("THRD(%p) Dispatch [%p %x]\n", this, /* XXX aEvent */nullptr, aFlags)); |
706 | 138 | |
707 | 138 | return mEventTarget->Dispatch(std::move(aEvent), aFlags); |
708 | 138 | } |
709 | | |
710 | | NS_IMETHODIMP |
711 | | nsThread::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aDelayMs) |
712 | 0 | { |
713 | 0 | return mEventTarget->DelayedDispatch(std::move(aEvent), aDelayMs); |
714 | 0 | } |
715 | | |
716 | | NS_IMETHODIMP |
717 | | nsThread::IsOnCurrentThread(bool* aResult) |
718 | 0 | { |
719 | 0 | return mEventTarget->IsOnCurrentThread(aResult); |
720 | 0 | } |
721 | | |
722 | | NS_IMETHODIMP_(bool) |
723 | | nsThread::IsOnCurrentThreadInfallible() |
724 | 0 | { |
725 | 0 | // Rely on mVirtualThread being correct. |
726 | 0 | MOZ_CRASH("IsOnCurrentThreadInfallible should never be called on nsIThread"); |
727 | 0 | } |
728 | | |
729 | | //----------------------------------------------------------------------------- |
730 | | // nsIThread |
731 | | |
732 | | NS_IMETHODIMP |
733 | | nsThread::GetPRThread(PRThread** aResult) |
734 | 3 | { |
735 | 3 | *aResult = mThread; |
736 | 3 | return NS_OK; |
737 | 3 | } |
738 | | |
739 | | NS_IMETHODIMP |
740 | | nsThread::GetCanInvokeJS(bool* aResult) |
741 | 0 | { |
742 | 0 | *aResult = mCanInvokeJS; |
743 | 0 | return NS_OK; |
744 | 0 | } |
745 | | |
746 | | NS_IMETHODIMP |
747 | | nsThread::SetCanInvokeJS(bool aCanInvokeJS) |
748 | 3 | { |
749 | 3 | mCanInvokeJS = aCanInvokeJS; |
750 | 3 | return NS_OK; |
751 | 3 | } |
752 | | |
753 | | NS_IMETHODIMP |
754 | | nsThread::AsyncShutdown() |
755 | 0 | { |
756 | 0 | LOG(("THRD(%p) async shutdown\n", this)); |
757 | 0 |
|
758 | 0 | // XXX If we make this warn, then we hit that warning at xpcom shutdown while |
759 | 0 | // shutting down a thread in a thread pool. That happens b/c the thread |
760 | 0 | // in the thread pool is already shutdown by the thread manager. |
761 | 0 | if (!mThread) { |
762 | 0 | return NS_OK; |
763 | 0 | } |
764 | 0 | |
765 | 0 | return !!ShutdownInternal(/* aSync = */ false) ? NS_OK : NS_ERROR_UNEXPECTED; |
766 | 0 | } |
767 | | |
768 | | nsThreadShutdownContext* |
769 | | nsThread::ShutdownInternal(bool aSync) |
770 | 0 | { |
771 | 0 | MOZ_ASSERT(mThread); |
772 | 0 | MOZ_ASSERT(mThread != PR_GetCurrentThread()); |
773 | 0 | if (NS_WARN_IF(mThread == PR_GetCurrentThread())) { |
774 | 0 | return nullptr; |
775 | 0 | } |
776 | 0 | |
777 | 0 | // Prevent multiple calls to this method |
778 | 0 | if (!mShutdownRequired.compareExchange(true, false)) { |
779 | 0 | return nullptr; |
780 | 0 | } |
781 | 0 | |
782 | 0 | { |
783 | 0 | OffTheBooksMutexAutoLock mal(ThreadListMutex()); |
784 | 0 | if (isInList()) { |
785 | 0 | removeFrom(ThreadList()); |
786 | 0 | } |
787 | 0 | } |
788 | 0 | |
789 | 0 | NotNull<nsThread*> currentThread = |
790 | 0 | WrapNotNull(nsThreadManager::get().GetCurrentThread()); |
791 | 0 | |
792 | 0 | nsAutoPtr<nsThreadShutdownContext>& context = |
793 | 0 | *currentThread->mRequestedShutdownContexts.AppendElement(); |
794 | 0 | context = new nsThreadShutdownContext(WrapNotNull(this), currentThread, aSync); |
795 | 0 | |
796 | 0 | // Set mShutdownContext and wake up the thread in case it is waiting for |
797 | 0 | // events to process. |
798 | 0 | nsCOMPtr<nsIRunnable> event = |
799 | 0 | new nsThreadShutdownEvent(WrapNotNull(this), WrapNotNull(context.get())); |
800 | 0 | // XXXroc What if posting the event fails due to OOM? |
801 | 0 | mEvents->PutEvent(event.forget(), EventPriority::Normal); |
802 | 0 | |
803 | 0 | // We could still end up with other events being added after the shutdown |
804 | 0 | // task, but that's okay because we process pending events in ThreadFunc |
805 | 0 | // after setting mShutdownContext just before exiting. |
806 | 0 | return context; |
807 | 0 | } |
808 | | |
809 | | void |
810 | | nsThread::ShutdownComplete(NotNull<nsThreadShutdownContext*> aContext) |
811 | 0 | { |
812 | 0 | MOZ_ASSERT(mThread); |
813 | 0 | MOZ_ASSERT(aContext->mTerminatingThread == this); |
814 | 0 | |
815 | 0 | { |
816 | 0 | OffTheBooksMutexAutoLock mal(ThreadListMutex()); |
817 | 0 | if (isInList()) { |
818 | 0 | removeFrom(ThreadList()); |
819 | 0 | } |
820 | 0 | } |
821 | 0 | |
822 | 0 | if (aContext->mAwaitingShutdownAck) { |
823 | 0 | // We're in a synchronous shutdown, so tell whatever is up the stack that |
824 | 0 | // we're done and unwind the stack so it can call us again. |
825 | 0 | aContext->mAwaitingShutdownAck = false; |
826 | 0 | return; |
827 | 0 | } |
828 | 0 | |
829 | 0 | // Now, it should be safe to join without fear of dead-locking. |
830 | 0 | |
831 | 0 | PR_JoinThread(mThread); |
832 | 0 | mThread = nullptr; |
833 | 0 | |
834 | | #ifdef DEBUG |
835 | | nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver(); |
836 | | MOZ_ASSERT(!obs, "Should have been cleared at shutdown!"); |
837 | | #endif |
838 | |
839 | 0 | // Delete aContext. |
840 | 0 | MOZ_ALWAYS_TRUE( |
841 | 0 | aContext->mJoiningThread->mRequestedShutdownContexts.RemoveElement(aContext)); |
842 | 0 | } |
843 | | |
844 | | void |
845 | | nsThread::WaitForAllAsynchronousShutdowns() |
846 | 0 | { |
847 | 0 | // This is the motivating example for why SpinEventLoop has the template |
848 | 0 | // parameter we are providing here. |
849 | 0 | SpinEventLoopUntil<ProcessFailureBehavior::IgnoreAndContinue>([&]() { |
850 | 0 | return mRequestedShutdownContexts.IsEmpty(); |
851 | 0 | }, this); |
852 | 0 | } |
853 | | |
854 | | NS_IMETHODIMP |
855 | | nsThread::Shutdown() |
856 | 0 | { |
857 | 0 | LOG(("THRD(%p) sync shutdown\n", this)); |
858 | 0 | |
859 | 0 | // XXX If we make this warn, then we hit that warning at xpcom shutdown while |
860 | 0 | // shutting down a thread in a thread pool. That happens b/c the thread |
861 | 0 | // in the thread pool is already shutdown by the thread manager. |
862 | 0 | if (!mThread) { |
863 | 0 | return NS_OK; |
864 | 0 | } |
865 | 0 | |
866 | 0 | nsThreadShutdownContext* maybeContext = ShutdownInternal(/* aSync = */ true); |
867 | 0 | NS_ENSURE_TRUE(maybeContext, NS_ERROR_UNEXPECTED); |
868 | 0 | NotNull<nsThreadShutdownContext*> context = WrapNotNull(maybeContext); |
869 | 0 | |
870 | 0 | // Process events on the current thread until we receive a shutdown ACK. |
871 | 0 | // Allows waiting; ensure no locks are held that would deadlock us! |
872 | 0 | SpinEventLoopUntil([&, context]() { |
873 | 0 | return !context->mAwaitingShutdownAck; |
874 | 0 | }, context->mJoiningThread); |
875 | 0 | |
876 | 0 | ShutdownComplete(context); |
877 | 0 | |
878 | 0 | return NS_OK; |
879 | 0 | } |
880 | | |
881 | | NS_IMETHODIMP |
882 | | nsThread::HasPendingEvents(bool* aResult) |
883 | 19 | { |
884 | 19 | if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) { |
885 | 0 | return NS_ERROR_NOT_SAME_THREAD; |
886 | 0 | } |
887 | 19 | |
888 | 19 | *aResult = mEvents->HasPendingEvent(); |
889 | 19 | return NS_OK; |
890 | 19 | } |
891 | | |
892 | | NS_IMETHODIMP |
893 | | nsThread::IdleDispatch(already_AddRefed<nsIRunnable> aEvent) |
894 | 43 | { |
895 | 43 | nsCOMPtr<nsIRunnable> event = aEvent; |
896 | 43 | |
897 | 43 | if (NS_WARN_IF(!event)) { |
898 | 0 | return NS_ERROR_INVALID_ARG; |
899 | 0 | } |
900 | 43 | |
901 | 43 | if (!mEvents->PutEvent(event.forget(), EventPriority::Idle)) { |
902 | 0 | NS_WARNING("An idle event was posted to a thread that will never run it (rejected)"); |
903 | 0 | return NS_ERROR_UNEXPECTED; |
904 | 0 | } |
905 | 43 | |
906 | 43 | return NS_OK; |
907 | 43 | } |
908 | | |
909 | | #ifdef MOZ_CANARY |
910 | | void canary_alarm_handler(int signum); |
911 | | |
912 | | class Canary |
913 | | { |
914 | | //XXX ToDo: support nested loops |
915 | | public: |
916 | | Canary() |
917 | | { |
918 | | if (sCanaryOutputFD > 0 && EventLatencyIsImportant()) { |
919 | | signal(SIGALRM, canary_alarm_handler); |
920 | | ualarm(15000, 0); |
921 | | } |
922 | | } |
923 | | |
924 | | ~Canary() |
925 | | { |
926 | | if (sCanaryOutputFD != 0 && EventLatencyIsImportant()) { |
927 | | ualarm(0, 0); |
928 | | } |
929 | | } |
930 | | |
931 | | static bool EventLatencyIsImportant() |
932 | | { |
933 | | return NS_IsMainThread() && XRE_IsParentProcess(); |
934 | | } |
935 | | }; |
936 | | |
937 | | void canary_alarm_handler(int signum) |
938 | | { |
939 | | void* array[30]; |
940 | | const char msg[29] = "event took too long to run:\n"; |
941 | | // use write to be safe in the signal handler |
942 | | write(sCanaryOutputFD, msg, sizeof(msg)); |
943 | | backtrace_symbols_fd(array, backtrace(array, 30), sCanaryOutputFD); |
944 | | } |
945 | | |
946 | | #endif |
947 | | |
948 | | #define NOTIFY_EVENT_OBSERVERS(observers_, func_, params_) \ |
949 | 80 | do { \ |
950 | 80 | if (!observers_.IsEmpty()) { \ |
951 | 0 | nsTObserverArray<nsCOMPtr<nsIThreadObserver>>::ForwardIterator \ |
952 | 0 | iter_(observers_); \ |
953 | 0 | nsCOMPtr<nsIThreadObserver> obs_; \ |
954 | 0 | while (iter_.HasMore()) { \ |
955 | 0 | obs_ = iter_.GetNext(); \ |
956 | 0 | obs_ -> func_ params_ ; \ |
957 | 0 | } \ |
958 | 0 | } \ |
959 | 80 | } while(0) |
960 | | |
961 | | #ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY |
962 | | static bool |
963 | | GetLabeledRunnableName(nsIRunnable* aEvent, |
964 | | nsACString& aName, |
965 | | EventPriority aPriority) |
966 | 0 | { |
967 | 0 | bool labeled = false; |
968 | 0 | if (RefPtr<SchedulerGroup::Runnable> groupRunnable = do_QueryObject(aEvent)) { |
969 | 0 | labeled = true; |
970 | 0 | MOZ_ALWAYS_TRUE(NS_SUCCEEDED(groupRunnable->GetName(aName))); |
971 | 0 | } else if (nsCOMPtr<nsINamed> named = do_QueryInterface(aEvent)) { |
972 | 0 | MOZ_ALWAYS_TRUE(NS_SUCCEEDED(named->GetName(aName))); |
973 | 0 | } else { |
974 | 0 | aName.AssignLiteral("non-nsINamed runnable"); |
975 | 0 | } |
976 | 0 | if (aName.IsEmpty()) { |
977 | 0 | aName.AssignLiteral("anonymous runnable"); |
978 | 0 | } |
979 | 0 | |
980 | 0 | if (!labeled && aPriority > EventPriority::Input) { |
981 | 0 | aName.AppendLiteral("(unlabeled)"); |
982 | 0 | } |
983 | 0 | |
984 | 0 | return labeled; |
985 | 0 | } |
986 | | #endif |
987 | | |
988 | | mozilla::PerformanceCounter* |
989 | | nsThread::GetPerformanceCounter(nsIRunnable* aEvent) |
990 | 0 | { |
991 | 0 | RefPtr<SchedulerGroup::Runnable> docRunnable = do_QueryObject(aEvent); |
992 | 0 | if (docRunnable) { |
993 | 0 | mozilla::dom::DocGroup* docGroup = docRunnable->DocGroup(); |
994 | 0 | if (docGroup) { |
995 | 0 | return docGroup->GetPerformanceCounter(); |
996 | 0 | } |
997 | 0 | } |
998 | 0 | return nullptr; |
999 | 0 | } |
1000 | | |
1001 | | size_t |
1002 | | nsThread::ShallowSizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const |
1003 | 0 | { |
1004 | 0 | size_t n = 0; |
1005 | 0 | if (mShutdownContext) { |
1006 | 0 | n += aMallocSizeOf(mShutdownContext); |
1007 | 0 | } |
1008 | 0 | n += mRequestedShutdownContexts.ShallowSizeOfExcludingThis(aMallocSizeOf); |
1009 | 0 | return aMallocSizeOf(this) + aMallocSizeOf(mThread) + n; |
1010 | 0 | } |
1011 | | |
1012 | | size_t |
1013 | | nsThread::SizeOfEventQueues(mozilla::MallocSizeOf aMallocSizeOf) const |
1014 | 0 | { |
1015 | 0 | size_t n = 0; |
1016 | 0 | if (mCurrentPerformanceCounter) { |
1017 | 0 | n += aMallocSizeOf(mCurrentPerformanceCounter); |
1018 | 0 | } |
1019 | 0 | if (mEventTarget) { |
1020 | 0 | // The size of mEvents is reported by mEventTarget. |
1021 | 0 | n += mEventTarget->SizeOfIncludingThis(aMallocSizeOf); |
1022 | 0 | } |
1023 | 0 | return n; |
1024 | 0 | } |
1025 | | |
1026 | | size_t |
1027 | | nsThread::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const |
1028 | 0 | { |
1029 | 0 | return ShallowSizeOfIncludingThis(aMallocSizeOf) + SizeOfEventQueues(aMallocSizeOf); |
1030 | 0 | } |
1031 | | |
1032 | | NS_IMETHODIMP |
1033 | | nsThread::ProcessNextEvent(bool aMayWait, bool* aResult) |
1034 | 40 | { |
1035 | 40 | LOG(("THRD(%p) ProcessNextEvent [%u %u]\n", this, aMayWait, |
1036 | 40 | mNestedEventLoopDepth)); |
1037 | 40 | |
1038 | 40 | if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) { |
1039 | 0 | return NS_ERROR_NOT_SAME_THREAD; |
1040 | 0 | } |
1041 | 40 | |
1042 | 40 | // When recording or replaying, vsync observers are notified whenever |
1043 | 40 | // processing events on the main thread. Waiting for explicit vsync messages |
1044 | 40 | // from the UI process can result in paints happening at unexpected times. |
1045 | 40 | if (recordreplay::IsRecordingOrReplaying() && mIsMainThread == MAIN_THREAD) { |
1046 | 0 | recordreplay::child::NotifyVsyncObserver(); |
1047 | 0 | } |
1048 | 40 | |
1049 | 40 | // The toplevel event loop normally blocks waiting for the next event, but |
1050 | 40 | // if we're trying to shut this thread down, we must exit the event loop when |
1051 | 40 | // the event queue is empty. |
1052 | 40 | // This only applies to the toplevel event loop! Nested event loops (e.g. |
1053 | 40 | // during sync dispatch) are waiting for some state change and must be able |
1054 | 40 | // to block even if something has requested shutdown of the thread. Otherwise |
1055 | 40 | // we'll just busywait as we endlessly look for an event, fail to find one, |
1056 | 40 | // and repeat the nested event loop since its state change hasn't happened yet. |
1057 | 40 | bool reallyWait = aMayWait && (mNestedEventLoopDepth > 0 || !ShuttingDown()); |
1058 | 40 | |
1059 | 40 | Maybe<Scheduler::EventLoopActivation> activation; |
1060 | 40 | if (IsMainThread()) { |
1061 | 0 | DoMainThreadSpecificProcessing(reallyWait); |
1062 | 0 | activation.emplace(); |
1063 | 0 | } |
1064 | 40 | |
1065 | 40 | ++mNestedEventLoopDepth; |
1066 | 40 | |
1067 | 40 | // We only want to create an AutoNoJSAPI on threads that actually do DOM stuff |
1068 | 40 | // (including workers). Those are exactly the threads that have an |
1069 | 40 | // mScriptObserver. |
1070 | 40 | Maybe<dom::AutoNoJSAPI> noJSAPI; |
1071 | 40 | bool callScriptObserver = !!mScriptObserver; |
1072 | 40 | if (callScriptObserver) { |
1073 | 0 | noJSAPI.emplace(); |
1074 | 0 | mScriptObserver->BeforeProcessTask(reallyWait); |
1075 | 0 | } |
1076 | 40 | |
1077 | 40 | nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserverOnThread(); |
1078 | 40 | if (obs) { |
1079 | 8 | obs->OnProcessNextEvent(this, reallyWait); |
1080 | 8 | } |
1081 | 40 | |
1082 | 40 | NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), OnProcessNextEvent, (this, reallyWait)); |
1083 | 40 | |
1084 | | #ifdef MOZ_CANARY |
1085 | | Canary canary; |
1086 | | #endif |
1087 | | nsresult rv = NS_OK; |
1088 | 40 | |
1089 | 40 | { |
1090 | 40 | // Scope for |event| to make sure that its destructor fires while |
1091 | 40 | // mNestedEventLoopDepth has been incremented, since that destructor can |
1092 | 40 | // also do work. |
1093 | 40 | EventPriority priority; |
1094 | 40 | nsCOMPtr<nsIRunnable> event = mEvents->GetEvent(reallyWait, &priority); |
1095 | 40 | |
1096 | 40 | if (activation.isSome()) { |
1097 | 0 | activation.ref().SetEvent(event, priority); |
1098 | 0 | } |
1099 | 40 | |
1100 | 40 | *aResult = (event.get() != nullptr); |
1101 | 40 | |
1102 | 40 | if (event) { |
1103 | 18 | LOG(("THRD(%p) running [%p]\n", this, event.get())); |
1104 | 18 | |
1105 | 18 | // Delay event processing to encourage whoever dispatched this event |
1106 | 18 | // to run. |
1107 | 18 | DelayForChaosMode(ChaosFeature::TaskRunning, 1000); |
1108 | 18 | |
1109 | 18 | if (IsMainThread()) { |
1110 | 0 | BackgroundHangMonitor().NotifyActivity(); |
1111 | 0 | } |
1112 | 18 | |
1113 | 18 | bool schedulerLoggingEnabled = mozilla::StaticPrefs::dom_performance_enable_scheduler_timing(); |
1114 | 18 | if (schedulerLoggingEnabled |
1115 | 18 | && mNestedEventLoopDepth > mCurrentEventLoopDepth |
1116 | 18 | && mCurrentPerformanceCounter) { |
1117 | 0 | // This is a recursive call, we're saving the time |
1118 | 0 | // spent in the parent event if the runnable is linked to a DocGroup. |
1119 | 0 | mozilla::TimeDuration duration = TimeStamp::Now() - mCurrentEventStart; |
1120 | 0 | mCurrentPerformanceCounter->IncrementExecutionDuration(duration.ToMicroseconds()); |
1121 | 0 | } |
1122 | 18 | |
1123 | 18 | #ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY |
1124 | 18 | // If we're on the main thread, we want to record our current runnable's |
1125 | 18 | // name in a static so that BHR can record it. |
1126 | 18 | Array<char, kRunnableNameBufSize> restoreRunnableName; |
1127 | 18 | restoreRunnableName[0] = '\0'; |
1128 | 18 | auto clear = MakeScopeExit([&] { |
1129 | 11 | if (IsMainThread()) { |
1130 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
1131 | 0 | sMainThreadRunnableName = restoreRunnableName; |
1132 | 0 | } |
1133 | 11 | }); |
1134 | 18 | if (IsMainThread()) { |
1135 | 0 | nsAutoCString name; |
1136 | 0 | GetLabeledRunnableName(event, name, priority); |
1137 | 0 | |
1138 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
1139 | 0 | restoreRunnableName = sMainThreadRunnableName; |
1140 | 0 | |
1141 | 0 | // Copy the name into sMainThreadRunnableName's buffer, and append a |
1142 | 0 | // terminating null. |
1143 | 0 | uint32_t length = std::min((uint32_t) kRunnableNameBufSize - 1, |
1144 | 0 | (uint32_t) name.Length()); |
1145 | 0 | memcpy(sMainThreadRunnableName.begin(), name.BeginReading(), length); |
1146 | 0 | sMainThreadRunnableName[length] = '\0'; |
1147 | 0 | } |
1148 | 18 | #endif |
1149 | 18 | Maybe<AutoTimeDurationHelper> timeDurationHelper; |
1150 | 18 | if (priority == EventPriority::Input) { |
1151 | 0 | timeDurationHelper.emplace(); |
1152 | 0 | } |
1153 | 18 | |
1154 | 18 | // The event starts to run, storing the timestamp. |
1155 | 18 | bool recursiveEvent = false; |
1156 | 18 | RefPtr<mozilla::PerformanceCounter> currentPerformanceCounter; |
1157 | 18 | if (schedulerLoggingEnabled) { |
1158 | 0 | recursiveEvent = mNestedEventLoopDepth > mCurrentEventLoopDepth; |
1159 | 0 | mCurrentEventStart = mozilla::TimeStamp::Now(); |
1160 | 0 | mCurrentEvent = event; |
1161 | 0 | mCurrentEventLoopDepth = mNestedEventLoopDepth; |
1162 | 0 | mCurrentPerformanceCounter = GetPerformanceCounter(event); |
1163 | 0 | currentPerformanceCounter = mCurrentPerformanceCounter; |
1164 | 0 | } |
1165 | 18 | |
1166 | 18 | event->Run(); |
1167 | 18 | |
1168 | 18 | // End of execution, we can send the duration for the group |
1169 | 18 | if (schedulerLoggingEnabled) { |
1170 | 0 | if (recursiveEvent) { |
1171 | 0 | // If we're in a recursive call, reset the timer, |
1172 | 0 | // so the parent gets its remaining execution time right. |
1173 | 0 | mCurrentEventStart = mozilla::TimeStamp::Now(); |
1174 | 0 | mCurrentPerformanceCounter = currentPerformanceCounter; |
1175 | 0 | } else { |
1176 | 0 | // We're done with this dispatch |
1177 | 0 | if (currentPerformanceCounter) { |
1178 | 0 | mozilla::TimeDuration duration = TimeStamp::Now() - mCurrentEventStart; |
1179 | 0 | currentPerformanceCounter->IncrementExecutionDuration(duration.ToMicroseconds()); |
1180 | 0 | } |
1181 | 0 | mCurrentEvent = nullptr; |
1182 | 0 | mCurrentEventLoopDepth = -1; |
1183 | 0 | mCurrentPerformanceCounter = nullptr; |
1184 | 0 | } |
1185 | 0 | } |
1186 | 22 | } else if (aMayWait) { |
1187 | 0 | MOZ_ASSERT(ShuttingDown(), |
1188 | 0 | "This should only happen when shutting down"); |
1189 | 0 | rv = NS_ERROR_UNEXPECTED; |
1190 | 0 | } |
1191 | 40 | } |
1192 | 40 | |
1193 | 40 | NOTIFY_EVENT_OBSERVERS(EventQueue()->EventObservers(), AfterProcessNextEvent, (this, *aResult)); |
1194 | 40 | |
1195 | 40 | if (obs) { |
1196 | 8 | obs->AfterProcessNextEvent(this, *aResult); |
1197 | 8 | } |
1198 | 40 | |
1199 | 40 | if (callScriptObserver) { |
1200 | 0 | if (mScriptObserver) { |
1201 | 0 | mScriptObserver->AfterProcessTask(mNestedEventLoopDepth); |
1202 | 0 | } |
1203 | 0 | noJSAPI.reset(); |
1204 | 0 | } |
1205 | 40 | |
1206 | 40 | --mNestedEventLoopDepth; |
1207 | 40 | |
1208 | 40 | return rv; |
1209 | 40 | } |
1210 | | |
1211 | | //----------------------------------------------------------------------------- |
1212 | | // nsISupportsPriority |
1213 | | |
1214 | | NS_IMETHODIMP |
1215 | | nsThread::GetPriority(int32_t* aPriority) |
1216 | 0 | { |
1217 | 0 | *aPriority = mPriority; |
1218 | 0 | return NS_OK; |
1219 | 0 | } |
1220 | | |
1221 | | NS_IMETHODIMP |
1222 | | nsThread::SetPriority(int32_t aPriority) |
1223 | 0 | { |
1224 | 0 | if (NS_WARN_IF(!mThread)) { |
1225 | 0 | return NS_ERROR_NOT_INITIALIZED; |
1226 | 0 | } |
1227 | 0 | |
1228 | 0 | // NSPR defines the following four thread priorities: |
1229 | 0 | // PR_PRIORITY_LOW |
1230 | 0 | // PR_PRIORITY_NORMAL |
1231 | 0 | // PR_PRIORITY_HIGH |
1232 | 0 | // PR_PRIORITY_URGENT |
1233 | 0 | // We map the priority values defined on nsISupportsPriority to these values. |
1234 | 0 | |
1235 | 0 | mPriority = aPriority; |
1236 | 0 | |
1237 | 0 | PRThreadPriority pri; |
1238 | 0 | if (mPriority <= PRIORITY_HIGHEST) { |
1239 | 0 | pri = PR_PRIORITY_URGENT; |
1240 | 0 | } else if (mPriority < PRIORITY_NORMAL) { |
1241 | 0 | pri = PR_PRIORITY_HIGH; |
1242 | 0 | } else if (mPriority > PRIORITY_NORMAL) { |
1243 | 0 | pri = PR_PRIORITY_LOW; |
1244 | 0 | } else { |
1245 | 0 | pri = PR_PRIORITY_NORMAL; |
1246 | 0 | } |
1247 | 0 | // If chaos mode is active, retain the randomly chosen priority |
1248 | 0 | if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) { |
1249 | 0 | PR_SetThreadPriority(mThread, pri); |
1250 | 0 | } |
1251 | 0 | |
1252 | 0 | return NS_OK; |
1253 | 0 | } |
1254 | | |
1255 | | NS_IMETHODIMP |
1256 | | nsThread::AdjustPriority(int32_t aDelta) |
1257 | 0 | { |
1258 | 0 | return SetPriority(mPriority + aDelta); |
1259 | 0 | } |
1260 | | |
1261 | | //----------------------------------------------------------------------------- |
1262 | | // nsIThreadInternal |
1263 | | |
1264 | | NS_IMETHODIMP |
1265 | | nsThread::GetObserver(nsIThreadObserver** aObs) |
1266 | 0 | { |
1267 | 0 | nsCOMPtr<nsIThreadObserver> obs = mEvents->GetObserver(); |
1268 | 0 | obs.forget(aObs); |
1269 | 0 | return NS_OK; |
1270 | 0 | } |
1271 | | |
1272 | | NS_IMETHODIMP |
1273 | | nsThread::SetObserver(nsIThreadObserver* aObs) |
1274 | 3 | { |
1275 | 3 | if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) { |
1276 | 0 | return NS_ERROR_NOT_SAME_THREAD; |
1277 | 0 | } |
1278 | 3 | |
1279 | 3 | mEvents->SetObserver(aObs); |
1280 | 3 | return NS_OK; |
1281 | 3 | } |
1282 | | |
1283 | | uint32_t |
1284 | | nsThread::RecursionDepth() const |
1285 | 3 | { |
1286 | 3 | MOZ_ASSERT(PR_GetCurrentThread() == mThread); |
1287 | 3 | return mNestedEventLoopDepth; |
1288 | 3 | } |
1289 | | |
1290 | | NS_IMETHODIMP |
1291 | | nsThread::AddObserver(nsIThreadObserver* aObserver) |
1292 | 0 | { |
1293 | 0 | if (NS_WARN_IF(!aObserver)) { |
1294 | 0 | return NS_ERROR_INVALID_ARG; |
1295 | 0 | } |
1296 | 0 | if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) { |
1297 | 0 | return NS_ERROR_NOT_SAME_THREAD; |
1298 | 0 | } |
1299 | 0 | |
1300 | 0 | EventQueue()->AddObserver(aObserver); |
1301 | 0 | |
1302 | 0 | return NS_OK; |
1303 | 0 | } |
1304 | | |
1305 | | NS_IMETHODIMP |
1306 | | nsThread::RemoveObserver(nsIThreadObserver* aObserver) |
1307 | 0 | { |
1308 | 0 | if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) { |
1309 | 0 | return NS_ERROR_NOT_SAME_THREAD; |
1310 | 0 | } |
1311 | 0 | |
1312 | 0 | EventQueue()->RemoveObserver(aObserver); |
1313 | 0 | |
1314 | 0 | return NS_OK; |
1315 | 0 | } |
1316 | | |
1317 | | void |
1318 | | nsThread::SetScriptObserver(mozilla::CycleCollectedJSContext* aScriptObserver) |
1319 | 3 | { |
1320 | 3 | if (!aScriptObserver) { |
1321 | 0 | mScriptObserver = nullptr; |
1322 | 0 | return; |
1323 | 0 | } |
1324 | 3 | |
1325 | 3 | MOZ_ASSERT(!mScriptObserver); |
1326 | 3 | mScriptObserver = aScriptObserver; |
1327 | 3 | } |
1328 | | |
1329 | | void |
1330 | | nsThread::DoMainThreadSpecificProcessing(bool aReallyWait) |
1331 | 0 | { |
1332 | 0 | MOZ_ASSERT(IsMainThread()); |
1333 | 0 | |
1334 | 0 | ipc::CancelCPOWs(); |
1335 | 0 | |
1336 | 0 | if (aReallyWait) { |
1337 | 0 | BackgroundHangMonitor().NotifyWait(); |
1338 | 0 | } |
1339 | 0 | |
1340 | 0 | // Fire a memory pressure notification, if one is pending. |
1341 | 0 | if (!ShuttingDown()) { |
1342 | 0 | MemoryPressureState mpPending = NS_GetPendingMemoryPressure(); |
1343 | 0 | if (mpPending != MemPressure_None) { |
1344 | 0 | nsCOMPtr<nsIObserverService> os = services::GetObserverService(); |
1345 | 0 | |
1346 | 0 | if (os) { |
1347 | 0 | if (mpPending == MemPressure_Stopping) { |
1348 | 0 | os->NotifyObservers(nullptr, "memory-pressure-stop", nullptr); |
1349 | 0 | } else { |
1350 | 0 | os->NotifyObservers(nullptr, "memory-pressure", |
1351 | 0 | mpPending == MemPressure_New ? u"low-memory" : |
1352 | 0 | u"low-memory-ongoing"); |
1353 | 0 | } |
1354 | 0 | } else { |
1355 | 0 | NS_WARNING("Can't get observer service!"); |
1356 | 0 | } |
1357 | 0 | } |
1358 | 0 | } |
1359 | 0 | } |
1360 | | |
1361 | | NS_IMETHODIMP |
1362 | | nsThread::GetEventTarget(nsIEventTarget** aEventTarget) |
1363 | 0 | { |
1364 | 0 | nsCOMPtr<nsIEventTarget> target = this; |
1365 | 0 | target.forget(aEventTarget); |
1366 | 0 | return NS_OK; |
1367 | 0 | } |
1368 | | |
1369 | | nsIEventTarget* |
1370 | | nsThread::EventTarget() |
1371 | 125 | { |
1372 | 125 | return this; |
1373 | 125 | } |
1374 | | |
1375 | | nsISerialEventTarget* |
1376 | | nsThread::SerialEventTarget() |
1377 | 0 | { |
1378 | 0 | return this; |
1379 | 0 | } |