/src/mozilla-central/xpcom/threads/Scheduler.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "Scheduler.h" |
8 | | |
9 | | #include "jsfriendapi.h" |
10 | | #include "LabeledEventQueue.h" |
11 | | #include "LeakRefPtr.h" |
12 | | #include "MainThreadQueue.h" |
13 | | #include "mozilla/CooperativeThreadPool.h" |
14 | | #include "mozilla/dom/ScriptSettings.h" |
15 | | #include "mozilla/ipc/BackgroundChild.h" |
16 | | #include "mozilla/SchedulerGroup.h" |
17 | | #include "nsCycleCollector.h" |
18 | | #include "nsIThread.h" |
19 | | #include "nsPrintfCString.h" |
20 | | #include "nsThread.h" |
21 | | #include "nsThreadManager.h" |
22 | | #include "PrioritizedEventQueue.h" |
23 | | #include "xpcpublic.h" |
24 | | #include "xpccomponents.h" |
25 | | |
26 | | // Windows silliness. winbase.h defines an empty no-argument Yield macro. |
27 | | #undef Yield |
28 | | |
29 | | using namespace mozilla; |
30 | | |
31 | | // Using the anonymous namespace here causes GCC to generate: |
32 | | // error: 'mozilla::SchedulerImpl' has a field 'mozilla::SchedulerImpl::mQueue' whose type uses the anonymous namespace |
33 | | namespace mozilla { |
34 | | namespace detail { |
35 | | |
// Main-thread event queue used while the cooperative scheduler may be
// attached. All mutable state is guarded by mLock. When a SchedulerImpl is
// attached (cooperative mode) waiters are woken through the thread pool's
// blocker mechanism; otherwise a plain condvar is used.
class SchedulerEventQueue final : public SynchronizedEventQueue
{
public:
  explicit SchedulerEventQueue(UniquePtr<AbstractEventQueue> aQueue)
    : mLock("Scheduler")
    , mNonCooperativeCondVar(mLock, "SchedulerNonCoop")
    , mQueue(std::move(aQueue))
    , mScheduler(nullptr)
  {}

  bool PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                EventPriority aPriority) final;

  void Disconnect(const MutexAutoLock& aProofOfLock) final {}

  already_AddRefed<nsIRunnable> GetEvent(bool aMayWait,
                                         EventPriority* aPriority) final;
  bool HasPendingEvent() final;
  // Overload for callers that already hold mLock.
  bool HasPendingEvent(const MutexAutoLock& aProofOfLock);

  bool ShutdownIfNoPendingEvents() final;

  already_AddRefed<nsIThreadObserver> GetObserver() final;
  already_AddRefed<nsIThreadObserver> GetObserverOnThread() final;
  void SetObserver(nsIThreadObserver* aObserver) final;

  void EnableInputEventPrioritization() final;
  void FlushInputEventPrioritization() final;
  void SuspendInputEventPrioritization() final;
  void ResumeInputEventPrioritization() final;

  // True exactly while a SchedulerImpl is attached via SetScheduler.
  bool UseCooperativeScheduling() const;
  void SetScheduler(SchedulerImpl* aScheduler);

  // Exposes the queue's mutex so SchedulerImpl can share a single lock.
  Mutex& MutexRef() { return mLock; }

  size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const override
  {
    return SynchronizedEventQueue::SizeOfExcludingThis(aMallocSizeOf) +
           mQueue->SizeOfIncludingThis(aMallocSizeOf);
  }

private:
  Mutex mLock;
  // Signaled on dispatch when no scheduler is attached.
  CondVar mNonCooperativeCondVar;

  // Using the actual type here would avoid a virtual dispatch. However, that
  // would prevent us from switching between EventQueue and LabeledEventQueue at
  // runtime.
  UniquePtr<AbstractEventQueue> mQueue;

  // Set by ShutdownIfNoPendingEvents; afterwards PutEvent refuses events.
  bool mEventsAreDoomed = false;
  SchedulerImpl* mScheduler;
  nsCOMPtr<nsIThreadObserver> mObserver;
};
91 | | |
92 | | } // namespace detail |
93 | | } // namespace mozilla |
94 | | |
95 | | using mozilla::detail::SchedulerEventQueue; |
96 | | |
// Implementation of the (experimental) cooperative main-thread scheduler.
// Owns the CooperativeThreadPool while scheduling is active, shares its
// mutex with the SchedulerEventQueue, and tracks global event-accounting
// state (sNumThreadsRunning / sUnlabeledEventRunning).
class mozilla::SchedulerImpl
{
public:
  explicit SchedulerImpl(SchedulerEventQueue* aQueue);

  // Start/stop cooperative scheduling; Stop runs aStoppedCallback once the
  // teardown in Start()'s runnable has completed.
  void Start();
  void Stop(already_AddRefed<nsIRunnable> aStoppedCallback);
  void Shutdown();

  void Dispatch(already_AddRefed<nsIRunnable> aEvent);

  void Yield();

  static void EnterNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation);
  static void ExitNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation);

  static void StartEvent(Scheduler::EventLoopActivation& aActivation);
  static void FinishEvent(Scheduler::EventLoopActivation& aActivation);

  // Records the JSContext of cooperative thread aIndex (set from
  // ThreadController::OnStartThread).
  void SetJSContext(size_t aIndex, JSContext* aCx)
  {
    mContexts[aIndex] = aCx;
  }

  static void YieldCallback(JSContext* aCx);
  static bool InterruptCallback(JSContext* aCx);

  CooperativeThreadPool* GetThreadPool() { return mThreadPool.get(); }

  static bool UnlabeledEventRunning() { return sUnlabeledEventRunning; }
  static bool AnyEventRunning() { return sNumThreadsRunning > 0; }

  void BlockThreadedExecution(nsIBlockThreadedExecutionCallback* aCallback);
  void UnblockThreadedExecution();

  CooperativeThreadPool::Resource* GetQueueResource() { return &mQueueResource; }
  bool UseCooperativeScheduling() const { return mQueue->UseCooperativeScheduling(); }

  // Preferences.
  static bool sPrefChaoticScheduling;
  static bool sPrefPreemption;
  static size_t sPrefThreadCount;
  static bool sPrefUseMultipleQueues;

private:
  void Interrupt(JSContext* aCx);
  void YieldFromJS(JSContext* aCx);

  static void SwitcherThread(void* aData);
  void Switcher();

  size_t mNumThreads;

  // Protects mQueue as well as mThreadPool. The lock comes from the SchedulerEventQueue.
  Mutex& mLock;
  CondVar mShutdownCondVar;

  bool mShuttingDown;

  // Runnable to call when the scheduler has finished shutting down.
  nsTArray<nsCOMPtr<nsIRunnable>> mShutdownCallbacks;

  UniquePtr<CooperativeThreadPool> mThreadPool;

  RefPtr<SchedulerEventQueue> mQueue;

  // Pool resource that is "available" when the main queue has a ready event.
  class QueueResource : public CooperativeThreadPool::Resource
  {
  public:
    explicit QueueResource(SchedulerImpl* aScheduler)
      : mScheduler(aScheduler)
    {}

    bool IsAvailable(const MutexAutoLock& aProofOfLock) override;

  private:
    SchedulerImpl* mScheduler;
  };
  QueueResource mQueueResource;

  // Pool resource gating on availability of the JS system zone.
  class SystemZoneResource : public CooperativeThreadPool::Resource
  {
  public:
    explicit SystemZoneResource(SchedulerImpl* aScheduler)
      : mScheduler(aScheduler) {}

    bool IsAvailable(const MutexAutoLock& aProofOfLock) override;

  private:
    SchedulerImpl* mScheduler;
  };
  SystemZoneResource mSystemZoneResource;

  // Receives thread lifecycle callbacks from the pool and makes each pool
  // thread impersonate the main thread while it runs.
  class ThreadController : public CooperativeThreadPool::Controller
  {
  public:
    ThreadController(SchedulerImpl* aScheduler, SchedulerEventQueue* aQueue)
      : mScheduler(aScheduler)
      , mMainVirtual(GetCurrentVirtualThread())
      , mMainLoop(MessageLoop::current())
      , mOldMainLoop(nullptr)
      , mMainQueue(aQueue)
    {}

    void OnStartThread(size_t aIndex, const nsACString& aName, void* aStackTop) override;
    void OnStopThread(size_t aIndex) override;

    void OnSuspendThread(size_t aIndex) override;
    void OnResumeThread(size_t aIndex) override;

  private:
    SchedulerImpl* mScheduler;
    PRThread* mMainVirtual;
    MessageLoop* mMainLoop;
    MessageLoop* mOldMainLoop;
    RefPtr<SynchronizedEventQueue> mMainQueue;
  };
  ThreadController mController;

  static size_t sNumThreadsRunning;
  static bool sUnlabeledEventRunning;

  // Number of times that BlockThreadedExecution has been called without
  // corresponding calls to UnblockThreadedExecution. If this is non-zero,
  // scheduling is disabled.
  size_t mNumSchedulerBlocks = 0;

  // Per-cooperative-thread JSContexts, indexed by pool thread index.
  JSContext* mContexts[CooperativeThreadPool::kMaxThreads];
};
226 | | |
// Default pref values; overridden in content processes via Scheduler::SetPrefs.
bool SchedulerImpl::sPrefChaoticScheduling = false;
bool SchedulerImpl::sPrefPreemption = false;
bool SchedulerImpl::sPrefUseMultipleQueues = false;
size_t SchedulerImpl::sPrefThreadCount = 2;

// Event-accounting counters; zero-initialized at startup.
size_t SchedulerImpl::sNumThreadsRunning;
bool SchedulerImpl::sUnlabeledEventRunning;
234 | | |
// Enqueues an event under the lock and wakes whichever waiter mechanism is
// active (thread-pool blockers in cooperative mode, condvar otherwise).
// Returns false (leaking the event on purpose) once the queue is doomed.
bool
SchedulerEventQueue::PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                              EventPriority aPriority)
{
  // We want to leak the reference when we fail to dispatch it, so that
  // we won't release the event in a wrong thread.
  LeakRefPtr<nsIRunnable> event(std::move(aEvent));
  nsCOMPtr<nsIThreadObserver> obs;

  {
    MutexAutoLock lock(mLock);

    if (mEventsAreDoomed) {
      return false;
    }

    mQueue->PutEvent(event.take(), aPriority, lock);

    if (mScheduler) {
      // Cooperative mode: a waiter may be blocked on the queue resource.
      CooperativeThreadPool* pool = mScheduler->GetThreadPool();
      MOZ_ASSERT(pool);
      pool->RecheckBlockers(lock);
    } else {
      mNonCooperativeCondVar.Notify();
    }

    // Make sure to grab the observer before dropping the lock, otherwise the
    // event that we just placed into the queue could run and eventually delete
    // this nsThread before the calling thread is scheduled again. We would then
    // crash while trying to access a dead nsThread.
    obs = mObserver;
  }

  // Notify outside the lock to avoid re-entering queue code while locked.
  if (obs) {
    obs->OnDispatchedEvent();
  }

  return true;
}
274 | | |
// Dequeues the next event, optionally blocking until one arrives. In
// cooperative mode waiting is done by yielding to the pool on the queue
// resource; otherwise on the condvar. May return null when !aMayWait.
already_AddRefed<nsIRunnable>
SchedulerEventQueue::GetEvent(bool aMayWait,
                              EventPriority* aPriority)
{
  MutexAutoLock lock(mLock);

  if (SchedulerImpl::sPrefChaoticScheduling) {
    // Test mode: yield before every dequeue to maximize interleavings.
    CooperativeThreadPool::Yield(nullptr, lock);
  }

  nsCOMPtr<nsIRunnable> event;
  for (;;) {
    event = mQueue->GetEvent(aPriority, lock);

    if (event || !aMayWait) {
      break;
    }

    if (mScheduler) {
      CooperativeThreadPool::Yield(mScheduler->GetQueueResource(), lock);
    } else {
      AUTO_PROFILER_LABEL("SchedulerEventQueue::GetEvent::Wait", IDLE);
      mNonCooperativeCondVar.Wait();
    }
  }

  return event.forget();
}
303 | | |
304 | | bool |
305 | | SchedulerEventQueue::HasPendingEvent() |
306 | 0 | { |
307 | 0 | MutexAutoLock lock(mLock); |
308 | 0 | return HasPendingEvent(lock); |
309 | 0 | } |
310 | | |
311 | | bool |
312 | | SchedulerEventQueue::HasPendingEvent(const MutexAutoLock& aProofOfLock) |
313 | 0 | { |
314 | 0 | return mQueue->HasReadyEvent(aProofOfLock); |
315 | 0 | } |
316 | | |
317 | | bool |
318 | | SchedulerEventQueue::ShutdownIfNoPendingEvents() |
319 | 0 | { |
320 | 0 | MutexAutoLock lock(mLock); |
321 | 0 |
|
322 | 0 | MOZ_ASSERT(!mScheduler); |
323 | 0 |
|
324 | 0 | if (mQueue->IsEmpty(lock)) { |
325 | 0 | mEventsAreDoomed = true; |
326 | 0 | return true; |
327 | 0 | } |
328 | 0 | return false; |
329 | 0 | } |
330 | | |
331 | | bool |
332 | | SchedulerEventQueue::UseCooperativeScheduling() const |
333 | 0 | { |
334 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
335 | 0 | return !!mScheduler; |
336 | 0 | } |
337 | | |
338 | | void |
339 | | SchedulerEventQueue::SetScheduler(SchedulerImpl* aScheduler) |
340 | 0 | { |
341 | 0 | MutexAutoLock lock(mLock); |
342 | 0 | mScheduler = aScheduler; |
343 | 0 | } |
344 | | |
345 | | already_AddRefed<nsIThreadObserver> |
346 | | SchedulerEventQueue::GetObserver() |
347 | 0 | { |
348 | 0 | MutexAutoLock lock(mLock); |
349 | 0 | return do_AddRef(mObserver); |
350 | 0 | } |
351 | | |
352 | | already_AddRefed<nsIThreadObserver> |
353 | | SchedulerEventQueue::GetObserverOnThread() |
354 | 0 | { |
355 | 0 | MOZ_ASSERT(NS_IsMainThread()); |
356 | 0 | return do_AddRef(mObserver); |
357 | 0 | } |
358 | | |
359 | | void |
360 | | SchedulerEventQueue::SetObserver(nsIThreadObserver* aObserver) |
361 | 0 | { |
362 | 0 | MutexAutoLock lock(mLock); |
363 | 0 | mObserver = aObserver; |
364 | 0 | } |
365 | | |
366 | | void |
367 | | SchedulerEventQueue::EnableInputEventPrioritization() |
368 | 0 | { |
369 | 0 | MutexAutoLock lock(mLock); |
370 | 0 | mQueue->EnableInputEventPrioritization(lock); |
371 | 0 | } |
372 | | |
373 | | void |
374 | | SchedulerEventQueue::FlushInputEventPrioritization() |
375 | 0 | { |
376 | 0 | MutexAutoLock lock(mLock); |
377 | 0 | mQueue->FlushInputEventPrioritization(lock); |
378 | 0 | } |
379 | | |
380 | | void |
381 | | SchedulerEventQueue::SuspendInputEventPrioritization() |
382 | 0 | { |
383 | 0 | MutexAutoLock lock(mLock); |
384 | 0 | mQueue->SuspendInputEventPrioritization(lock); |
385 | 0 | } |
386 | | |
387 | | void |
388 | | SchedulerEventQueue::ResumeInputEventPrioritization() |
389 | 0 | { |
390 | 0 | MutexAutoLock lock(mLock); |
391 | 0 | mQueue->ResumeInputEventPrioritization(lock); |
392 | 0 | } |
393 | | |
394 | | UniquePtr<SchedulerImpl> Scheduler::sScheduler; |
395 | | |
// Shares the queue's mutex (see MutexRef) so queue and scheduler state are
// protected by a single lock; mContexts is zero-initialized.
SchedulerImpl::SchedulerImpl(SchedulerEventQueue* aQueue)
  : mNumThreads(sPrefThreadCount)
  , mLock(aQueue->MutexRef())
  , mShutdownCondVar(aQueue->MutexRef(), "SchedulerImpl")
  , mShuttingDown(false)
  , mQueue(aQueue)
  , mQueueResource(this)
  , mSystemZoneResource(this)
  , mController(this, aQueue)
  , mContexts()
{
}
408 | | |
409 | | void |
410 | | SchedulerImpl::Interrupt(JSContext* aCx) |
411 | 0 | { |
412 | 0 | MutexAutoLock lock(mLock); |
413 | 0 | CooperativeThreadPool::Yield(nullptr, lock); |
414 | 0 | } |
415 | | |
416 | | /* static */ bool |
417 | | SchedulerImpl::InterruptCallback(JSContext* aCx) |
418 | 0 | { |
419 | 0 | Scheduler::sScheduler->Interrupt(aCx); |
420 | 0 | return true; |
421 | 0 | } |
422 | | |
423 | | void |
424 | | SchedulerImpl::YieldFromJS(JSContext* aCx) |
425 | 0 | { |
426 | 0 | MutexAutoLock lock(mLock); |
427 | 0 | CooperativeThreadPool::Yield(&mSystemZoneResource, lock); |
428 | 0 | } |
429 | | |
430 | | /* static */ void |
431 | | SchedulerImpl::YieldCallback(JSContext* aCx) |
432 | 0 | { |
433 | 0 | Scheduler::sScheduler->YieldFromJS(aCx); |
434 | 0 | } |
435 | | |
// Body of the optional preemption thread (only started when sPrefPreemption
// is set). Every 50us it asks the currently-running cooperative thread's
// JSContext to fire its interrupt callback, which yields (see Interrupt).
void
SchedulerImpl::Switcher()
{
  // This thread switcher is extremely basic and only meant for testing. The
  // goal is to switch as much as possible without regard for performance.

  MutexAutoLock lock(mLock);
  while (!mShuttingDown) {
    CooperativeThreadPool::SelectedThread threadIndex = mThreadPool->CurrentThreadIndex(lock);
    if (threadIndex.is<size_t>()) {
      JSContext* cx = mContexts[threadIndex.as<size_t>()];
      if (cx) {
        JS_RequestInterruptCallbackCanWait(cx);
      }
    }

    // Doubles as the shutdown signal: Stop()/Shutdown() notify this condvar.
    mShutdownCondVar.Wait(TimeDuration::FromMicroseconds(50));
  }
}
455 | | |
456 | | /* static */ void |
457 | | SchedulerImpl::SwitcherThread(void* aData) |
458 | 0 | { |
459 | 0 | static_cast<SchedulerImpl*>(aData)->Switcher(); |
460 | 0 | } |
461 | | |
// Switches the main thread into cooperative scheduling mode. The work runs
// in a dispatched runnable so it starts from the event loop rather than
// mid-event. That runnable creates the pool, then blocks on mShutdownCondVar
// until Stop()/Shutdown() is called, tears everything down again, and runs
// the accumulated shutdown callbacks.
void
SchedulerImpl::Start()
{
  MOZ_ASSERT(mNumSchedulerBlocks == 0);

  NS_DispatchToMainThread(NS_NewRunnableFunction("Scheduler::Start", [this]() -> void {
    // Let's pretend the runnable here isn't actually running.
    MOZ_ASSERT(sUnlabeledEventRunning);
    sUnlabeledEventRunning = false;
    MOZ_ASSERT(sNumThreadsRunning == 1);
    sNumThreadsRunning = 0;

    mQueue->SetScheduler(this);

    xpc::YieldCooperativeContext();

    mThreadPool = MakeUnique<CooperativeThreadPool>(mNumThreads, mLock,
                                                    mController);

    // Optional preemption thread (see Switcher) — testing only.
    PRThread* switcher = nullptr;
    if (sPrefPreemption) {
      switcher = PR_CreateThread(PR_USER_THREAD,
                                 SwitcherThread,
                                 this,
                                 PR_PRIORITY_HIGH,
                                 PR_GLOBAL_THREAD,
                                 PR_JOINABLE_THREAD,
                                 0);
    }

    // Park here until a stop/shutdown request arrives.
    {
      MutexAutoLock mutex(mLock);
      while (!mShuttingDown) {
        mShutdownCondVar.Wait();
      }
    }

    if (switcher) {
      PR_JoinThread(switcher);
    }

    mThreadPool->Shutdown();
    mThreadPool = nullptr;

    mQueue->SetScheduler(nullptr);

    xpc::ResumeCooperativeContext();

    // Put things back to the way they were before we started scheduling.
    MOZ_ASSERT(!sUnlabeledEventRunning);
    sUnlabeledEventRunning = true;
    MOZ_ASSERT(sNumThreadsRunning == 0);
    sNumThreadsRunning = 1;

    // Reset the flag so a later Start() can run, then fire the callbacks
    // queued by Stop()/Shutdown().
    mShuttingDown = false;
    nsTArray<nsCOMPtr<nsIRunnable>> callbacks = std::move(mShutdownCallbacks);
    for (nsIRunnable* runnable : callbacks) {
      runnable->Run();
    }
  }));
}
523 | | |
// Requests that cooperative scheduling stop. aStoppedCallback runs once the
// Start() runnable finishes its teardown (see the callback loop there).
void
SchedulerImpl::Stop(already_AddRefed<nsIRunnable> aStoppedCallback)
{
  MOZ_ASSERT(mNumSchedulerBlocks > 0);

  // Note that this may be called when mShuttingDown is already true. We still
  // want to invoke the callback in that case.

  MutexAutoLock lock(mLock);
  mShuttingDown = true;
  mShutdownCallbacks.AppendElement(aStoppedCallback);
  mShutdownCondVar.Notify();
}
537 | | |
// Final shutdown: like Stop(), but queues a callback that destroys the
// singleton (and therefore this object) once teardown completes.
void
SchedulerImpl::Shutdown()
{
  MOZ_ASSERT(mNumSchedulerBlocks == 0);

  MutexAutoLock lock(mLock);
  mShuttingDown = true;

  // Delete the SchedulerImpl once shutdown is complete.
  mShutdownCallbacks.AppendElement(NS_NewRunnableFunction("SchedulerImpl::Shutdown",
                                                          [] { Scheduler::sScheduler = nullptr; }));

  mShutdownCondVar.Notify();
}
552 | | |
553 | | bool |
554 | | SchedulerImpl::QueueResource::IsAvailable(const MutexAutoLock& aProofOfLock) |
555 | 0 | { |
556 | 0 | mScheduler->mLock.AssertCurrentThreadOwns(); |
557 | 0 |
|
558 | 0 | RefPtr<SchedulerEventQueue> queue = mScheduler->mQueue; |
559 | 0 | return queue->HasPendingEvent(aProofOfLock); |
560 | 0 | } |
561 | | |
// Available when the JS system zone can be entered.
bool
SchedulerImpl::SystemZoneResource::IsAvailable(const MutexAutoLock& aProofOfLock)
{
  mScheduler->mLock.AssertCurrentThreadOwns();

  // It doesn't matter which context we pick; we really just need some
  // main-thread JSContext.
  JSContext* cx = mScheduler->mContexts[0];
  return js::SystemZoneAvailable(cx);
}
572 | | |
573 | | MOZ_THREAD_LOCAL(Scheduler::EventLoopActivation*) Scheduler::EventLoopActivation::sTopActivation; |
574 | | |
575 | | /* static */ void |
576 | | Scheduler::EventLoopActivation::Init() |
577 | 3 | { |
578 | 3 | sTopActivation.infallibleInit(); |
579 | 3 | } |
580 | | |
// Pushes this activation onto the thread-local stack. If the enclosing
// activation was mid-event, we are entering a nested event loop and the
// outer event is accounted as paused (see EnterNestedEventLoop).
Scheduler::EventLoopActivation::EventLoopActivation()
  : mPrev(sTopActivation.get())
  , mProcessingEvent(false)
  , mIsLabeled(false)
  , mPriority(EventPriority::Normal)
{
  sTopActivation.set(this);

  if (mPrev && mPrev->mProcessingEvent) {
    SchedulerImpl::EnterNestedEventLoop(*mPrev);
  }
}
593 | | |
// Pops this activation: finish our own event's accounting if one was set,
// restore the previous top, and "resume" the outer event if we had nested
// inside it (mirror of the constructor).
Scheduler::EventLoopActivation::~EventLoopActivation()
{
  if (mProcessingEvent) {
    SchedulerImpl::FinishEvent(*this);
  }

  MOZ_ASSERT(sTopActivation.get() == this);
  sTopActivation.set(mPrev);

  if (mPrev && mPrev->mProcessingEvent) {
    SchedulerImpl::ExitNestedEventLoop(*mPrev);
  }
}
607 | | |
// Accounts the start of an event: labeled events enable access validation
// and mark their affected groups running; unlabeled events set the global
// sUnlabeledEventRunning flag. Either way the running-thread count grows.
/* static */ void
SchedulerImpl::StartEvent(Scheduler::EventLoopActivation& aActivation)
{
  MOZ_ASSERT(!sUnlabeledEventRunning);
  if (aActivation.IsLabeled()) {
    SchedulerGroup::SetValidatingAccess(SchedulerGroup::StartValidation);
    aActivation.EventGroupsAffected().SetIsRunning(true);
  } else {
    sUnlabeledEventRunning = true;
  }
  sNumThreadsRunning++;
}
620 | | |
// Exact inverse of StartEvent: undo the labeled/unlabeled bookkeeping and
// decrement the running-thread count.
/* static */ void
SchedulerImpl::FinishEvent(Scheduler::EventLoopActivation& aActivation)
{
  if (aActivation.IsLabeled()) {
    aActivation.EventGroupsAffected().SetIsRunning(false);
    SchedulerGroup::SetValidatingAccess(SchedulerGroup::EndValidation);
  } else {
    MOZ_ASSERT(sUnlabeledEventRunning);
    sUnlabeledEventRunning = false;
  }

  MOZ_ASSERT(sNumThreadsRunning > 0);
  sNumThreadsRunning--;
}
635 | | |
636 | | // When we enter a nested event loop, we act as if the outer event loop's event |
637 | | // finished. When we exit the nested event loop, we "resume" the outer event |
638 | | // loop's event. |
639 | | /* static */ void |
640 | | SchedulerImpl::EnterNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation) |
641 | 0 | { |
642 | 0 | FinishEvent(aOuterActivation); |
643 | 0 | } |
644 | | |
645 | | /* static */ void |
646 | | SchedulerImpl::ExitNestedEventLoop(Scheduler::EventLoopActivation& aOuterActivation) |
647 | 0 | { |
648 | 0 | StartEvent(aOuterActivation); |
649 | 0 | } |
650 | | |
// Records the event about to run in this activation. An event is "labeled"
// when it implements nsILabelableRunnable and can enumerate the
// SchedulerGroups it affects; labeled events get validated-access accounting
// in SchedulerImpl::StartEvent.
void
Scheduler::EventLoopActivation::SetEvent(nsIRunnable* aEvent,
                                         EventPriority aPriority)
{
  if (nsCOMPtr<nsILabelableRunnable> labelable = do_QueryInterface(aEvent)) {
    if (labelable->GetAffectedSchedulerGroups(mEventGroups)) {
      mIsLabeled = true;
    }
  }

  mPriority = aPriority;
  mProcessingEvent = aEvent != nullptr;

  if (aEvent) {
    SchedulerImpl::StartEvent(*this);
  }
}
668 | | |
// Pool-thread bootstrap: make this physical thread impersonate the main
// thread (virtual thread, nsThread, MessageLoop), create its cooperative
// JSContext, and register it with the scheduler/profiler. Order matters:
// NS_SetMainThread must come before CreateCurrentThread.
void
SchedulerImpl::ThreadController::OnStartThread(size_t aIndex, const nsACString& aName, void* aStackTop)
{
  using mozilla::ipc::BackgroundChild;

  // Causes GetCurrentVirtualThread() to return mMainVirtual and NS_IsMainThread()
  // to return true.
  NS_SetMainThread(mMainVirtual);

  // This will initialize the thread's mVirtualThread to mMainVirtual since
  // GetCurrentVirtualThread() now returns mMainVirtual.
  nsThreadManager::get().CreateCurrentThread(mMainQueue, nsThread::MAIN_THREAD);

  PROFILER_REGISTER_THREAD(aName.BeginReading());

  // Remember the loop this physical thread had, restored in OnStopThread.
  mOldMainLoop = MessageLoop::current();

  MessageLoop::set_current(mMainLoop);

  xpc::CreateCooperativeContext();

  JSContext* cx = dom::danger::GetJSContext();
  mScheduler->SetJSContext(aIndex, cx);
  if (sPrefPreemption) {
    // Let the Switcher thread interrupt JS running on this context.
    JS_AddInterruptCallback(cx, SchedulerImpl::InterruptCallback);
  }
  Servo_InitializeCooperativeThread();
}
697 | | |
// Pool-thread teardown: undo OnStartThread in reverse — destroy the JS
// context, drop main-thread impersonation, restore the original MessageLoop,
// and unregister the nsThread and profiler entry.
void
SchedulerImpl::ThreadController::OnStopThread(size_t aIndex)
{
  xpc::DestroyCooperativeContext();

  NS_UnsetMainThread();
  MessageLoop::set_current(mOldMainLoop);

  RefPtr<nsThread> self = static_cast<nsThread*>(NS_GetCurrentThread());
  nsThreadManager::get().UnregisterCurrentThread(*self);

  PROFILER_UNREGISTER_THREAD();
}
711 | | |
712 | | void |
713 | | SchedulerImpl::ThreadController::OnSuspendThread(size_t aIndex) |
714 | 0 | { |
715 | 0 | xpc::YieldCooperativeContext(); |
716 | 0 | } |
717 | | |
718 | | void |
719 | | SchedulerImpl::ThreadController::OnResumeThread(size_t aIndex) |
720 | 0 | { |
721 | 0 | xpc::ResumeCooperativeContext(); |
722 | 0 | } |
723 | | |
724 | | void |
725 | | SchedulerImpl::Yield() |
726 | 0 | { |
727 | 0 | MutexAutoLock lock(mLock); |
728 | 0 | CooperativeThreadPool::Yield(nullptr, lock); |
729 | 0 | } |
730 | | |
// Raises a scheduler block. The first blocker (or any blocker raised while a
// shutdown is already in flight) must actually stop the scheduler and only
// fire the callback once it has stopped; later blockers can fire immediately.
void
SchedulerImpl::BlockThreadedExecution(nsIBlockThreadedExecutionCallback* aCallback)
{
  if (mNumSchedulerBlocks++ == 0 || mShuttingDown) {
    Stop(NewRunnableMethod("BlockThreadedExecution", aCallback,
                           &nsIBlockThreadedExecutionCallback::Callback));
  } else {
    // The scheduler is already blocked.
    nsCOMPtr<nsIBlockThreadedExecutionCallback> kungFuDeathGrip(aCallback);
    aCallback->Callback();
  }
}
743 | | |
744 | | void |
745 | | SchedulerImpl::UnblockThreadedExecution() |
746 | 0 | { |
747 | 0 | if (--mNumSchedulerBlocks == 0) { |
748 | 0 | Start(); |
749 | 0 | } |
750 | 0 | } |
751 | | |
// Creates the main thread with a SchedulerEventQueue (whose inner queue type
// is chosen by the multiple-queues pref) and instantiates the singleton
// SchedulerImpl around it. Returns the new main thread.
/* static */ already_AddRefed<nsThread>
Scheduler::Init(nsIIdlePeriod* aIdlePeriod)
{
  MOZ_ASSERT(!sScheduler);

  RefPtr<SchedulerEventQueue> queue;
  RefPtr<nsThread> mainThread;
  if (Scheduler::UseMultipleQueues()) {
    mainThread = CreateMainThread<SchedulerEventQueue, LabeledEventQueue>(aIdlePeriod, getter_AddRefs(queue));
  } else {
    mainThread = CreateMainThread<SchedulerEventQueue, EventQueue>(aIdlePeriod, getter_AddRefs(queue));
  }

  sScheduler = MakeUnique<SchedulerImpl>(queue);
  return mainThread.forget();
}
768 | | |
769 | | /* static */ void |
770 | | Scheduler::Start() |
771 | 0 | { |
772 | 0 | sScheduler->Start(); |
773 | 0 | } |
774 | | |
775 | | /* static */ void |
776 | | Scheduler::Shutdown() |
777 | 0 | { |
778 | 0 | if (sScheduler) { |
779 | 0 | sScheduler->Shutdown(); |
780 | 0 | } |
781 | 0 | } |
782 | | |
// Parent-process side of the pref handshake. Serializes five fields in the
// format "ECPQ,N": enabled flag (always 0 here), chaotic scheduling,
// preemption, multiple queues, then the thread count — parsed by SetPrefs in
// the content process.
/* static */ nsPrintfCString
Scheduler::GetPrefs()
{
  MOZ_ASSERT(XRE_IsParentProcess());
  nsPrintfCString result("%d%d%d%d,%d",
                         false, // XXX The scheduler is always disabled.
                         Preferences::GetBool("dom.ipc.scheduler.chaoticScheduling",
                                              SchedulerImpl::sPrefChaoticScheduling),
                         Preferences::GetBool("dom.ipc.scheduler.preemption",
                                              SchedulerImpl::sPrefPreemption),
                         Preferences::GetBool("dom.ipc.scheduler.useMultipleQueues",
                                              SchedulerImpl::sPrefUseMultipleQueues),
                         Preferences::GetInt("dom.ipc.scheduler.threadCount",
                                              SchedulerImpl::sPrefThreadCount));

  return result;
}
800 | | |
// Content-process side of the pref handshake: parses the "ECPQ,N" string
// produced by GetPrefs. Index 0 is the overall enabled flag (not consumed
// here); indices 1-3 are the bool prefs; the thread count follows the comma.
/* static */ void
Scheduler::SetPrefs(const char* aPrefs)
{
  MOZ_ASSERT(XRE_IsContentProcess());

  // If the prefs weren't sent to this process, use the default values.
  if (!aPrefs) {
    return;
  }

  // If the pref string appears truncated, use the default values.
  if (strlen(aPrefs) < 6) {
    return;
  }

  SchedulerImpl::sPrefChaoticScheduling = aPrefs[1] == '1';
  SchedulerImpl::sPrefPreemption = aPrefs[2] == '1';
  SchedulerImpl::sPrefUseMultipleQueues = aPrefs[3] == '1';
  MOZ_ASSERT(aPrefs[4] == ',');
  SchedulerImpl::sPrefThreadCount = atoi(aPrefs + 5);
}
822 | | |
823 | | /* static */ bool |
824 | | Scheduler::IsSchedulerEnabled() |
825 | 0 | { |
826 | 0 | // XXX We never enable the scheduler because it will crash immediately. |
827 | 0 | return false; |
828 | 0 | } |
829 | | |
830 | | /* static */ bool |
831 | | Scheduler::UseMultipleQueues() |
832 | 0 | { |
833 | 0 | return SchedulerImpl::sPrefUseMultipleQueues; |
834 | 0 | } |
835 | | |
836 | | /* static */ bool |
837 | | Scheduler::IsCooperativeThread() |
838 | 0 | { |
839 | 0 | return CooperativeThreadPool::IsCooperativeThread(); |
840 | 0 | } |
841 | | |
842 | | /* static */ void |
843 | | Scheduler::Yield() |
844 | 0 | { |
845 | 0 | sScheduler->Yield(); |
846 | 0 | } |
847 | | |
848 | | /* static */ bool |
849 | | Scheduler::UnlabeledEventRunning() |
850 | 0 | { |
851 | 0 | return SchedulerImpl::UnlabeledEventRunning(); |
852 | 0 | } |
853 | | |
854 | | /* static */ bool |
855 | | Scheduler::AnyEventRunning() |
856 | 0 | { |
857 | 0 | return SchedulerImpl::AnyEventRunning(); |
858 | 0 | } |
859 | | |
860 | | /* static */ void |
861 | | Scheduler::BlockThreadedExecution(nsIBlockThreadedExecutionCallback* aCallback) |
862 | 0 | { |
863 | 0 | if (!sScheduler) { |
864 | 0 | nsCOMPtr<nsIBlockThreadedExecutionCallback> kungFuDeathGrip(aCallback); |
865 | 0 | aCallback->Callback(); |
866 | 0 | return; |
867 | 0 | } |
868 | 0 | |
869 | 0 | sScheduler->BlockThreadedExecution(aCallback); |
870 | 0 | } |
871 | | |
872 | | /* static */ void |
873 | | Scheduler::UnblockThreadedExecution() |
874 | 0 | { |
875 | 0 | if (!sScheduler) { |
876 | 0 | return; |
877 | 0 | } |
878 | 0 | |
879 | 0 | sScheduler->UnblockThreadedExecution(); |
880 | 0 | } |