/src/mozilla-central/dom/workers/WorkerThread.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #include "WorkerThread.h" |
8 | | |
9 | | #include "mozilla/Assertions.h" |
10 | | #include "mozilla/ipc/BackgroundChild.h" |
11 | | #include "EventQueue.h" |
12 | | #include "mozilla/ThreadEventQueue.h" |
13 | | #include "mozilla/PerformanceCounter.h" |
14 | | #include "mozilla/StaticPrefs.h" |
15 | | #include "nsIThreadInternal.h" |
16 | | #include "WorkerPrivate.h" |
17 | | #include "WorkerRunnable.h" |
18 | | |
19 | | #ifdef DEBUG |
20 | | #include "nsThreadManager.h" |
21 | | #endif |
22 | | |
23 | | namespace mozilla { |
24 | | |
25 | | using namespace ipc; |
26 | | |
27 | | namespace dom { |
28 | | |
29 | | namespace { |
30 | | |
31 | | // The C stack size. We use the same stack size on all platforms for |
32 | | // consistency. |
33 | | // |
34 | | // Note: Our typical equation of 256 machine words works out to 2MB on 64-bit |
35 | | // platforms. Since that works out to the size of a VM huge page, that can |
36 | | // sometimes lead to an OS allocating an entire huge page for the stack at once. |
37 | | // To avoid this, we subtract the size of 2 pages, to be safe. |
38 | | const uint32_t kWorkerStackSize = 256 * sizeof(size_t) * 1024 - 8192; |
39 | | |
40 | | } // namespace |
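A note on the arithmetic in kWorkerStackSize above: 256 * sizeof(size_t) * 1024 is 2 MiB on a 64-bit platform (where sizeof(size_t) == 8), which is the common huge-page size the comment refers to, and the 8192 subtracted is the size of two 4 KiB base pages. A compile-time restatement, purely as an illustration (the 64-bit and 4 KiB-page assumptions are mine, not stated in the source):

#include <cstddef>
#include <cstdint>

// Illustrative restatement of the kWorkerStackSize arithmetic above.
constexpr uint32_t kIllustrativeWorkerStackSize =
    256 * sizeof(size_t) * 1024 - 8192;

// On 64-bit: 256 * 8 * 1024 bytes = 2 MiB, minus two 4 KiB pages = 2 MiB - 8 KiB.
static_assert(sizeof(size_t) != 8 ||
                  kIllustrativeWorkerStackSize == 2u * 1024 * 1024 - 8192,
              "2 MiB minus 8 KiB on 64-bit platforms");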
41 | | |
42 | | WorkerThreadFriendKey::WorkerThreadFriendKey() |
43 | 0 | { |
44 | 0 | MOZ_COUNT_CTOR(WorkerThreadFriendKey); |
45 | 0 | } |
46 | | |
47 | | WorkerThreadFriendKey::~WorkerThreadFriendKey() |
48 | 0 | { |
49 | 0 | MOZ_COUNT_DTOR(WorkerThreadFriendKey); |
50 | 0 | } |
51 | | |
52 | | class WorkerThread::Observer final |
53 | | : public nsIThreadObserver |
54 | | { |
55 | | WorkerPrivate* mWorkerPrivate; |
56 | | |
57 | | public: |
58 | | explicit Observer(WorkerPrivate* aWorkerPrivate) |
59 | | : mWorkerPrivate(aWorkerPrivate) |
60 | 0 | { |
61 | 0 | MOZ_ASSERT(aWorkerPrivate); |
62 | 0 | aWorkerPrivate->AssertIsOnWorkerThread(); |
63 | 0 | } |
64 | | |
65 | | NS_DECL_THREADSAFE_ISUPPORTS |
66 | | |
67 | | private: |
68 | | ~Observer() |
69 | 0 | { |
70 | 0 | mWorkerPrivate->AssertIsOnWorkerThread(); |
71 | 0 | } |
72 | | |
73 | | NS_DECL_NSITHREADOBSERVER |
74 | | }; |
75 | | |
76 | | WorkerThread::WorkerThread() |
77 | | : nsThread(MakeNotNull<ThreadEventQueue<mozilla::EventQueue>*>( |
78 | | MakeUnique<mozilla::EventQueue>()), |
79 | | nsThread::NOT_MAIN_THREAD, |
80 | | kWorkerStackSize) |
81 | | , mLock("WorkerThread::mLock") |
82 | | , mWorkerPrivateCondVar(mLock, "WorkerThread::mWorkerPrivateCondVar") |
83 | | , mWorkerPrivate(nullptr) |
84 | | , mOtherThreadsDispatchingViaEventTarget(0) |
85 | | #ifdef DEBUG |
86 | | , mAcceptingNonWorkerRunnables(true) |
87 | | #endif |
88 | 0 | { |
89 | 0 | } |
90 | | |
91 | | WorkerThread::~WorkerThread() |
92 | 0 | { |
93 | 0 | MOZ_ASSERT(!mWorkerPrivate); |
94 | 0 | MOZ_ASSERT(!mOtherThreadsDispatchingViaEventTarget); |
95 | 0 | MOZ_ASSERT(mAcceptingNonWorkerRunnables); |
96 | 0 | } |
97 | | |
98 | | // static |
99 | | already_AddRefed<WorkerThread> |
100 | | WorkerThread::Create(const WorkerThreadFriendKey& /* aKey */) |
101 | 0 | { |
102 | 0 | RefPtr<WorkerThread> thread = new WorkerThread(); |
103 | 0 | if (NS_FAILED(thread->Init(NS_LITERAL_CSTRING("DOM Worker")))) { |
104 | 0 | NS_WARNING("Failed to create new thread!"); |
105 | 0 | return nullptr; |
106 | 0 | } |
107 | 0 | |
108 | 0 | return thread.forget(); |
109 | 0 | } |
110 | | |
111 | | void |
112 | | WorkerThread::SetWorker(const WorkerThreadFriendKey& /* aKey */, |
113 | | WorkerPrivate* aWorkerPrivate) |
114 | 0 | { |
115 | 0 | MOZ_ASSERT(PR_GetCurrentThread() == mThread); |
116 | 0 | |
117 | 0 | if (aWorkerPrivate) { |
118 | 0 | { |
119 | 0 | MutexAutoLock lock(mLock); |
120 | 0 | |
121 | 0 | MOZ_ASSERT(!mWorkerPrivate); |
122 | 0 | MOZ_ASSERT(mAcceptingNonWorkerRunnables); |
123 | 0 | |
124 | 0 | mWorkerPrivate = aWorkerPrivate; |
125 | | #ifdef DEBUG |
126 | | mAcceptingNonWorkerRunnables = false; |
127 | | #endif |
128 | | } |
129 | 0 | |
130 | 0 | mObserver = new Observer(aWorkerPrivate); |
131 | 0 | MOZ_ALWAYS_SUCCEEDS(AddObserver(mObserver)); |
132 | 0 | } else { |
133 | 0 | MOZ_ALWAYS_SUCCEEDS(RemoveObserver(mObserver)); |
134 | 0 | mObserver = nullptr; |
135 | 0 | |
136 | 0 | { |
137 | 0 | MutexAutoLock lock(mLock); |
138 | 0 | |
139 | 0 | MOZ_ASSERT(mWorkerPrivate); |
140 | 0 | MOZ_ASSERT(!mAcceptingNonWorkerRunnables); |
141 | 0 | MOZ_ASSERT(!mOtherThreadsDispatchingViaEventTarget, |
142 | 0 | "XPCOM Dispatch hapenning at the same time our thread is " |
143 | 0 | "being unset! This should not be possible!"); |
144 | 0 | |
145 | 0 | while (mOtherThreadsDispatchingViaEventTarget) { |
146 | 0 | mWorkerPrivateCondVar.Wait(); |
147 | 0 | } |
148 | 0 | |
149 | | #ifdef DEBUG |
150 | | mAcceptingNonWorkerRunnables = true; |
151 | | #endif |
152 | | mWorkerPrivate = nullptr; |
153 | 0 | } |
154 | 0 | } |
155 | 0 | } |
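The else branch above (aWorkerPrivate is null) is a drain-before-teardown pattern: off-thread dispatchers bump mOtherThreadsDispatchingViaEventTarget while they hold a reference to mWorkerPrivate, and SetWorker() must sleep on mWorkerPrivateCondVar until that count drops back to zero before clearing the pointer. A minimal sketch of that wait, using standard C++ primitives instead of mozilla::Mutex/CondVar (all names here are illustrative, not Mozilla's); the dispatcher half of the same handshake is sketched after Dispatch() below:

#include <condition_variable>
#include <mutex>

struct UnbindState {
  std::mutex lock;                       // plays the role of mLock
  std::condition_variable drainCondVar;  // plays the role of mWorkerPrivateCondVar
  unsigned inFlightDispatchers = 0;      // mOtherThreadsDispatchingViaEventTarget
  void* workerPrivate = nullptr;         // stands in for mWorkerPrivate
};

// Sketch of the unbinding half of WorkerThread::SetWorker().
void UnbindWorker(UnbindState& s) {
  std::unique_lock<std::mutex> guard(s.lock);
  // Block until every dispatcher that grabbed workerPrivate under the lock is
  // done with it; dispatchers notify drainCondVar when the count reaches zero.
  s.drainCondVar.wait(guard, [&s] { return s.inFlightDispatchers == 0; });
  s.workerPrivate = nullptr;
}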
156 | | |
157 | | void |
158 | | WorkerThread::IncrementDispatchCounter() |
159 | 0 | { |
160 | 0 | if (!mozilla::StaticPrefs::dom_performance_enable_scheduler_timing()) { |
161 | 0 | return; |
162 | 0 | } |
163 | 0 | MutexAutoLock lock(mLock); |
164 | 0 | if (mWorkerPrivate) { |
165 | 0 | PerformanceCounter* performanceCounter = mWorkerPrivate->GetPerformanceCounter(); |
166 | 0 | if (performanceCounter) { |
167 | 0 | performanceCounter->IncrementDispatchCounter(DispatchCategory::Worker); |
168 | 0 | } |
169 | 0 | } |
170 | 0 | } |
171 | | |
172 | | nsresult |
173 | | WorkerThread::DispatchPrimaryRunnable(const WorkerThreadFriendKey& /* aKey */, |
174 | | already_AddRefed<nsIRunnable> aRunnable) |
175 | 0 | { |
176 | 0 | nsCOMPtr<nsIRunnable> runnable(aRunnable); |
177 | 0 | |
178 | | #ifdef DEBUG |
179 | | MOZ_ASSERT(PR_GetCurrentThread() != mThread); |
180 | | MOZ_ASSERT(runnable); |
181 | | { |
182 | | MutexAutoLock lock(mLock); |
183 | | |
184 | | MOZ_ASSERT(!mWorkerPrivate); |
185 | | MOZ_ASSERT(mAcceptingNonWorkerRunnables); |
186 | | } |
187 | | #endif |
188 | |
189 | 0 | nsresult rv = nsThread::Dispatch(runnable.forget(), NS_DISPATCH_NORMAL); |
190 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
191 | 0 | return rv; |
192 | 0 | } |
193 | 0 | |
194 | 0 | return NS_OK; |
195 | 0 | } |
196 | | |
197 | | nsresult |
198 | | WorkerThread::DispatchAnyThread(const WorkerThreadFriendKey& /* aKey */, |
199 | | already_AddRefed<WorkerRunnable> aWorkerRunnable) |
200 | 0 | { |
201 | 0 | // May be called on any thread! |
202 | 0 | |
203 | | #ifdef DEBUG |
204 | | { |
205 | | const bool onWorkerThread = PR_GetCurrentThread() == mThread; |
206 | | { |
207 | | MutexAutoLock lock(mLock); |
208 | | |
209 | | MOZ_ASSERT(mWorkerPrivate); |
210 | | MOZ_ASSERT(!mAcceptingNonWorkerRunnables); |
211 | | |
212 | | if (onWorkerThread) { |
213 | | mWorkerPrivate->AssertIsOnWorkerThread(); |
214 | | } |
215 | | } |
216 | | } |
217 | | #endif |
218 | |
219 | 0 | // Increment the PerformanceCounter dispatch count |
220 | 0 | // to keep track of how many runnables are executed. |
221 | 0 | IncrementDispatchCounter(); |
222 | 0 | nsCOMPtr<nsIRunnable> runnable(aWorkerRunnable); |
223 | 0 | |
224 | 0 | nsresult rv = nsThread::Dispatch(runnable.forget(), NS_DISPATCH_NORMAL); |
225 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
226 | 0 | return rv; |
227 | 0 | } |
228 | 0 | |
229 | 0 | // We don't need to notify the worker's condition variable here because we're |
230 | 0 | // being called from worker-controlled code and it will make sure to wake up |
231 | 0 | // the worker thread if needed. |
232 | 0 | |
233 | 0 | return NS_OK; |
234 | 0 | } |
235 | | |
236 | | NS_IMETHODIMP |
237 | | WorkerThread::DispatchFromScript(nsIRunnable* aRunnable, uint32_t aFlags) |
238 | 0 | { |
239 | 0 | nsCOMPtr<nsIRunnable> runnable(aRunnable); |
240 | 0 | return Dispatch(runnable.forget(), aFlags); |
241 | 0 | } |
242 | | |
243 | | NS_IMETHODIMP |
244 | | WorkerThread::Dispatch(already_AddRefed<nsIRunnable> aRunnable, uint32_t aFlags) |
245 | 0 | { |
246 | 0 | // May be called on any thread! |
247 | 0 | nsCOMPtr<nsIRunnable> runnable(aRunnable); // in case we exit early |
248 | 0 | |
249 | 0 | // Workers only support asynchronous dispatch. |
250 | 0 | if (NS_WARN_IF(aFlags != NS_DISPATCH_NORMAL)) { |
251 | 0 | return NS_ERROR_UNEXPECTED; |
252 | 0 | } |
253 | 0 | |
254 | 0 | const bool onWorkerThread = PR_GetCurrentThread() == mThread; |
255 | 0 | |
256 | 0 | |
257 | | #ifdef DEBUG |
258 | | if (runnable && !onWorkerThread) { |
259 | | nsCOMPtr<nsICancelableRunnable> cancelable = do_QueryInterface(runnable); |
260 | | |
261 | | { |
262 | | MutexAutoLock lock(mLock); |
263 | | |
264 | | // Only enforce cancelable runnables after we've started the worker loop. |
265 | | if (!mAcceptingNonWorkerRunnables) { |
266 | | MOZ_ASSERT(cancelable, |
267 | | "Only nsICancelableRunnable may be dispatched to a worker!"); |
268 | | } |
269 | | } |
270 | | } |
271 | | #endif |
272 | |
273 | 0 | WorkerPrivate* workerPrivate = nullptr; |
274 | 0 | if (onWorkerThread) { |
275 | 0 | // No need to lock here because it is only modified on this thread. |
276 | 0 | MOZ_ASSERT(mWorkerPrivate); |
277 | 0 | mWorkerPrivate->AssertIsOnWorkerThread(); |
278 | 0 | |
279 | 0 | workerPrivate = mWorkerPrivate; |
280 | 0 | } else { |
281 | 0 | MutexAutoLock lock(mLock); |
282 | 0 | |
283 | 0 | MOZ_ASSERT(mOtherThreadsDispatchingViaEventTarget < UINT32_MAX); |
284 | 0 | |
285 | 0 | if (mWorkerPrivate) { |
286 | 0 | workerPrivate = mWorkerPrivate; |
287 | 0 | |
288 | 0 | // Incrementing this counter will make the worker thread sleep if it |
289 | 0 | // somehow tries to unset mWorkerPrivate while we're using it. |
290 | 0 | mOtherThreadsDispatchingViaEventTarget++; |
291 | 0 | } |
292 | 0 | } |
293 | 0 | |
294 | 0 | // Increment the PerformanceCounter dispatch count |
295 | 0 | // to keep track of how many runnables are executed. |
296 | 0 | IncrementDispatchCounter(); |
297 | 0 | nsresult rv; |
298 | 0 | if (runnable && onWorkerThread) { |
299 | 0 | RefPtr<WorkerRunnable> workerRunnable = workerPrivate->MaybeWrapAsWorkerRunnable(runnable.forget()); |
300 | 0 | rv = nsThread::Dispatch(workerRunnable.forget(), NS_DISPATCH_NORMAL); |
301 | 0 | } else { |
302 | 0 | rv = nsThread::Dispatch(runnable.forget(), NS_DISPATCH_NORMAL); |
303 | 0 | } |
304 | 0 | |
305 | 0 | if (!onWorkerThread && workerPrivate) { |
306 | 0 | // We need to wake the worker thread if we're not already on the right |
307 | 0 | // thread and the dispatch succeeded. |
308 | 0 | if (NS_SUCCEEDED(rv)) { |
309 | 0 | MutexAutoLock workerLock(workerPrivate->mMutex); |
310 | 0 | |
311 | 0 | workerPrivate->mCondVar.Notify(); |
312 | 0 | } |
313 | 0 | |
314 | 0 | // Now unset our waiting flag. |
315 | 0 | { |
316 | 0 | MutexAutoLock lock(mLock); |
317 | 0 | |
318 | 0 | MOZ_ASSERT(mOtherThreadsDispatchingViaEventTarget); |
319 | 0 | |
320 | 0 | if (!--mOtherThreadsDispatchingViaEventTarget) { |
321 | 0 | mWorkerPrivateCondVar.Notify(); |
322 | 0 | } |
323 | 0 | } |
324 | 0 | } |
325 | 0 | |
326 | 0 | if (NS_WARN_IF(NS_FAILED(rv))) { |
327 | 0 | return rv; |
328 | 0 | } |
329 | 0 | |
330 | 0 | return NS_OK; |
331 | 0 | } |
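Taken together with SetWorker(), the off-thread path of Dispatch() above follows a fixed order: register as an in-flight dispatcher under mLock, hand the runnable to the event queue, wake the possibly blocked worker through its own condition variable, and only then deregister so a concurrent unbind can proceed. A standalone restatement of that ordering with standard C++ primitives and hypothetical names, as a companion to the UnbindWorker() sketch after SetWorker() (a sketch only, not Mozilla's implementation):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>

struct WorkerLikeTarget {
  std::mutex lock;                        // plays the role of mLock
  std::condition_variable drainCondVar;   // mWorkerPrivateCondVar
  unsigned inFlightDispatchers = 0;       // mOtherThreadsDispatchingViaEventTarget
  bool bound = true;                      // "mWorkerPrivate != nullptr"

  std::mutex workerMutex;                    // workerPrivate->mMutex
  std::condition_variable workerCondVar;     // workerPrivate->mCondVar
  std::queue<std::function<void()>> events;  // stands in for the nsThread event queue
};

// Sketch of the cross-thread branch of WorkerThread::Dispatch().
bool DispatchFromOtherThread(WorkerLikeTarget& t, std::function<void()> task) {
  {
    std::lock_guard<std::mutex> guard(t.lock);
    if (!t.bound) {
      return false;           // worker already unbound; nothing to wake
    }
    ++t.inFlightDispatchers;  // keeps a concurrent unbind waiting (see SetWorker)
  }

  {
    // Enqueue, then wake the worker in case it is blocked on its own condvar
    // rather than sitting inside the event queue.
    std::lock_guard<std::mutex> guard(t.workerMutex);
    t.events.push(std::move(task));
    t.workerCondVar.notify_one();
  }

  {
    // Only now drop the in-flight count; an unbind waiting on drainCondVar
    // may proceed once this reaches zero.
    std::lock_guard<std::mutex> guard(t.lock);
    if (--t.inFlightDispatchers == 0) {
      t.drainCondVar.notify_one();
    }
  }
  return true;
}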
332 | | |
333 | | NS_IMETHODIMP |
334 | | WorkerThread::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t) |
335 | 0 | { |
336 | 0 | return NS_ERROR_NOT_IMPLEMENTED; |
337 | 0 | } |
338 | | |
339 | | uint32_t |
340 | | WorkerThread::RecursionDepth(const WorkerThreadFriendKey& /* aKey */) const |
341 | 0 | { |
342 | 0 | MOZ_ASSERT(PR_GetCurrentThread() == mThread); |
343 | 0 | |
344 | 0 | return mNestedEventLoopDepth; |
345 | 0 | } |
346 | | |
347 | | PerformanceCounter* |
348 | | WorkerThread::GetPerformanceCounter(nsIRunnable* aEvent) |
349 | 0 | { |
350 | 0 | if (mWorkerPrivate) { |
351 | 0 | return mWorkerPrivate->GetPerformanceCounter(); |
352 | 0 | } |
353 | 0 | return nullptr; |
354 | 0 | } |
355 | | |
356 | | NS_IMPL_ISUPPORTS(WorkerThread::Observer, nsIThreadObserver) |
357 | | |
358 | | NS_IMETHODIMP |
359 | | WorkerThread::Observer::OnDispatchedEvent() |
360 | 0 | { |
361 | 0 | MOZ_CRASH("OnDispatchedEvent() should never be called!"); |
362 | 0 | } |
363 | | |
364 | | NS_IMETHODIMP |
365 | | WorkerThread::Observer::OnProcessNextEvent(nsIThreadInternal* /* aThread */, |
366 | | bool aMayWait) |
367 | 0 | { |
368 | 0 | mWorkerPrivate->AssertIsOnWorkerThread(); |
369 | 0 | |
370 | 0 | // If the PBackground child is not created yet, then we must permit |
371 | 0 | // blocking event processing to support |
372 | 0 | // BackgroundChild::GetOrCreateForCurrentThread(). If this occurs |
373 | 0 | // then we are spinning on the event queue at the start of |
374 | 0 | // PrimaryWorkerRunnable::Run() and don't want to process the event in |
375 | 0 | // mWorkerPrivate yet. |
376 | 0 | if (aMayWait) { |
377 | 0 | MOZ_ASSERT(CycleCollectedJSContext::Get()->RecursionDepth() == 2); |
378 | 0 | MOZ_ASSERT(!BackgroundChild::GetForCurrentThread()); |
379 | 0 | return NS_OK; |
380 | 0 | } |
381 | 0 | |
382 | 0 | mWorkerPrivate->OnProcessNextEvent(); |
383 | 0 | return NS_OK; |
384 | 0 | } |
385 | | |
386 | | NS_IMETHODIMP |
387 | | WorkerThread::Observer::AfterProcessNextEvent(nsIThreadInternal* /* aThread */, |
388 | | bool /* aEventWasProcessed */) |
389 | 0 | { |
390 | 0 | mWorkerPrivate->AssertIsOnWorkerThread(); |
391 | 0 | |
392 | 0 | mWorkerPrivate->AfterProcessNextEvent(); |
393 | 0 | return NS_OK; |
394 | 0 | } |
395 | | |
396 | | } // namespace dom |
397 | | } // namespace mozilla |