/work/obj-fuzz/dist/include/mozilla/TaskDispatcher.h
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #if !defined(TaskDispatcher_h_) |
8 | | #define TaskDispatcher_h_ |
9 | | |
10 | | #include "mozilla/AbstractThread.h" |
11 | | #include "mozilla/Maybe.h" |
12 | | #include "mozilla/UniquePtr.h" |
13 | | #include "mozilla/Unused.h" |
14 | | |
15 | | #include "nsISupportsImpl.h" |
16 | | #include "nsTArray.h" |
17 | | #include "nsThreadUtils.h" |
18 | | |
19 | | #include <queue> |
20 | | |
21 | | namespace mozilla { |
22 | | |
23 | | /* |
24 | | * A classic approach to cross-thread communication is to dispatch asynchronous |
25 | | * runnables to perform updates on other threads. This generally works well, but |
26 | | * there are sometimes reasons why we might want to delay the actual dispatch of |
27 | | * these tasks until a specified moment. At present, this is primarily useful to |
28 | | * ensure that mirrored state gets updated atomically - but there may be other |
29 | | * applications as well. |
30 | | * |
31 | | * TaskDispatcher is a general abstract class that accepts tasks and dispatches |
32 | | * them at some later point. Tasks are grouped per target thread, and each group |
33 | | * contains separate queues for several kinds of tasks (see the comments below): |
34 | | * "state change tasks", which run first and are intended to be used to update |
35 | | * the value held by mirrors, and regular tasks, which are other arbitrary |
36 | | * operations that are gated to run after all the state changes have completed. |
37 | | */ |
38 | | class TaskDispatcher |
39 | | { |
40 | | public: |
41 | 0 | TaskDispatcher() {} |
42 | 0 | virtual ~TaskDispatcher() {} |
43 | | |
44 | | // Direct tasks are run directly (rather than dispatched asynchronously) when |
45 | | // the tail dispatcher fires. A direct task may cause other tasks to be added |
46 | | // to the tail dispatcher. |
47 | | virtual void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) = 0; |
48 | | |
49 | | // State change tasks are dispatched asynchronously and always run before |
50 | | // regular tasks. They are intended to be used to update the value held by |
51 | | // mirrors before any other dispatched tasks are run on the target thread. |
52 | | virtual void AddStateChangeTask(AbstractThread* aThread, |
53 | | already_AddRefed<nsIRunnable> aRunnable) = 0; |
54 | | |
55 | | // Regular tasks are dispatched asynchronously, and run after state change |
56 | | // tasks. |
57 | | virtual nsresult AddTask(AbstractThread* aThread, |
58 | | already_AddRefed<nsIRunnable> aRunnable) = 0; |
59 | | |
60 | | virtual nsresult DispatchTasksFor(AbstractThread* aThread) = 0; |
61 | | virtual bool HasTasksFor(AbstractThread* aThread) = 0; |
62 | | virtual void DrainDirectTasks() = 0; |
63 | | }; |
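
A minimal usage sketch of the interface above (QueueMirrorUpdate is a hypothetical helper,
not part of this header, and aThread is assumed to be a valid target): a state-change task,
a regular task, and a direct task are queued on a dispatcher, and when the group is
eventually dispatched the state change runs on aThread before the regular task.

    void QueueMirrorUpdate(TaskDispatcher& aDispatcher, AbstractThread* aThread)
    {
      // Dispatched asynchronously; runs on aThread before any regular tasks
      // queued in the same group.
      aDispatcher.AddStateChangeTask(aThread,
          NS_NewRunnableFunction("UpdateMirroredValue", [] {
            // Update the mirrored state here.
          }));

      // Dispatched asynchronously; runs on aThread only after all queued
      // state changes have completed.
      Unused << aDispatcher.AddTask(aThread,
          NS_NewRunnableFunction("ReactToNewValue", [] {
            // Arbitrary follow-up work.
          }));

      // Not dispatched; runs directly when the dispatcher drains its direct
      // tasks (e.g. when a tail dispatcher fires).
      aDispatcher.AddDirectTask(
          NS_NewRunnableFunction("DirectFollowUp", [] {}));
    }
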
64 | | |
65 | | /* |
66 | | * AutoTaskDispatcher is a stack-scoped TaskDispatcher implementation that fires |
67 | | * its queued tasks when it is popped off the stack. |
68 | | */ |
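
For example (an illustrative sketch; BatchTasks is a hypothetical function and aThread an
assumed valid target), tasks queued on a stack-allocated AutoTaskDispatcher are held until
the dispatcher is destroyed at the end of the scope:

    void BatchTasks(AbstractThread* aThread)
    {
      AutoTaskDispatcher dispatcher; // aIsTailDispatcher defaults to false
      Unused << dispatcher.AddTask(aThread,
          NS_NewRunnableFunction("BatchedWork", [] {
            // Work that should run on aThread once the batch is flushed.
          }));
      // Nothing has been dispatched yet; more tasks may be queued here.
    } // ~AutoTaskDispatcher dispatches the queued task group to aThread.
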
69 | | class AutoTaskDispatcher : public TaskDispatcher |
70 | | { |
71 | | public: |
72 | | explicit AutoTaskDispatcher(bool aIsTailDispatcher = false) |
73 | | : mIsTailDispatcher(aIsTailDispatcher) |
74 | 0 | {} |
75 | | |
76 | | ~AutoTaskDispatcher() |
77 | 0 | { |
78 | 0 | // Given that direct tasks may trigger other code that uses the tail |
79 | 0 | // dispatcher, it's better to avoid processing them in the tail dispatcher's |
80 | 0 | // destructor. So we require TailDispatchers to manually invoke |
81 | 0 | // DrainDirectTasks before the AutoTaskDispatcher gets destroyed. In truth, |
82 | 0 | // this is only necessary in the case where this AutoTaskDispatcher can be |
83 | 0 | // accessed by the direct tasks it dispatches (true for TailDispatchers, but |
84 | 0 | // potentially not true for other hypothetical AutoTaskDispatchers). Feel |
85 | 0 | // free to loosen this restriction to apply only to mIsTailDispatcher if a |
86 | 0 | // use-case requires it. |
87 | 0 | MOZ_ASSERT(!HaveDirectTasks()); |
88 | 0 | |
89 | 0 | for (size_t i = 0; i < mTaskGroups.Length(); ++i) { |
90 | 0 | DispatchTaskGroup(std::move(mTaskGroups[i])); |
91 | 0 | } |
92 | 0 | } |
93 | | |
94 | | bool HaveDirectTasks() const |
95 | 0 | { |
96 | 0 | return mDirectTasks.isSome() && !mDirectTasks->empty(); |
97 | 0 | } |
98 | | |
99 | | void DrainDirectTasks() override |
100 | 0 | { |
101 | 0 | while (HaveDirectTasks()) { |
102 | 0 | nsCOMPtr<nsIRunnable> r = mDirectTasks->front(); |
103 | 0 | mDirectTasks->pop(); |
104 | 0 | r->Run(); |
105 | 0 | } |
106 | 0 | } |
107 | | |
108 | | void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) override |
109 | 0 | { |
110 | 0 | if (mDirectTasks.isNothing()) { |
111 | 0 | mDirectTasks.emplace(); |
112 | 0 | } |
113 | 0 | mDirectTasks->push(std::move(aRunnable)); |
114 | 0 | } |
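// Illustrative call site (an added sketch, not from the original source),
// assuming the current AbstractThread supports tail dispatch: code running
// inside a runnable can queue a direct task on the current tail dispatcher,
// mirroring the TailDispatcher() usage in MaybeDrainDirectTasks() below:
//
//   AbstractThread::GetCurrent()->TailDispatcher().AddDirectTask(
//       NS_NewRunnableFunction("FollowUp", [] { /* runs when drained */ }));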
115 | | |
116 | | void AddStateChangeTask(AbstractThread* aThread, |
117 | | already_AddRefed<nsIRunnable> aRunnable) override |
118 | 0 | { |
119 | 0 | nsCOMPtr<nsIRunnable> r = aRunnable; |
120 | 0 | MOZ_RELEASE_ASSERT(r); |
121 | 0 | EnsureTaskGroup(aThread).mStateChangeTasks.AppendElement(r.forget()); |
122 | 0 | } |
123 | | |
124 | | nsresult AddTask(AbstractThread* aThread, |
125 | | already_AddRefed<nsIRunnable> aRunnable) override |
126 | 0 | { |
127 | 0 | nsCOMPtr<nsIRunnable> r = aRunnable; |
128 | 0 | MOZ_RELEASE_ASSERT(r); |
129 | 0 | // To preserve the event order, we need to append a new group if the last |
130 | 0 | // group is not targeted for |aThread|. |
131 | 0 | // See https://bugzilla.mozilla.org/show_bug.cgi?id=1318226&mark=0-3#c0 |
132 | 0 | // for the details of the issue. |
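// Worked example (added illustration): appending T1 -> thread A,
// T2 -> thread B, T3 -> thread A, in that order, creates three groups
// ([T1], [T2], [T3]) instead of merging T3 into the first group. Merging
// would attach T3 to the group runnable dispatched for T1, ordering its
// dispatch ahead of T2's even though T2 was queued first.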
133 | 0 | if (mTaskGroups.Length() == 0 || mTaskGroups.LastElement()->mThread != aThread) { |
134 | 0 | mTaskGroups.AppendElement(new PerThreadTaskGroup(aThread)); |
135 | 0 | } |
136 | 0 | |
137 | 0 | PerThreadTaskGroup& group = *mTaskGroups.LastElement(); |
138 | 0 | group.mRegularTasks.AppendElement(r.forget()); |
139 | 0 | |
140 | 0 | return NS_OK; |
141 | 0 | } |
142 | | |
143 | | bool HasTasksFor(AbstractThread* aThread) override |
144 | 0 | { |
145 | 0 | return !!GetTaskGroup(aThread) || |
146 | 0 | (aThread == AbstractThread::GetCurrent() && HaveDirectTasks()); |
147 | 0 | } |
148 | | |
149 | | nsresult DispatchTasksFor(AbstractThread* aThread) override |
150 | 0 | { |
151 | 0 | nsresult rv = NS_OK; |
152 | 0 | |
153 | 0 | // Dispatch all groups that match |aThread|. |
154 | 0 | for (size_t i = 0; i < mTaskGroups.Length(); ++i) { |
155 | 0 | if (mTaskGroups[i]->mThread == aThread) { |
156 | 0 | nsresult rv2 = DispatchTaskGroup(std::move(mTaskGroups[i])); |
157 | 0 | |
158 | 0 | if (NS_WARN_IF(NS_FAILED(rv2)) && NS_SUCCEEDED(rv)) { |
159 | 0 | // We should try to call DispatchTaskGroup() for as many groups as |
160 | 0 | // possible, and return an error if any of the DispatchTaskGroup() |
161 | 0 | // calls failed. |
162 | 0 | rv = rv2; |
163 | 0 | } |
164 | 0 | |
165 | 0 | mTaskGroups.RemoveElementAt(i--); |
166 | 0 | } |
167 | 0 | } |
168 | 0 | |
169 | 0 | return rv; |
170 | 0 | } |
171 | | |
172 | | private: |
173 | | |
174 | | struct PerThreadTaskGroup |
175 | | { |
176 | | public: |
177 | | explicit PerThreadTaskGroup(AbstractThread* aThread) |
178 | | : mThread(aThread) |
179 | 0 | { |
180 | 0 | MOZ_COUNT_CTOR(PerThreadTaskGroup); |
181 | 0 | } |
182 | | |
183 | 0 | ~PerThreadTaskGroup() { MOZ_COUNT_DTOR(PerThreadTaskGroup); } |
184 | | |
185 | | RefPtr<AbstractThread> mThread; |
186 | | nsTArray<nsCOMPtr<nsIRunnable>> mStateChangeTasks; |
187 | | nsTArray<nsCOMPtr<nsIRunnable>> mRegularTasks; |
188 | | }; |
189 | | |
190 | | class TaskGroupRunnable : public Runnable |
191 | | { |
192 | | public: |
193 | | explicit TaskGroupRunnable(UniquePtr<PerThreadTaskGroup>&& aTasks) |
194 | | : Runnable("AutoTaskDispatcher::TaskGroupRunnable") |
195 | | , mTasks(std::move(aTasks)) |
196 | 0 | { |
197 | 0 | } |
198 | | |
199 | | NS_IMETHOD Run() override |
200 | 0 | { |
201 | 0 | // State change tasks get run all together before any other code runs, so |
202 | 0 | // that all state changes are made in an atomic unit. |
203 | 0 | for (size_t i = 0; i < mTasks->mStateChangeTasks.Length(); ++i) { |
204 | 0 | mTasks->mStateChangeTasks[i]->Run(); |
205 | 0 | } |
206 | 0 | |
207 | 0 | // Once the state changes have completed, drain any direct tasks |
208 | 0 | // generated by those state changes (i.e. watcher notification tasks). |
209 | 0 | // This needs to be outside the loop because we don't want to run code |
210 | 0 | // that might observe intermediate states. |
211 | 0 | MaybeDrainDirectTasks(); |
212 | 0 | |
213 | 0 | for (size_t i = 0; i < mTasks->mRegularTasks.Length(); ++i) { |
214 | 0 | mTasks->mRegularTasks[i]->Run(); |
215 | 0 | |
216 | 0 | // Scope direct tasks tightly to the task that generated them. |
217 | 0 | MaybeDrainDirectTasks(); |
218 | 0 | } |
219 | 0 | |
220 | 0 | return NS_OK; |
221 | 0 | } |
222 | | |
223 | | private: |
224 | | void MaybeDrainDirectTasks() |
225 | 0 | { |
226 | 0 | AbstractThread* currentThread = AbstractThread::GetCurrent(); |
227 | 0 | if (currentThread) { |
228 | 0 | currentThread->TailDispatcher().DrainDirectTasks(); |
229 | 0 | } |
230 | 0 | } |
231 | | |
232 | | UniquePtr<PerThreadTaskGroup> mTasks; |
233 | | }; |
234 | | |
235 | | PerThreadTaskGroup& EnsureTaskGroup(AbstractThread* aThread) |
236 | 0 | { |
237 | 0 | PerThreadTaskGroup* existing = GetTaskGroup(aThread); |
238 | 0 | if (existing) { |
239 | 0 | return *existing; |
240 | 0 | } |
241 | 0 | |
242 | 0 | mTaskGroups.AppendElement(new PerThreadTaskGroup(aThread)); |
243 | 0 | return *mTaskGroups.LastElement(); |
244 | 0 | } |
245 | | |
246 | | PerThreadTaskGroup* GetTaskGroup(AbstractThread* aThread) |
247 | 0 | { |
248 | 0 | for (size_t i = 0; i < mTaskGroups.Length(); ++i) { |
249 | 0 | if (mTaskGroups[i]->mThread == aThread) { |
250 | 0 | return mTaskGroups[i].get(); |
251 | 0 | } |
252 | 0 | } |
253 | 0 | |
254 | 0 | // Not found. |
255 | 0 | return nullptr; |
256 | 0 | } |
257 | | |
258 | | nsresult DispatchTaskGroup(UniquePtr<PerThreadTaskGroup> aGroup) |
259 | 0 | { |
260 | 0 | RefPtr<AbstractThread> thread = aGroup->mThread; |
261 | 0 | |
262 | 0 | AbstractThread::DispatchReason reason = mIsTailDispatcher ? AbstractThread::TailDispatch |
263 | 0 | : AbstractThread::NormalDispatch; |
264 | 0 | nsCOMPtr<nsIRunnable> r = new TaskGroupRunnable(std::move(aGroup)); |
265 | 0 | return thread->Dispatch(r.forget(), reason); |
266 | 0 | } |
267 | | |
268 | | // Direct tasks. We use a Maybe<> because (a) this class is hot, (b) |
269 | | // mDirectTasks often doesn't get anything put into it, and (c) the |
270 | | // std::queue implementation in GNU libstdc++ does two largish heap |
271 | | // allocations when creating a new std::queue. |
272 | | mozilla::Maybe<std::queue<nsCOMPtr<nsIRunnable>>> mDirectTasks; |
273 | | |
274 | | // Task groups, organized by thread. |
275 | | nsTArray<UniquePtr<PerThreadTaskGroup>> mTaskGroups; |
276 | | |
277 | | // True if this TaskDispatcher represents the tail dispatcher for the thread |
278 | | // upon which it runs. |
279 | | const bool mIsTailDispatcher; |
280 | | }; |
281 | | |
282 | | // Little utility class to allow declaring AutoTaskDispatcher as a default |
283 | | // parameter for methods that take a TaskDispatcher&. |
284 | | template<typename T> |
285 | | class PassByRef |
286 | | { |
287 | | public: |
288 | | PassByRef() {} |
289 | | operator T&() { return mVal; } |
290 | | private: |
291 | | T mVal; |
292 | | }; |
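
A sketch of the idiom this enables (DispatchChanges is a hypothetical signature, not
declared in this header): a method can accept a TaskDispatcher& and default to a freshly
constructed AutoTaskDispatcher.

    void DispatchChanges(TaskDispatcher& aDispatcher = PassByRef<AutoTaskDispatcher>());

The PassByRef temporary, and the AutoTaskDispatcher it holds, lives until the end of the
full expression containing the call, so a caller that omits the argument has the queued
tasks dispatched as soon as the call returns.
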
293 | | |
294 | | } // namespace mozilla |
295 | | |
296 | | #endif |