/work/obj-fuzz/dist/include/mozilla/net/ChannelEventQueue.h
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set sw=2 ts=8 et tw=80 :
 */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_net_ChannelEventQueue_h
#define mozilla_net_ChannelEventQueue_h

#include "nsTArray.h"
#include "nsAutoPtr.h"
#include "nsIEventTarget.h"
#include "nsThreadUtils.h"
#include "nsXULAppAPI.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Mutex.h"
#include "mozilla/RecursiveMutex.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"

class nsISupports;

namespace mozilla {
namespace net {

class ChannelEvent
{
public:
  ChannelEvent() { MOZ_COUNT_CTOR(ChannelEvent); }
  virtual ~ChannelEvent() { MOZ_COUNT_DTOR(ChannelEvent); }
  virtual void Run() = 0;
  virtual already_AddRefed<nsIEventTarget> GetEventTarget() = 0;
};

// Note that MainThreadChannelEvent should not be used in the child process,
// since GetEventTarget() directly returns an unlabeled event target.
class MainThreadChannelEvent : public ChannelEvent
{
public:
  MainThreadChannelEvent() { MOZ_COUNT_CTOR(MainThreadChannelEvent); }
  virtual ~MainThreadChannelEvent() { MOZ_COUNT_DTOR(MainThreadChannelEvent); }

  already_AddRefed<nsIEventTarget>
  GetEventTarget() override
  {
    MOZ_ASSERT(XRE_IsParentProcess());

    return do_AddRef(GetMainThreadEventTarget());
  }
};

// This event is designed to be used only for e10s child channels.
// The goal is to force the child channel to implement GetNeckoTarget(),
// which should return a labeled main-thread event target so that this
// channel event can be dispatched correctly.
template<typename T>
class NeckoTargetChannelEvent : public ChannelEvent
{
public:
  explicit NeckoTargetChannelEvent(T *aChild)
    : mChild(aChild)
  {
    MOZ_COUNT_CTOR(NeckoTargetChannelEvent);
  }
  virtual ~NeckoTargetChannelEvent()
  {
    MOZ_COUNT_DTOR(NeckoTargetChannelEvent);
  }

  already_AddRefed<nsIEventTarget>
  GetEventTarget() override
  {
    MOZ_ASSERT(mChild);

    return mChild->GetNeckoTarget();
  }

protected:
  T *mChild;
};
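
// Illustrative sketch (hypothetical names: MyChannelChild, DoOnStopRequest):
// a concrete event that a child channel might queue. Subclasses only supply
// Run(); the base class resolves the labeled event target via GetNeckoTarget().
//
//   class MyStopEvent : public NeckoTargetChannelEvent<MyChannelChild>
//   {
//   public:
//     MyStopEvent(MyChannelChild* aChild, nsresult aStatus)
//       : NeckoTargetChannelEvent<MyChannelChild>(aChild)
//       , mStatus(aStatus)
//     {}
//     void Run() override { mChild->DoOnStopRequest(mStatus); }
//   private:
//     nsresult mStatus;
//   };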

// Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a
// queue if we are still dispatching previous one(s) to listeners/observers.
// Otherwise synchronous XMLHttpRequests and/or other code that spins the
// event loop (e.g. IPDL RPC) could cause listener->OnDataAvailable (for
// instance) to be dispatched and called before mListener->OnStartRequest has
// completed.
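//
// Illustrative sketch (hypothetical channel and event class names): an IPDL
// message handler never invokes the listener directly; it funnels the
// callback through the queue so it cannot re-enter a running one.
//
//   mozilla::ipc::IPCResult
//   MyChannelChild::RecvOnDataAvailable(const nsCString& aData)
//   {
//     mEventQ->RunOrEnqueue(new MyDataEvent(this, aData));
//     return IPC_OK();
//   }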

class ChannelEventQueue final
{
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ChannelEventQueue)

public:
  explicit ChannelEventQueue(nsISupports *owner)
    : mSuspendCount(0)
    , mSuspended(false)
    , mForcedCount(0)
    , mFlushing(false)
    , mOwner(owner)
    , mMutex("ChannelEventQueue::mMutex")
    , mRunningMutex("ChannelEventQueue::mRunningMutex")
  {}

  // Puts an IPDL-generated channel event into the queue, to be run later
  // automatically when EndForcedQueueing and/or Resume is called.
  //
  // @param aCallback - the ChannelEvent
  // @param aAssertionWhenNotQueued - if true, a release assertion fires when
  //   the event is executed directly rather than queued.
  inline void RunOrEnqueue(ChannelEvent* aCallback,
                           bool aAssertionWhenNotQueued = false);

  // Prepends one or more ChannelEvents at the front of the event queue.
  inline nsresult PrependEvent(UniquePtr<ChannelEvent>& aEvent);
  inline nsresult PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents);
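  //
  // Illustrative sketch (hypothetical event type MyStartEvent): both prepend
  // methods assert that a later flush is foreseen, so only prepend while the
  // queue is suspended or forced queueing is on.
  //
  //   mEventQ->Suspend();
  //   UniquePtr<ChannelEvent> ev = MakeUnique<MyStartEvent>(this);
  //   nsresult rv = mEventQ->PrependEvent(ev);
  //   mEventQ->Resume();  // flushes, running ev before older queued events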

  // After StartForcedQueueing is called, RunOrEnqueue() will start enqueuing
  // events that will be run/flushed when EndForcedQueueing is called.
  // - Note: queueing may still be required after EndForcedQueueing() (if the
  //   queue is suspended, etc): always call RunOrEnqueue() to avoid race
  //   conditions.
  inline void StartForcedQueueing();
  inline void EndForcedQueueing();

  // Suspend/resume the event queue. RunOrEnqueue() will start enqueuing
  // events, and they will be run/flushed when Resume is called. These should
  // be called when the channel owning the event queue is suspended/resumed.
  void Suspend();
  // Resume flushes the queue asynchronously, i.e. items in the queue will be
  // dispatched in a new event on the current thread.
  void Resume();
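  //
  // Illustrative sketch (hypothetical channel class): the owning channel
  // mirrors its own suspend/resume onto the queue.
  //
  //   NS_IMETHODIMP MyChannelChild::Suspend()
  //   {
  //     mEventQ->Suspend();
  //     return NS_OK;
  //   }
  //   NS_IMETHODIMP MyChannelChild::Resume()
  //   {
  //     mEventQ->Resume();  // flushes queued events asynchronously
  //     return NS_OK;
  //   }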

private:
  // Private destructor, to discourage deletion outside of Release():
  ~ChannelEventQueue()
  {
  }

  void SuspendInternal();
  void ResumeInternal();

  inline void MaybeFlushQueue();
  void FlushQueue();
  inline void CompleteResume();

  ChannelEvent* TakeEvent();

  nsTArray<UniquePtr<ChannelEvent>> mEventQueue;

  uint32_t mSuspendCount;
  bool mSuspended;
  uint32_t mForcedCount; // Supports ForcedQueueing on multiple threads.
  bool mFlushing;

  // Keep a raw ptr to avoid a refcount cycle: only grab a ref during
  // flushing.
  nsISupports *mOwner;

  // For atomic mEventQueue operation and state update.
  Mutex mMutex;

  // To guarantee event execution order among threads.
  RecursiveMutex mRunningMutex;

  friend class AutoEventEnqueuer;
};

inline void
ChannelEventQueue::RunOrEnqueue(ChannelEvent* aCallback,
                                bool aAssertionWhenNotQueued)
{
  MOZ_ASSERT(aCallback);

  // Running the event could trigger the destruction of the channel (and our
  // own destructor), so make sure its refcount doesn't drop to 0 while this
  // method is running.
  nsCOMPtr<nsISupports> kungFuDeathGrip(mOwner);
  Unused << kungFuDeathGrip; // Not used in this function

  // To avoid leaks.
  UniquePtr<ChannelEvent> event(aCallback);

  // To guarantee that the running event and all the events generated within
  // it will be finished before events on other threads.
  RecursiveMutexAutoLock lock(mRunningMutex);

  {
    MutexAutoLock lock(mMutex);

    bool enqueue = !!mForcedCount || mSuspended || mFlushing ||
                   !mEventQueue.IsEmpty();

    if (enqueue) {
      mEventQueue.AppendElement(std::move(event));
      return;
    }

    nsCOMPtr<nsIEventTarget> target = event->GetEventTarget();
    MOZ_ASSERT(target);

    bool isCurrentThread = false;
    DebugOnly<nsresult> rv = target->IsOnCurrentThread(&isCurrentThread);
    MOZ_ASSERT(NS_SUCCEEDED(rv));

    if (!isCurrentThread) {
      // Leverage the Suspend/Resume mechanism to trigger the flush procedure
      // without creating a new one.
      SuspendInternal();
      mEventQueue.AppendElement(std::move(event));
      ResumeInternal();
      return;
    }
  }

  MOZ_RELEASE_ASSERT(!aAssertionWhenNotQueued);
  event->Run();
}

inline void
ChannelEventQueue::StartForcedQueueing()
{
  MutexAutoLock lock(mMutex);
  ++mForcedCount;
}

inline void
ChannelEventQueue::EndForcedQueueing()
{
  bool tryFlush = false;
  {
    MutexAutoLock lock(mMutex);
    MOZ_ASSERT(mForcedCount > 0);
    if (!--mForcedCount) {
      tryFlush = true;
    }
  }

  if (tryFlush) {
    MaybeFlushQueue();
  }
}

inline nsresult
ChannelEventQueue::PrependEvent(UniquePtr<ChannelEvent>& aEvent)
{
  MutexAutoLock lock(mMutex);

  // Prepending an event when no queue flush is foreseen might cause the
  // subsequent channel events to never run. This assertion guarantees that
  // there will be a queue flush, triggered by either Resume or
  // EndForcedQueueing, to execute the added event.
  MOZ_ASSERT(mSuspended || !!mForcedCount);

  UniquePtr<ChannelEvent>* newEvent =
    mEventQueue.InsertElementAt(0, std::move(aEvent));

  if (!newEvent) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  return NS_OK;
}

inline nsresult
ChannelEventQueue::PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents)
{
  MutexAutoLock lock(mMutex);

  // As in PrependEvent, this assertion guarantees that there will be a queue
  // flush, triggered by either Resume or EndForcedQueueing, to execute the
  // added events.
  MOZ_ASSERT(mSuspended || !!mForcedCount);

  UniquePtr<ChannelEvent>* newEvents =
    mEventQueue.InsertElementsAt(0, aEvents.Length());
  if (!newEvents) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  for (uint32_t i = 0; i < aEvents.Length(); i++) {
    newEvents[i] = std::move(aEvents[i]);
  }

  return NS_OK;
}

inline void
ChannelEventQueue::CompleteResume()
{
  bool tryFlush = false;
  {
    MutexAutoLock lock(mMutex);

    // The channel may have been suspended again since Resume dispatched the
    // event that calls this.
    if (!mSuspendCount) {
      // We need to remain logically suspended (for purposes of queuing
      // incoming messages) until this point, else new incoming messages
      // could run before queued ones.
      mSuspended = false;
      tryFlush = true;
    }
  }

  if (tryFlush) {
    MaybeFlushQueue();
  }
}

inline void
ChannelEventQueue::MaybeFlushQueue()
{
  // Don't flush if forced queueing is on, we're already flushing, we're
  // suspended, or there's nothing to flush.
  bool flushQueue = false;

  {
    MutexAutoLock lock(mMutex);
    flushQueue = !mForcedCount && !mFlushing && !mSuspended &&
                 !mEventQueue.IsEmpty();

    // Only one thread is allowed to run FlushQueue at a time.
    if (flushQueue) {
      mFlushing = true;
    }
  }

  if (flushQueue) {
    FlushQueue();
  }
}

// Ensures that RunOrEnqueue() will be collecting events during its lifetime
// (letting the caller know incoming IPDL messages should be queued). Flushes
// the queue when it goes out of scope.
class MOZ_STACK_CLASS AutoEventEnqueuer
{
public:
  explicit AutoEventEnqueuer(ChannelEventQueue *queue)
    : mEventQueue(queue)
    , mOwner(queue->mOwner)
  {
    mEventQueue->StartForcedQueueing();
  }
  ~AutoEventEnqueuer() {
    mEventQueue->EndForcedQueueing();
  }
private:
  RefPtr<ChannelEventQueue> mEventQueue;
  // Ensures the channel object lives longer than the ChannelEventQueue.
  nsCOMPtr<nsISupports> mOwner;
};
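
// Illustrative sketch (hypothetical handler and helper names): scope an
// AutoEventEnqueuer over an IPDL message handler so that any channel event
// generated while the handler runs is queued rather than dispatched
// re-entrantly.
//
//   mozilla::ipc::IPCResult
//   MyChannelChild::RecvOnStartRequest(const nsresult& aStatus)
//   {
//     AutoEventEnqueuer ensureSerialDispatch(mEventQ);
//     DoOnStartRequest(aStatus);
//     return IPC_OK();
//   }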

} // namespace net
} // namespace mozilla

#endif // mozilla_net_ChannelEventQueue_h