/src/mozilla-central/netwerk/cache2/CacheIOThread.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
2 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
3 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
4 | | |
5 | | #include "CacheIOThread.h" |
6 | | #include "CacheFileIOManager.h" |
7 | | |
8 | | #include "nsIRunnable.h" |
9 | | #include "nsISupportsImpl.h" |
10 | | #include "nsPrintfCString.h" |
11 | | #include "nsThreadUtils.h" |
12 | | #include "mozilla/IOInterposer.h" |
13 | | #include "GeckoProfiler.h" |
14 | | |
15 | | #ifdef XP_WIN |
16 | | #include <windows.h> |
17 | | #endif |
18 | | |
19 | | #ifdef MOZ_TASK_TRACER |
20 | | #include "GeckoTaskTracer.h" |
21 | | #include "TracedTaskCommon.h" |
22 | | #endif |
23 | | |
24 | | namespace mozilla { |
25 | | namespace net { |
26 | | |
27 | | namespace { // anon |
28 | | |
// Collects telemetry about the cache2 I/O event-queue lengths.
// Report() is called from the event loop with the current length of a
// level's queue and accumulates into per-level histograms once the queue
// grows past a moving threshold (see mMinLengthToReport below).
class CacheIOTelemetry
{
public:
  typedef CacheIOThread::EventQueue::size_type size_type;
  // Per-level minimum queue length that triggers the next report;
  // bumped by Report() so each granularity step is reported only once.
  static size_type mMinLengthToReport[CacheIOThread::LAST_LEVEL];
  static void Report(uint32_t aLevel, size_type aLength);
};
36 | | |
37 | | static CacheIOTelemetry::size_type const kGranularity = 30; |
38 | | |
39 | | CacheIOTelemetry::size_type |
40 | | CacheIOTelemetry::mMinLengthToReport[CacheIOThread::LAST_LEVEL] = { |
41 | | kGranularity, kGranularity, kGranularity, kGranularity, |
42 | | kGranularity, kGranularity, kGranularity, kGranularity |
43 | | }; |
44 | | |
45 | | // static |
46 | | void CacheIOTelemetry::Report(uint32_t aLevel, CacheIOTelemetry::size_type aLength) |
47 | 0 | { |
48 | 0 | if (mMinLengthToReport[aLevel] > aLength) { |
49 | 0 | return; |
50 | 0 | } |
51 | 0 | |
52 | 0 | static Telemetry::HistogramID telemetryID[] = { |
53 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN_PRIORITY, |
54 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_READ_PRIORITY, |
55 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_MANAGEMENT, |
56 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_OPEN, |
57 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_READ, |
58 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE_PRIORITY, |
59 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_WRITE, |
60 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_INDEX, |
61 | 0 | Telemetry::HTTP_CACHE_IO_QUEUE_2_EVICT |
62 | 0 | }; |
63 | 0 |
|
64 | 0 | // Each bucket is a multiply of kGranularity (30, 60, 90..., 300+) |
65 | 0 | aLength = (aLength / kGranularity); |
66 | 0 | // Next time report only when over the current length + kGranularity |
67 | 0 | mMinLengthToReport[aLevel] = (aLength + 1) * kGranularity; |
68 | 0 |
|
69 | 0 | // 10 is number of buckets we have in each probe |
70 | 0 | aLength = std::min<size_type>(aLength, 10); |
71 | 0 |
|
72 | 0 | Telemetry::Accumulate(telemetryID[aLevel], aLength - 1); // counted from 0 |
73 | 0 | } |
74 | | |
75 | | } // anon |
76 | | |
77 | | namespace detail { |
78 | | |
79 | | /** |
80 | | * Helper class encapsulating platform-specific code to cancel |
81 | | * any pending IO operation taking too long. Solely used during |
82 | | * shutdown to prevent any IO shutdown hangs. |
83 | | * Mainly designed for using Win32 CancelSynchronousIo function. |
84 | | */ |
class BlockingIOWatcher
{
#ifdef XP_WIN
  // CancelSynchronousIo is resolved at runtime (see the constructor),
  // so we keep a function pointer rather than linking it directly.
  typedef BOOL(WINAPI* TCancelSynchronousIo)(HANDLE hThread);
  TCancelSynchronousIo mCancelSynchronousIo;
  // The native handle to the thread
  HANDLE mThread;
  // Event signaling back to the main thread, see NotifyOperationDone.
  HANDLE mEvent;
#endif

public:
  // Created and destroyed on the main thread only
  BlockingIOWatcher();
  ~BlockingIOWatcher();

  // Called on the IO thread to grab the platform specific
  // reference to it.
  void InitThread();
  // If there is a blocking operation being handled on the IO
  // thread, this is called on the main thread during shutdown.
  // Waits for notification from the IO thread for up to two seconds.
  // If that times out, it attempts to cancel the IO operation.
  void WatchAndCancel(Monitor& aMonitor);
  // Called by the IO thread after each operation has been
  // finished (after each Run() call).  This wakes the main
  // thread up and makes WatchAndCancel() early exit and become
  // a no-op.
  void NotifyOperationDone();
};
115 | | |
116 | | #ifdef XP_WIN |
117 | | |
BlockingIOWatcher::BlockingIOWatcher()
  : mCancelSynchronousIo(NULL)
  , mThread(NULL)
  , mEvent(NULL)
{
  // CancelSynchronousIo may not exist on every supported Windows
  // version, hence the dynamic lookup instead of a direct link-time
  // dependency.
  HMODULE kernel32_dll = GetModuleHandle("kernel32.dll");
  if (!kernel32_dll) {
    return;
  }

  FARPROC ptr = GetProcAddress(kernel32_dll, "CancelSynchronousIo");
  if (!ptr) {
    // Without the cancelation API the watcher stays inert: mEvent is
    // never created, so WatchAndCancel() bails out early.
    return;
  }

  mCancelSynchronousIo = reinterpret_cast<TCancelSynchronousIo>(ptr);

  // Manual-reset event (reset explicitly in WatchAndCancel), initially
  // non-signaled; signaled by NotifyOperationDone().
  mEvent = ::CreateEvent(NULL, TRUE, FALSE, NULL);
}
137 | | |
138 | | BlockingIOWatcher::~BlockingIOWatcher() |
139 | | { |
140 | | if (mEvent) { |
141 | | CloseHandle(mEvent); |
142 | | } |
143 | | if (mThread) { |
144 | | CloseHandle(mThread); |
145 | | } |
146 | | } |
147 | | |
void BlockingIOWatcher::InitThread()
{
  // GetCurrentThread() only returns a pseudo handle, hence DuplicateHandle
  // NOTE(review): the result is not checked; on failure mThread stays
  // NULL and WatchAndCancel() simply returns without canceling.
  ::DuplicateHandle(
    GetCurrentProcess(),
    GetCurrentThread(),
    GetCurrentProcess(),
    &mThread,
    0,
    FALSE,
    DUPLICATE_SAME_ACCESS);
}
160 | | |
161 | | void BlockingIOWatcher::WatchAndCancel(Monitor& aMonitor) |
162 | | { |
163 | | if (!mEvent) { |
164 | | return; |
165 | | } |
166 | | |
167 | | // Reset before we enter the monitor to raise the chance we catch |
168 | | // the currently pending IO op completion. |
169 | | ::ResetEvent(mEvent); |
170 | | |
171 | | HANDLE thread; |
172 | | { |
173 | | MonitorAutoLock lock(aMonitor); |
174 | | thread = mThread; |
175 | | |
176 | | if (!thread) { |
177 | | return; |
178 | | } |
179 | | } |
180 | | |
181 | | LOG(("Blocking IO operation pending on IO thread, waiting...")); |
182 | | |
183 | | // It seems wise to use the I/O lag time as a maximum time to wait |
184 | | // for an operation to finish. When that times out and cancelation |
185 | | // succeeds, there will be no other IO operation permitted. By default |
186 | | // this is two seconds. |
187 | | uint32_t maxLag = std::min<uint32_t>(5, CacheObserver::MaxShutdownIOLag()) * 1000; |
188 | | |
189 | | DWORD result = ::WaitForSingleObject(mEvent, maxLag); |
190 | | if (result == WAIT_TIMEOUT) { |
191 | | LOG(("CacheIOThread: Attempting to cancel a long blocking IO operation")); |
192 | | BOOL result = mCancelSynchronousIo(thread); |
193 | | if (result) { |
194 | | LOG((" cancelation signal succeeded")); |
195 | | } else { |
196 | | DWORD error = GetLastError(); |
197 | | LOG((" cancelation signal failed with GetLastError=%u", error)); |
198 | | } |
199 | | } |
200 | | } |
201 | | |
202 | | void BlockingIOWatcher::NotifyOperationDone() |
203 | | { |
204 | | if (mEvent) { |
205 | | ::SetEvent(mEvent); |
206 | | } |
207 | | } |
208 | | |
209 | | #else // WIN |
210 | | |
// Stub code only (we don't implement IO cancelation for this platform)
// These keep the call sites in CacheIOThread platform-independent.

BlockingIOWatcher::BlockingIOWatcher() = default;
BlockingIOWatcher::~BlockingIOWatcher() = default;
void BlockingIOWatcher::InitThread() { }
void BlockingIOWatcher::WatchAndCancel(Monitor&) { }
void BlockingIOWatcher::NotifyOperationDone() { }
218 | | |
219 | | #endif |
220 | | |
221 | | } // detail |
222 | | |
// Singleton pointer set in the constructor and cleared in the destructor;
// used by Cancelable to reach the thread without a reference.
CacheIOThread* CacheIOThread::sSelf = nullptr;

// nsIThreadObserver lets us learn about XPCOM events dispatched to the
// underlying nsThread (see OnDispatchedEvent).
NS_IMPL_ISUPPORTS(CacheIOThread, nsIThreadObserver)
226 | | |
CacheIOThread::CacheIOThread()
  : mMonitor("CacheIOThread")
  , mThread(nullptr)
  , mXPCOMThread(nullptr)
  // No level is waiting yet; set to a level index by DispatchInternal.
  , mLowestLevelWaiting(LAST_LEVEL)
  , mCurrentlyExecutingLevel(0)
  , mHasXPCOMEvents(false)
  , mRerunCurrentEvent(false)
  , mShutdown(false)
  , mIOCancelableEvents(0)
  , mEventCounter(0)
#ifdef DEBUG
  , mInsideLoop(true)
#endif
{
  // Zero all per-level queue-length counters.
  for (auto& item : mQueueLength) {
    item = 0;
  }

  // Publish the singleton used by Cancelable.
  sSelf = this;
}
248 | | |
CacheIOThread::~CacheIOThread()
{
  if (mXPCOMThread) {
    // mXPCOMThread is a raw pointer holding a reference taken via
    // forget().take() in ThreadFunc(); release it manually here.
    nsIThread *thread = mXPCOMThread;
    thread->Release();
  }

  sSelf = nullptr;
#ifdef DEBUG
  // All queues must have been drained before destruction.
  for (auto& event : mEventQueue) {
    MOZ_ASSERT(!event.Length());
  }
#endif
}
263 | | |
// Creates the blocking-IO watcher and spawns the underlying NSPR thread.
// Returns NS_ERROR_FAILURE when the thread cannot be created.
nsresult CacheIOThread::Init()
{
  {
    MonitorAutoLock lock(mMonitor);
    // Yeah, there is not a thread yet, but we want to make sure
    // the sequencing is correct.
    mBlockingIOWatcher = MakeUnique<detail::BlockingIOWatcher>();
  }

  // Joinable so Shutdown() can PR_JoinThread(); 128 KiB stack.
  mThread = PR_CreateThread(PR_USER_THREAD, ThreadFunc, this,
                            PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                            PR_JOINABLE_THREAD, 128 * 1024);
  if (!mThread) {
    return NS_ERROR_FAILURE;
  }

  return NS_OK;
}
282 | | |
// Convenience overload: adds a reference and forwards to the
// already_AddRefed variant below.
nsresult CacheIOThread::Dispatch(nsIRunnable* aRunnable, uint32_t aLevel)
{
  return Dispatch(do_AddRef(aRunnable), aLevel);
}
287 | | |
// Queues aRunnable at the given priority level.  Fails with
// NS_ERROR_UNEXPECTED after Shutdown() unless called from the IO thread
// itself (the thread may still legitimately re-dispatch while draining).
nsresult CacheIOThread::Dispatch(already_AddRefed<nsIRunnable> aRunnable,
                                 uint32_t aLevel)
{
  NS_ENSURE_ARG(aLevel < LAST_LEVEL);

  nsCOMPtr<nsIRunnable> runnable(aRunnable);

  // Runnable is always expected to be non-null, hard null-check below
  // (in DispatchInternal).
  MOZ_ASSERT(runnable);

  MonitorAutoLock lock(mMonitor);

  if (mShutdown && (PR_GetCurrentThread() != mThread))
    return NS_ERROR_UNEXPECTED;

  return DispatchInternal(runnable.forget(), aLevel);
}
305 | | |
// Queues aRunnable so that it runs only after all currently pending
// OPEN-level events: those are promoted to OPEN_PRIORITY first, and the
// runnable is appended behind them on that level.
nsresult CacheIOThread::DispatchAfterPendingOpens(nsIRunnable* aRunnable)
{
  // Runnable is always expected to be non-null, hard null-check below
  // (in DispatchInternal).
  MOZ_ASSERT(aRunnable);

  MonitorAutoLock lock(mMonitor);

  if (mShutdown && (PR_GetCurrentThread() != mThread))
    return NS_ERROR_UNEXPECTED;

  // Move everything from later executed OPEN level to the OPEN_PRIORITY level
  // where we post the (eviction) runnable.  Queue-length counters move
  // along with the events.
  mQueueLength[OPEN_PRIORITY] += mEventQueue[OPEN].Length();
  mQueueLength[OPEN] -= mEventQueue[OPEN].Length();
  mEventQueue[OPEN_PRIORITY].AppendElements(mEventQueue[OPEN]);
  mEventQueue[OPEN].Clear();

  return DispatchInternal(do_AddRef(aRunnable), OPEN_PRIORITY);
}
325 | | |
// Appends the runnable to the level's queue and wakes the IO thread.
// Caller must hold mMonitor (asserted below).
nsresult CacheIOThread::DispatchInternal(already_AddRefed<nsIRunnable> aRunnable,
                                         uint32_t aLevel)
{
  nsCOMPtr<nsIRunnable> runnable(aRunnable);
#ifdef MOZ_TASK_TRACER
  // Wrap the runnable for task-tracer builds so dispatch/run is logged.
  if (tasktracer::IsStartLogging()) {
    runnable = tasktracer::CreateTracedRunnable(runnable.forget());
    (static_cast<tasktracer::TracedRunnable*>(runnable.get()))->DispatchTask();
  }
#endif

  if (NS_WARN_IF(!runnable))
    return NS_ERROR_NULL_POINTER;

  mMonitor.AssertCurrentThreadOwns();

  ++mQueueLength[aLevel];
  mEventQueue[aLevel].AppendElement(runnable.forget());
  // Record that a (possibly higher-priority) level now has work; the
  // loop in ThreadFunc/LoopOneLevel checks this via EventsPending().
  if (mLowestLevelWaiting > aLevel)
    mLowestLevelWaiting = aLevel;

  mMonitor.NotifyAll();

  return NS_OK;
}
351 | | |
352 | | bool CacheIOThread::IsCurrentThread() |
353 | 0 | { |
354 | 0 | return mThread == PR_GetCurrentThread(); |
355 | 0 | } |
356 | | |
357 | | uint32_t CacheIOThread::QueueSize(bool highPriority) |
358 | 0 | { |
359 | 0 | MonitorAutoLock lock(mMonitor); |
360 | 0 | if (highPriority) { |
361 | 0 | return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY]; |
362 | 0 | } |
363 | 0 | |
364 | 0 | return mQueueLength[OPEN_PRIORITY] + mQueueLength[READ_PRIORITY] + |
365 | 0 | mQueueLength[MANAGEMENT] + mQueueLength[OPEN] + mQueueLength[READ]; |
366 | 0 | } |
367 | | |
368 | | bool CacheIOThread::YieldInternal() |
369 | 0 | { |
370 | 0 | if (!IsCurrentThread()) { |
371 | 0 | NS_WARNING("Trying to yield to priority events on non-cache2 I/O thread? " |
372 | 0 | "You probably do something wrong."); |
373 | 0 | return false; |
374 | 0 | } |
375 | 0 |
|
376 | 0 | if (mCurrentlyExecutingLevel == XPCOM_LEVEL) { |
377 | 0 | // Doesn't make any sense, since this handler is the one |
378 | 0 | // that would be executed as the next one. |
379 | 0 | return false; |
380 | 0 | } |
381 | 0 | |
382 | 0 | if (!EventsPending(mCurrentlyExecutingLevel)) |
383 | 0 | return false; |
384 | 0 | |
385 | 0 | mRerunCurrentEvent = true; |
386 | 0 | return true; |
387 | 0 | } |
388 | | |
// Flags shutdown, wakes the IO thread, and joins it.  No-op when the
// thread was never created.  Called on the main thread.
void CacheIOThread::Shutdown()
{
  if (!mThread) {
    return;
  }

  {
    MonitorAutoLock lock(mMonitor);
    mShutdown = true;
    mMonitor.NotifyAll();
  }

  // ThreadFunc's loop exits once queues are drained and mShutdown is set.
  PR_JoinThread(mThread);
  mThread = nullptr;
}
404 | | |
void CacheIOThread::CancelBlockingIO()
{
  // This is an attempt to cancel any blocking I/O operation taking
  // too long time.
  if (!mBlockingIOWatcher) {
    // Init() has not run; nothing to watch.
    return;
  }

  if (!mIOCancelableEvents) {
    // No Cancelable scope is currently active on the IO thread.
    LOG(("CacheIOThread::CancelBlockingIO, no blocking operation to cancel"));
    return;
  }

  // OK, when we are here, we are processing an IO on the thread that
  // can be cancelled.
  mBlockingIOWatcher->WatchAndCancel(mMonitor);
}
422 | | |
// Returns the XPCOM event target backing this thread.  If the NSPR
// thread was started but has not yet published mXPCOMThread (done in
// ThreadFunc under the monitor), this blocks until it is available.
already_AddRefed<nsIEventTarget> CacheIOThread::Target()
{
  nsCOMPtr<nsIEventTarget> target;

  target = mXPCOMThread;
  if (!target && mThread)
  {
    MonitorAutoLock lock(mMonitor);
    // ThreadFunc calls lock.NotifyAll() right after setting mXPCOMThread.
    while (!mXPCOMThread) {
      lock.Wait();
    }

    target = mXPCOMThread;
  }

  return target.forget();
}
440 | | |
// static
// NSPR thread entry point: names the thread, registers it with the IO
// interposer, and runs the member loop until shutdown.
void CacheIOThread::ThreadFunc(void* aClosure)
{
  // XXXmstange We'd like to register this thread with the profiler, but doing
  // so causes leaks, see bug 1323100.
  NS_SetCurrentThreadName("Cache2 I/O");

  mozilla::IOInterposer::RegisterCurrentThread();
  CacheIOThread* thread = static_cast<CacheIOThread*>(aClosure);
  thread->ThreadFunc();
  mozilla::IOInterposer::UnregisterCurrentThread();
}
453 | | |
// Main loop of the IO thread.  Holds mMonitor except while actually
// running events.  Processing order on each pass: XPCOM events first,
// then the lowest (highest-priority) non-empty level; restarts from the
// top whenever any level was processed so new higher-priority work is
// always noticed.  Exits when queues are drained and mShutdown is set.
void CacheIOThread::ThreadFunc()
{
  nsCOMPtr<nsIThreadInternal> threadInternal;

  {
    MonitorAutoLock lock(mMonitor);

    MOZ_ASSERT(mBlockingIOWatcher);
    mBlockingIOWatcher->InitThread();

    // This creates nsThread for this PRThread
    nsCOMPtr<nsIThread> xpcomThread = NS_GetCurrentThread();

    // Observe dispatches so OnDispatchedEvent() can flag mHasXPCOMEvents.
    threadInternal = do_QueryInterface(xpcomThread);
    if (threadInternal)
      threadInternal->SetObserver(this);

    // Publish as a raw pointer carrying a reference; released manually
    // in the destructor.
    mXPCOMThread = xpcomThread.forget().take();

    // Wake any Target() caller waiting for mXPCOMThread.
    lock.NotifyAll();

    do {
loopStart:
      // Reset the lowest level now, so that we can detect a new event on
      // a lower level (i.e. higher priority) has been scheduled while
      // executing any previously scheduled event.
      mLowestLevelWaiting = LAST_LEVEL;

      // Process xpcom events first
      while (mHasXPCOMEvents) {
        mHasXPCOMEvents = false;
        mCurrentlyExecutingLevel = XPCOM_LEVEL;

        // Events must run without the monitor held.
        MonitorAutoUnlock unlock(mMonitor);

        bool processedEvent;
        nsresult rv;
        do {
          nsIThread *thread = mXPCOMThread;
          rv = thread->ProcessNextEvent(false, &processedEvent);

          ++mEventCounter;
          MOZ_ASSERT(mBlockingIOWatcher);
          mBlockingIOWatcher->NotifyOperationDone();
        } while (NS_SUCCEEDED(rv) && processedEvent);
      }

      uint32_t level;
      for (level = 0; level < LAST_LEVEL; ++level) {
        if (!mEventQueue[level].Length()) {
          // no events on this level, go to the next level
          continue;
        }

        LoopOneLevel(level);

        // Go to the first (lowest) level again
        goto loopStart;
      }

      if (EventsPending()) {
        continue;
      }

      if (mShutdown) {
        break;
      }

      // Nothing to do: sleep until a Dispatch/OnDispatchedEvent/Shutdown
      // notifies the monitor.
      lock.Wait();

    } while (true);

    MOZ_ASSERT(!EventsPending());

#ifdef DEBUG
    // This is for correct assertion on XPCOM events dispatch.
    mInsideLoop = false;
#endif
  } // lock

  if (threadInternal)
    threadInternal->SetObserver(nullptr);
}
537 | | |
// Runs the events queued on aLevel, outside the monitor.  Stops early
// when a higher-priority event arrives (EventsPending) or when the
// current event yielded (mRerunCurrentEvent); unexecuted events are put
// back at the front of the level's queue.  Caller holds mMonitor.
void CacheIOThread::LoopOneLevel(uint32_t aLevel)
{
  // Take the whole queue; anything dispatched meanwhile lands in the
  // (now empty) mEventQueue[aLevel] and is merged back at the end.
  EventQueue events;
  events.SwapElements(mEventQueue[aLevel]);
  EventQueue::size_type length = events.Length();

  mCurrentlyExecutingLevel = aLevel;

  bool returnEvents = false;
  bool reportTelemetry = true;

  EventQueue::size_type index;
  {
    MonitorAutoUnlock unlock(mMonitor);

    for (index = 0; index < length; ++index) {
      if (EventsPending(aLevel)) {
        // Somebody scheduled a new event on a lower level, break and hurry
        // to execute it!  Don't forget to return what we haven't exec.
        returnEvents = true;
        break;
      }

      // Report the queue length once per batch, before the first run.
      if (reportTelemetry) {
        reportTelemetry = false;
        CacheIOTelemetry::Report(aLevel, length);
      }

      // Drop any previous flagging, only an event on the current level may set
      // this flag.
      mRerunCurrentEvent = false;

      events[index]->Run();

      MOZ_ASSERT(mBlockingIOWatcher);
      mBlockingIOWatcher->NotifyOperationDone();

      if (mRerunCurrentEvent) {
        // The event handler yields to higher priority events and wants to rerun.
        // Note: the counter and queue length are deliberately NOT updated,
        // since events[index] goes back into the queue below.
        returnEvents = true;
        break;
      }

      ++mEventCounter;
      --mQueueLength[aLevel];

      // Release outside the lock.
      events[index] = nullptr;
    }
  }

  // Put the unexecuted tail (starting at index) back in front of anything
  // dispatched to this level while we were running.
  if (returnEvents)
    mEventQueue[aLevel].InsertElementsAt(0, events.Elements() + index, length - index);
}
592 | | |
593 | | bool CacheIOThread::EventsPending(uint32_t aLastLevel) |
594 | 0 | { |
595 | 0 | return mLowestLevelWaiting < aLastLevel || mHasXPCOMEvents; |
596 | 0 | } |
597 | | |
// nsIThreadObserver: an event was dispatched to the underlying nsThread.
// Flags it for the loop in ThreadFunc and wakes the IO thread.
NS_IMETHODIMP CacheIOThread::OnDispatchedEvent()
{
  MonitorAutoLock lock(mMonitor);
  mHasXPCOMEvents = true;
  // The loop must still be running, otherwise the event is lost.
  MOZ_ASSERT(mInsideLoop);
  lock.Notify();
  return NS_OK;
}
606 | | |
// nsIThreadObserver: no-op; we only care about OnDispatchedEvent.
NS_IMETHODIMP CacheIOThread::OnProcessNextEvent(nsIThreadInternal *thread, bool mayWait)
{
  return NS_OK;
}
611 | | |
// nsIThreadObserver: no-op; we only care about OnDispatchedEvent.
NS_IMETHODIMP CacheIOThread::AfterProcessNextEvent(nsIThreadInternal *thread,
                                                   bool eventWasProcessed)
{
  return NS_OK;
}
617 | | |
618 | | // Memory reporting |
619 | | |
620 | | size_t CacheIOThread::SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const |
621 | 0 | { |
622 | 0 | MonitorAutoLock lock(const_cast<CacheIOThread*>(this)->mMonitor); |
623 | 0 |
|
624 | 0 | size_t n = 0; |
625 | 0 | n += mallocSizeOf(mThread); |
626 | 0 | for (const auto& event : mEventQueue) { |
627 | 0 | n += event.ShallowSizeOfExcludingThis(mallocSizeOf); |
628 | 0 | // Events referenced by the queues are arbitrary objects we cannot be sure |
629 | 0 | // are reported elsewhere as well as probably not implementing nsISizeOf |
630 | 0 | // interface. Deliberatly omitting them from reporting here. |
631 | 0 | } |
632 | 0 |
|
633 | 0 | return n; |
634 | 0 | } |
635 | | |
// Memory reporter hook: like SizeOfExcludingThis plus this object itself.
size_t CacheIOThread::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
{
  return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
}
640 | | |
641 | | CacheIOThread::Cancelable::Cancelable(bool aCancelable) |
642 | | : mCancelable(aCancelable) |
643 | 0 | { |
644 | 0 | // This will only ever be used on the I/O thread, |
645 | 0 | // which is expected to be alive longer than this class. |
646 | 0 | MOZ_ASSERT(CacheIOThread::sSelf); |
647 | 0 | MOZ_ASSERT(CacheIOThread::sSelf->IsCurrentThread()); |
648 | 0 |
|
649 | 0 | if (mCancelable) { |
650 | 0 | ++CacheIOThread::sSelf->mIOCancelableEvents; |
651 | 0 | } |
652 | 0 | } |
653 | | |
654 | | CacheIOThread::Cancelable::~Cancelable() |
655 | 0 | { |
656 | 0 | MOZ_ASSERT(CacheIOThread::sSelf); |
657 | 0 |
|
658 | 0 | if (mCancelable) { |
659 | 0 | --CacheIOThread::sSelf->mIOCancelableEvents; |
660 | 0 | } |
661 | 0 | } |
662 | | |
663 | | } // namespace net |
664 | | } // namespace mozilla |