/src/mozilla-central/xpcom/threads/LabeledEventQueue.h
Line | Count | Source (jump to first uncovered line) |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* vim: set ts=8 sts=2 et sw=2 tw=80: */ |
3 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
4 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
5 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
6 | | |
7 | | #ifndef mozilla_LabeledEventQueue_h |
8 | | #define mozilla_LabeledEventQueue_h |
9 | | |
10 | | #include <stdint.h> |
11 | | #include "mozilla/AbstractEventQueue.h" |
12 | | #include "mozilla/LinkedList.h" |
13 | | #include "mozilla/SchedulerGroup.h" |
14 | | #include "mozilla/Queue.h" |
15 | | #include "nsClassHashtable.h" |
16 | | #include "nsHashKeys.h" |
17 | | |
18 | | namespace mozilla { |
19 | | |
20 | | class SchedulerGroup; |
21 | | |
22 | | // LabeledEventQueue is actually a set of queues. There is one queue for each |
23 | | // SchedulerGroup, as well as one queue for unlabeled events (those with no |
24 | | // associated SchedulerGroup). When an event is added to a LabeledEventQueue, we |
25 | | // query its SchedulerGroup and then add it to the appropriate queue. When an |
26 | | // event is fetched, we heuristically pick a SchedulerGroup and return an event |
27 | | // from its queue. Ideally the heuristic should give precedence to |
28 | | // SchedulerGroups corresponding to the foreground tabs. The correctness of this |
29 | | // data structure relies on the invariant that events from different |
30 | | // SchedulerGroups cannot affect each other. |
31 | | class LabeledEventQueue final : public AbstractEventQueue |
32 | | { |
33 | | public: |
34 | | explicit LabeledEventQueue(EventPriority aPriority); |
35 | | ~LabeledEventQueue(); |
36 | | |
37 | | void PutEvent(already_AddRefed<nsIRunnable>&& aEvent, |
38 | | EventPriority aPriority, |
39 | | const MutexAutoLock& aProofOfLock) final; |
40 | | already_AddRefed<nsIRunnable> GetEvent(EventPriority* aPriority, |
41 | | const MutexAutoLock& aProofOfLock) final; |
42 | | |
43 | | bool IsEmpty(const MutexAutoLock& aProofOfLock) final; |
44 | | size_t Count(const MutexAutoLock& aProofOfLock) const final; |
45 | | bool HasReadyEvent(const MutexAutoLock& aProofOfLock) final; |
46 | | |
47 | 0 | void EnableInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {} // Input-event prioritization is a no-op for this queue type. |
48 | 0 | void FlushInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {} |
49 | 0 | void SuspendInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {} |
50 | 0 | void ResumeInputEventPrioritization(const MutexAutoLock& aProofOfLock) final {} |
51 | | |
52 | | size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const override |
53 | 0 | { |
54 | 0 | return mEpochs.ShallowSizeOfExcludingThis(aMallocSizeOf) + |
55 | 0 | mUnlabeled.ShallowSizeOfExcludingThis(aMallocSizeOf); |
56 | 0 | } |
57 | | |
58 | | private: |
59 | | |
60 | | // The basic problem here is to keep track of the ordering relationships |
61 | | // between events. As long as there are only labeled events, there can be one |
62 | | // queue per SchedulerGroup. However, if an unlabeled event is pushed, we must |
63 | | // remember that it should run after all the labeled events currently in the |
64 | | // queue. To do this, the queues are arranged in "epochs". Each time the tail |
65 | | // of the queue transitions from labeled to unlabeled (or from unlabeled to |
66 | | // labeled) a new epoch starts. Events within different epochs are ordered |
67 | | // according to which epoch happened first. Within a labeled epoch, there is |
68 | | // one queue per SchedulerGroup. So events from different SchedulerGroups |
69 | | // within the same epoch are unordered with respect to each other. Within an |
70 | | // unlabeled epoch, there is a single queue that orders all the unlabeled |
71 | | // events. |
72 | | // |
73 | | // The data structures we use are: |
74 | | // 1. A queue of epochs. For each epoch, we store its epoch number. This number |
75 | | // is odd for labeled epochs and even for unlabeled epochs. We also store the |
76 | | // number of events in the epoch. |
77 | | // 2. A single queue for all unlabeled events. For each event in the queue, we |
78 | | // store the runnable as well as the epoch number. |
79 | | // 3. For labeled events, one queue for each SchedulerGroup. Each element in |
80 | | // these queues also keeps track of the epoch it belongs to. |
81 | | // |
82 | | // To push an event, we see if we can remain in the same epoch or if we have |
83 | | // to start a new one. If we have to start a new one, we push onto the epoch |
84 | | // queue. Then, based on whether the event is labeled or not, we push the |
85 | | // runnable and the epoch number into the appropriate queue. |
86 | | // |
87 | | // To pop an event, we look at the epoch at the front of the epoch queue. If |
88 | | // it is unlabeled, then we pop the first event in the unlabeled queue. If it |
89 | | // is labeled, we can pop from any of the SchedulerGroup queues. Then we |
90 | | // decrement the number of events in the current epoch. If this number reaches |
91 | | // zero, we pop from the epoch queue. |
92 | | |
93 | | struct Epoch |
94 | | { |
95 | | static Epoch First(bool aIsLabeled) |
96 | 0 | { |
97 | 0 | // Odd numbers are labeled, even are unlabeled. |
98 | 0 | uintptr_t number = aIsLabeled ? 1 : 0; |
99 | 0 | return Epoch(number, aIsLabeled); |
100 | 0 | } |
101 | | |
102 | | static bool EpochNumberIsLabeled(uintptr_t aEpochNumber) |
103 | 0 | { |
104 | 0 | // Odd numbers are labeled, even are unlabeled. |
105 | 0 | return (aEpochNumber & 1) ? true : false; |
106 | 0 | } |
107 | | |
108 | | uintptr_t mEpochNumber; // Odd for labeled epochs, even for unlabeled (see EpochNumberIsLabeled). |
109 | | size_t mNumEvents; // Events still queued in this epoch; the epoch is popped when it hits zero. |
110 | | |
111 | | Epoch(uintptr_t aEpochNumber, bool aIsLabeled) |
112 | | : mEpochNumber(aEpochNumber) |
113 | | , mNumEvents(0) |
114 | 0 | { |
115 | 0 | MOZ_ASSERT(aIsLabeled == EpochNumberIsLabeled(aEpochNumber)); |
116 | 0 | } |
117 | | |
118 | 0 | bool IsLabeled() const { return EpochNumberIsLabeled(mEpochNumber); } |
119 | | |
120 | | Epoch NextEpoch(bool aIsLabeled) const |
121 | 0 | { |
122 | 0 | MOZ_ASSERT(aIsLabeled == !IsLabeled()); // Epochs must alternate labeled/unlabeled. |
123 | 0 | return Epoch(mEpochNumber + 1, aIsLabeled); |
124 | 0 | } |
125 | | }; |
126 | | |
127 | | void PopEpoch(); |
128 | | static SchedulerGroup* NextSchedulerGroup(SchedulerGroup* aGroup); |
129 | | |
130 | | using RunnableEpochQueue = SchedulerGroup::RunnableEpochQueue; |
131 | | using EpochQueue = Queue<Epoch, 8>; |
132 | | |
133 | | // List of SchedulerGroups that might have events. This is static, so it |
134 | | // covers all LabeledEventQueues. If a SchedulerGroup is in this list, it may |
135 | | // not have an event in *this* LabeledEventQueue (although it will have an |
136 | | // event in *some* LabeledEventQueue). sCurrentSchedulerGroup cycles through |
137 | | // the elements of sSchedulerGroups in order. |
138 | | static LinkedList<SchedulerGroup>* sSchedulerGroups; |
139 | | static size_t sLabeledEventQueueCount; |
140 | | static SchedulerGroup* sCurrentSchedulerGroup; |
141 | | |
142 | | RunnableEpochQueue mUnlabeled; // All unlabeled events, each tagged with its epoch number. |
143 | | EpochQueue mEpochs; // Epochs in arrival order; the front epoch is drained first. |
144 | | size_t mNumEvents = 0; // Running count of events currently in this queue. |
145 | | |
146 | | // Number of SchedulerGroups that must be processed before we prioritize a |
147 | | // visible tab. This field is designed to guarantee a 1:1 interleaving between |
148 | | // foreground and background SchedulerGroups. For details, see its usage in |
149 | | // LabeledEventQueue.cpp. |
150 | | int64_t mAvoidVisibleTabCount = 0; |
151 | | EventPriority mPriority; // Priority passed to the constructor for this queue. |
152 | | }; |
153 | | |
154 | | } // namespace mozilla |
155 | | |
156 | | #endif // mozilla_LabeledEventQueue_h |