Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/xpcom/threads/LabeledEventQueue.cpp
Every instrumented line in the listing below reports an execution count of 0 (no coverage recorded).
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "LabeledEventQueue.h"
#include "mozilla/dom/TabChild.h"
#include "mozilla/dom/TabGroup.h"
#include "mozilla/Scheduler.h"
#include "mozilla/SchedulerGroup.h"
#include "nsQueryObject.h"

using namespace mozilla::dom;

using EpochQueueEntry = SchedulerGroup::EpochQueueEntry;

LinkedList<SchedulerGroup>* LabeledEventQueue::sSchedulerGroups;
size_t LabeledEventQueue::sLabeledEventQueueCount;
SchedulerGroup* LabeledEventQueue::sCurrentSchedulerGroup;

LabeledEventQueue::LabeledEventQueue(EventPriority aPriority)
  : mPriority(aPriority)
{
  // LabeledEventQueue should only be used by one consumer since it uses a
  // single static sSchedulerGroups field. It's hard to assert this, though, so
  // we assert NS_IsMainThread(), which is a reasonable proxy.
  MOZ_ASSERT(NS_IsMainThread());

  if (sLabeledEventQueueCount++ == 0) {
    sSchedulerGroups = new LinkedList<SchedulerGroup>();
  }
}

LabeledEventQueue::~LabeledEventQueue()
{
  if (--sLabeledEventQueueCount == 0) {
    delete sSchedulerGroups;
    sSchedulerGroups = nullptr;
  }
}
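
// Note on the constructor/destructor pair above: sLabeledEventQueueCount
// effectively reference-counts the shared static sSchedulerGroups list. The
// first LabeledEventQueue created on the main thread allocates the list and
// the last one destroyed frees it, so the static state never outlives its
// users.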

static SchedulerGroup*
GetSchedulerGroup(nsIRunnable* aEvent)
{
  RefPtr<SchedulerGroup::Runnable> groupRunnable = do_QueryObject(aEvent);
  if (!groupRunnable) {
    // It's not labeled.
    return nullptr;
  }

  return groupRunnable->Group();
}

static bool
IsReadyToRun(nsIRunnable* aEvent, SchedulerGroup* aEventGroup)
{
  if (!Scheduler::AnyEventRunning()) {
    return true;
  }

  if (Scheduler::UnlabeledEventRunning()) {
    return false;
  }

  if (aEventGroup) {
    return !aEventGroup->IsRunning();
  }

  nsCOMPtr<nsILabelableRunnable> labelable = do_QueryInterface(aEvent);
  if (!labelable) {
    return false;
  }

  return labelable->IsReadyToRun();
}
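
// In short, IsReadyToRun allows an event to run when nothing is running at
// all; blocks everything while an unlabeled event is running; lets a labeled
// event run only if its own SchedulerGroup is idle; and, for an event that is
// labelable but carries no group, defers to the runnable's own IsReadyToRun().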

void
LabeledEventQueue::PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
                            EventPriority aPriority,
                            const MutexAutoLock& aProofOfLock)
{
  MOZ_ASSERT(aPriority == mPriority);

  nsCOMPtr<nsIRunnable> event(aEvent);

  MOZ_ASSERT(event.get());

  SchedulerGroup* group = GetSchedulerGroup(event);
  bool isLabeled = !!group;

  // Create a new epoch if necessary.
  Epoch* epoch;
  if (mEpochs.IsEmpty()) {
    epoch = &mEpochs.Push(Epoch::First(isLabeled));
  } else {
    Epoch& lastEpoch = mEpochs.LastElement();
    if (lastEpoch.IsLabeled() != isLabeled) {
      epoch = &mEpochs.Push(lastEpoch.NextEpoch(isLabeled));
    } else {
      epoch = &lastEpoch;
    }
  }

  mNumEvents++;
  epoch->mNumEvents++;

  RunnableEpochQueue& queue = isLabeled ? group->GetQueue(aPriority) : mUnlabeled;
  queue.Push(EpochQueueEntry(event.forget(), epoch->mEpochNumber));

  if (group && group->EnqueueEvent() == SchedulerGroup::NewlyQueued) {
    // This group didn't have any events before. Add it to the
    // sSchedulerGroups list.
    MOZ_ASSERT(!group->isInList());
    sSchedulerGroups->insertBack(group);
    if (!sCurrentSchedulerGroup) {
      sCurrentSchedulerGroup = group;
    }
  }
}
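
// Worked example (hypothetical sequence, not taken from a real trace): pushing
// a labeled event A, a second labeled event B, an unlabeled event U, and then
// a labeled event C produces three epochs: a labeled epoch holding A and B
// (mNumEvents == 2), an unlabeled epoch holding U, and a new labeled epoch
// holding C. A and B land in their SchedulerGroups' queues and U lands in
// mUnlabeled, each entry tagged with its epoch's mEpochNumber so GetEvent can
// preserve overall FIFO order across the labeled/unlabeled split.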

void
LabeledEventQueue::PopEpoch()
{
  Epoch& epoch = mEpochs.FirstElement();
  MOZ_ASSERT(epoch.mNumEvents > 0);
  if (epoch.mNumEvents == 1) {
    mEpochs.Pop();
  } else {
    epoch.mNumEvents--;
  }

  mNumEvents--;
}

// Returns the next SchedulerGroup after |aGroup| in sSchedulerGroups. Wraps
// around to the beginning of the list when we hit the end.
/* static */ SchedulerGroup*
LabeledEventQueue::NextSchedulerGroup(SchedulerGroup* aGroup)
{
  SchedulerGroup* result = aGroup->getNext();
  if (!result) {
    result = sSchedulerGroups->getFirst();
  }
  return result;
}

already_AddRefed<nsIRunnable>
LabeledEventQueue::GetEvent(EventPriority* aPriority,
                            const MutexAutoLock& aProofOfLock)
{
  if (mEpochs.IsEmpty()) {
    return nullptr;
  }

  Epoch epoch = mEpochs.FirstElement();
  if (!epoch.IsLabeled()) {
    EpochQueueEntry& first = mUnlabeled.FirstElement();
    if (!IsReadyToRun(first.mRunnable, nullptr)) {
      return nullptr;
    }

    PopEpoch();
    EpochQueueEntry entry = mUnlabeled.Pop();
    MOZ_ASSERT(entry.mEpochNumber == epoch.mEpochNumber);
    MOZ_ASSERT(entry.mRunnable.get());
    return entry.mRunnable.forget();
  }

  if (!sCurrentSchedulerGroup) {
    return nullptr;
  }

  // Move visible tabs to the front of the queue. The mAvoidVisibleTabCount
  // field prevents us from preferentially processing events from visible tabs
  // twice in a row. This scheme is designed to prevent starvation.
  if (TabChild::HasVisibleTabs() && mAvoidVisibleTabCount <= 0) {
    for (auto iter = TabChild::GetVisibleTabs().ConstIter();
         !iter.Done(); iter.Next()) {
      SchedulerGroup* group = iter.Get()->GetKey()->TabGroup();
      if (!group->isInList() || group == sCurrentSchedulerGroup) {
        continue;
      }

      // For each visible tab we move to the front of the queue, we have to
      // process two SchedulerGroups (the visible tab and another one,
      // presumably a background group) before we prioritize visible tabs
      // again.
      mAvoidVisibleTabCount += 2;

      // We move |group| right before sCurrentSchedulerGroup and then set
      // sCurrentSchedulerGroup to group.
      MOZ_ASSERT(group != sCurrentSchedulerGroup);
      group->removeFrom(*sSchedulerGroups);
      sCurrentSchedulerGroup->setPrevious(group);
      sCurrentSchedulerGroup = group;
    }
  }

  // Iterate over each SchedulerGroup once, starting at sCurrentSchedulerGroup.
  SchedulerGroup* firstGroup = sCurrentSchedulerGroup;
  SchedulerGroup* group = firstGroup;
  do {
    mAvoidVisibleTabCount--;

    RunnableEpochQueue& queue = group->GetQueue(mPriority);

    if (queue.IsEmpty()) {
      // This can happen if |group| is in a different LabeledEventQueue than
      // |this|.
      group = NextSchedulerGroup(group);
      continue;
    }

    EpochQueueEntry& first = queue.FirstElement();
    if (first.mEpochNumber == epoch.mEpochNumber &&
        IsReadyToRun(first.mRunnable, group)) {
      sCurrentSchedulerGroup = NextSchedulerGroup(group);

      PopEpoch();

      if (group->DequeueEvent() == SchedulerGroup::NoLongerQueued) {
        // Now we can take group out of sSchedulerGroups.
        if (sCurrentSchedulerGroup == group) {
          // Since we changed sCurrentSchedulerGroup above, we'll only get here
          // if |group| was the only element in sSchedulerGroups. In that case
          // set sCurrentSchedulerGroup to null.
          MOZ_ASSERT(group->getNext() == nullptr);
          MOZ_ASSERT(group->getPrevious() == nullptr);
          sCurrentSchedulerGroup = nullptr;
        }
        group->removeFrom(*sSchedulerGroups);
      }
      EpochQueueEntry entry = queue.Pop();
      return entry.mRunnable.forget();
    }

    group = NextSchedulerGroup(group);
  } while (group != firstGroup);

  return nullptr;
}
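
// Worked example of the mAvoidVisibleTabCount bookkeeping (assuming, for
// simplicity, that each GetEvent call examines exactly one group before
// returning): with the counter at 0 and a visible tab's group V pending, V is
// moved in front of sCurrentSchedulerGroup and the counter becomes 2; serving
// V's event decrements it to 1, so the next GetEvent call skips the
// visible-tab pass and serves some other group, dropping the counter to 0;
// only the call after that may move visible tabs to the front again.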

bool
LabeledEventQueue::IsEmpty(const MutexAutoLock& aProofOfLock)
{
  return mEpochs.IsEmpty();
}

size_t
LabeledEventQueue::Count(const MutexAutoLock& aProofOfLock) const
{
  return mNumEvents;
}

bool
LabeledEventQueue::HasReadyEvent(const MutexAutoLock& aProofOfLock)
{
  if (mEpochs.IsEmpty()) {
    return false;
  }

  Epoch& frontEpoch = mEpochs.FirstElement();

  if (!frontEpoch.IsLabeled()) {
    EpochQueueEntry& entry = mUnlabeled.FirstElement();
    return IsReadyToRun(entry.mRunnable, nullptr);
  }

  // Go through the scheduler groups and look for one that has an event for
  // the priority of this labeled queue that is in the current epoch and is
  // allowed to run.
  uintptr_t currentEpoch = frontEpoch.mEpochNumber;
  for (SchedulerGroup* group : *sSchedulerGroups) {
    RunnableEpochQueue& queue = group->GetQueue(mPriority);
    if (queue.IsEmpty()) {
      continue;
    }

    EpochQueueEntry& entry = queue.FirstElement();
    if (entry.mEpochNumber != currentEpoch) {
      continue;
    }

    if (IsReadyToRun(entry.mRunnable, group)) {
      return true;
    }
  }

  return false;
}