Coverage Report

Created: 2018-09-25 14:53

/src/mozilla-central/gfx/tests/gtest/TestJobScheduler.cpp
All executable lines below were reported with a hit count of 0 (no coverage recorded for this file in this run).
/* vim:set ts=2 sw=2 sts=2 et: */
/* Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

#include "gtest/gtest.h"
#include "gmock/gmock.h"

#include "mozilla/gfx/JobScheduler.h"

#ifndef WIN32
#include <pthread.h>
#include <sched.h>
#endif

#include <stdlib.h>
#include <time.h>

namespace test_scheduler {

using namespace mozilla::gfx;
using namespace mozilla;
using mozilla::gfx::SyncObject;

// Artificially cause threads to yield randomly in an attempt to make racy
// things more apparent (if any).
void MaybeYieldThread()
{
#ifndef WIN32
  if (rand() % 5 == 0) {
    sched_yield();
  }
#endif
}

/// Used by TestJob to check that tasks are processed in the right order.
struct SanityChecker {
  std::vector<uint64_t> mAdvancements;
  mozilla::gfx::CriticalSection mSection;

  explicit SanityChecker(uint64_t aNumCmdBuffers)
  {
    for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {
      mAdvancements.push_back(0);
    }
  }

  virtual void Check(uint64_t aJobId, uint64_t aCmdId)
  {
    MaybeYieldThread();
    CriticalSectionAutoEnter lock(&mSection);
    MOZ_RELEASE_ASSERT(mAdvancements[aJobId] == aCmdId-1);
    mAdvancements[aJobId] = aCmdId;
  }
};

/// Run checks that are specific to TestSchedulerJoin.
struct JoinTestSanityCheck : public SanityChecker {
  bool mSpecialJobHasRun;

  explicit JoinTestSanityCheck(uint64_t aNumCmdBuffers)
  : SanityChecker(aNumCmdBuffers)
  , mSpecialJobHasRun(false)
  {}

  virtual void Check(uint64_t aJobId, uint64_t aCmdId) override
  {
    // Job 0 is the special task executed when everything is joined after task 1.
    if (aCmdId == 0) {
      MOZ_RELEASE_ASSERT(!mSpecialJobHasRun, "GFX: A special task has been executed.");
      mSpecialJobHasRun = true;
      for (auto advancement : mAdvancements) {
        // Because of the synchronization point (beforeFilter), all
        // task buffers should have run task 1 when task 0 is run.
        MOZ_RELEASE_ASSERT(advancement == 1, "GFX: task buffer has not run task 1.");
      }
    } else {
      // This check does not apply to task 0.
      SanityChecker::Check(aJobId, aCmdId);
    }

    if (aCmdId == 2) {
      MOZ_RELEASE_ASSERT(mSpecialJobHasRun, "GFX: Special job has not run.");
    }
  }
};

class TestJob : public Job
{
public:
  TestJob(uint64_t aCmdId, uint64_t aJobId, SanityChecker* aChecker,
          SyncObject* aStart, SyncObject* aCompletion)
  : Job(aStart, aCompletion, nullptr)
  , mCmdId(aCmdId)
  , mCmdBufferId(aJobId)
  , mSanityChecker(aChecker)
  {}

  JobStatus Run()
  {
    MaybeYieldThread();
    mSanityChecker->Check(mCmdBufferId, mCmdId);
    MaybeYieldThread();
    return JobStatus::Complete;
  }

  uint64_t mCmdId;
  uint64_t mCmdBufferId;
  SanityChecker* mSanityChecker;
};

/// This test creates aNumCmdBuffers task buffers with sync objects set up
/// so that all task buffers join after task 1, then a single special task
/// (task 0) runs, after which all task buffers fork again.
/// This simulates the kind of scenario where all tiles must join at
/// a certain point to execute, say, a filter, and fork again after the filter
/// has been processed.
/// The main thread is only blocked when waiting for the completion of the entire
/// task stream (it doesn't have to wait at the filter's sync points to orchestrate it).
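// Editor's note (not part of the original file): a sketch of the dependency
// graph that TestSchedulerJoin builds below, with N = aNumCmdBuffers:
//
//   N x task 1 --> beforeFilter --> task 0 --> afterFilter --> N x task 2 --> completion
//
// Each job signals the SyncObject to its right when it finishes, and may only
// start once the SyncObject to its left has been signaled by all of its
// prerequisites.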
void TestSchedulerJoin(uint32_t aNumThreads, uint32_t aNumCmdBuffers)
{
  JoinTestSanityCheck check(aNumCmdBuffers);

  RefPtr<SyncObject> beforeFilter = new SyncObject(aNumCmdBuffers);
  RefPtr<SyncObject> afterFilter = new SyncObject();
  RefPtr<SyncObject> completion = new SyncObject(aNumCmdBuffers);

  for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {
    Job* t1 = new TestJob(1, i, &check, nullptr, beforeFilter);
    JobScheduler::SubmitJob(t1);
    MaybeYieldThread();
  }
  beforeFilter->FreezePrerequisites();

  // This task buffer is executed when all other tasks have joined after task 1.
  JobScheduler::SubmitJob(
    new TestJob(0, 0, &check, beforeFilter, afterFilter)
  );
  afterFilter->FreezePrerequisites();

  for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {
    Job* t2 = new TestJob(2, i, &check, afterFilter, completion);
    JobScheduler::SubmitJob(t2);
    MaybeYieldThread();
  }
  completion->FreezePrerequisites();

  JobScheduler::Join(completion);

  MaybeYieldThread();

  for (auto advancement : check.mAdvancements) {
    EXPECT_TRUE(advancement == 2);
  }
}

/// This test creates several chains of 10 tasks; tasks within a given chain are
/// executed sequentially, and the chains are executed in parallel.
/// This simulates the typical scenario where we want to process sequences of drawing
/// commands for several tiles in parallel.
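// Editor's note (not part of the original file): a sketch of one chain built
// below for a given buffer i, with numJobs = 10:
//
//   task 1 --> syncs[0] --> task 2 --> syncs[1] --> ... --> task 9 --> syncs[8] --> task 10 --> completion
//
// The shared `completion` SyncObject has one prerequisite per buffer, so
// JobScheduler::Join(completion) returns only after every chain has finished.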
void TestSchedulerChain(uint32_t aNumThreads, uint32_t aNumCmdBuffers)
{
  SanityChecker check(aNumCmdBuffers);

  RefPtr<SyncObject> completion = new SyncObject(aNumCmdBuffers);

  uint32_t numJobs = 10;

  for (uint32_t i = 0; i < aNumCmdBuffers; ++i) {

    std::vector<RefPtr<SyncObject>> syncs;
    std::vector<Job*> tasks;
    syncs.reserve(numJobs);
    tasks.reserve(numJobs);

    for (uint32_t t = 0; t < numJobs-1; ++t) {
      syncs.push_back(new SyncObject());
      tasks.push_back(new TestJob(t+1, i, &check, t == 0 ? nullptr
                                                         : syncs[t-1].get(),
                                  syncs[t]));
      syncs.back()->FreezePrerequisites();
    }

    tasks.push_back(new TestJob(numJobs, i, &check, syncs.back(), completion));

    if (i % 2 == 0) {
      // Submit the tasks of half of the buffers in order...
      for (Job* task : tasks) {
        JobScheduler::SubmitJob(task);
        MaybeYieldThread();
      }
    } else {
      // ... and the tasks of the other half in reverse order.
      for (int32_t reverse = numJobs-1; reverse >= 0; --reverse) {
        JobScheduler::SubmitJob(tasks[reverse]);
        MaybeYieldThread();
      }
    }
  }
  completion->FreezePrerequisites();

  JobScheduler::Join(completion);

  for (auto advancement : check.mAdvancements) {
    EXPECT_TRUE(advancement == numJobs);
  }
}

} // namespace test_scheduler

#if !defined(MOZ_CODE_COVERAGE) || !defined(XP_WIN)
TEST(Moz2D, JobScheduler_Shutdown) {
  srand(time(nullptr));
  for (uint32_t threads = 1; threads < 16; ++threads) {
    for (uint32_t i = 1; i < 1000; ++i) {
      mozilla::gfx::JobScheduler::Init(threads, threads);
      mozilla::gfx::JobScheduler::ShutDown();
    }
  }
}
#endif

TEST(Moz2D, JobScheduler_Join) {
  srand(time(nullptr));
  for (uint32_t threads = 1; threads < 8; ++threads) {
    for (uint32_t queues = 1; queues < threads; ++queues) {
      for (uint32_t buffers = 1; buffers < 100; buffers += 3) {
        mozilla::gfx::JobScheduler::Init(threads, queues);
        test_scheduler::TestSchedulerJoin(threads, buffers);
        mozilla::gfx::JobScheduler::ShutDown();
      }
    }
  }
}

TEST(Moz2D, JobScheduler_Chain) {
  srand(time(nullptr));
  for (uint32_t threads = 1; threads < 8; ++threads) {
    for (uint32_t queues = 1; queues < threads; ++queues) {
      for (uint32_t buffers = 1; buffers < 100; buffers += 3) {
        mozilla::gfx::JobScheduler::Init(threads, queues);
        test_scheduler::TestSchedulerChain(threads, buffers);
        mozilla::gfx::JobScheduler::ShutDown();
      }
    }
  }
}
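
Editor's note (not part of the coverage data): the tests above drive the JobScheduler API
in roughly the pattern sketched below. "MyJob" and the Init() argument values are
illustrative assumptions, not something defined in this file.

  using namespace mozilla::gfx;

  JobScheduler::Init(4, 4);                                 // worker threads, queues (illustrative values)

  RefPtr<SyncObject> completion = new SyncObject();
  JobScheduler::SubmitJob(new MyJob(nullptr, completion));  // MyJob: hypothetical Job subclass; no start
                                                            // sync, signals `completion` when it finishes
  completion->FreezePrerequisites();                        // no further prerequisites will be added

  JobScheduler::Join(completion);                           // block until the submitted work completes
  JobScheduler::ShutDown();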