Line data Source code
1 : // Copyright 2016 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/compiler-dispatcher/compiler-dispatcher.h"
6 :
7 : #include "include/v8-platform.h"
8 : #include "include/v8.h"
9 : #include "src/base/platform/time.h"
10 : #include "src/cancelable-task.h"
11 : #include "src/compilation-info.h"
12 : #include "src/compiler-dispatcher/compiler-dispatcher-job.h"
13 : #include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
14 : #include "src/compiler-dispatcher/unoptimized-compile-job.h"
15 : #include "src/flags.h"
16 : #include "src/objects-inl.h"
17 :
18 : namespace v8 {
19 : namespace internal {
20 :
21 : namespace {
22 :
23 : enum class ExceptionHandling { kSwallow, kThrow };
24 :
25 127 : bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
26 : ExceptionHandling exception_handling) {
27 : DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
28 254 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
29 : "V8.CompilerDispatcherForgroundStep");
30 127 : job->StepNextOnMainThread(isolate);
31 :
32 : DCHECK_EQ(job->IsFailed(), isolate->has_pending_exception());
33 127 : if (job->IsFailed() && exception_handling == ExceptionHandling::kSwallow) {
34 : isolate->clear_pending_exception();
35 : }
36 127 : return job->IsFailed();
37 : }
38 :
// Runs one step of |job| off the main thread. Only valid when the job's
// current step is allowed to execute on any thread.
void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
  DCHECK(job->CanStepNextOnAnyThread());
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherBackgroundStep");
  job->StepNextOnBackgroundThread();
}
45 :
// Theoretically we get 50ms of idle time max, however it's unlikely that
// we'll get all of it, so try to be conservative.
const double kMaxIdleTimeToExpectInMs = 40;
49 :
50 : class MemoryPressureTask : public CancelableTask {
51 : public:
52 : MemoryPressureTask(Isolate* isolate, CancelableTaskManager* task_manager,
53 : CompilerDispatcher* dispatcher);
54 : ~MemoryPressureTask() override;
55 :
56 : // CancelableTask implementation.
57 : void RunInternal() override;
58 :
59 : private:
60 : CompilerDispatcher* dispatcher_;
61 :
62 : DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
63 : };
64 :
65 : MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
66 : CancelableTaskManager* task_manager,
67 : CompilerDispatcher* dispatcher)
68 7 : : CancelableTask(task_manager), dispatcher_(dispatcher) {}
69 :
70 14 : MemoryPressureTask::~MemoryPressureTask() {}
71 :
72 7 : void MemoryPressureTask::RunInternal() {
73 7 : dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
74 7 : }
75 :
76 : } // namespace
77 :
78 : class CompilerDispatcher::AbortTask : public CancelableTask {
79 : public:
80 : AbortTask(Isolate* isolate, CancelableTaskManager* task_manager,
81 : CompilerDispatcher* dispatcher);
82 : ~AbortTask() override;
83 :
84 : // CancelableTask implementation.
85 : void RunInternal() override;
86 :
87 : private:
88 : CompilerDispatcher* dispatcher_;
89 :
90 : DISALLOW_COPY_AND_ASSIGN(AbortTask);
91 : };
92 :
93 0 : CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
94 : CancelableTaskManager* task_manager,
95 : CompilerDispatcher* dispatcher)
96 11 : : CancelableTask(task_manager), dispatcher_(dispatcher) {}
97 :
98 22 : CompilerDispatcher::AbortTask::~AbortTask() {}
99 :
100 11 : void CompilerDispatcher::AbortTask::RunInternal() {
101 11 : dispatcher_->AbortInactiveJobs();
102 11 : }
103 :
104 : class CompilerDispatcher::BackgroundTask : public CancelableTask {
105 : public:
106 : BackgroundTask(Isolate* isolate, CancelableTaskManager* task_manager,
107 : CompilerDispatcher* dispatcher);
108 : ~BackgroundTask() override;
109 :
110 : // CancelableTask implementation.
111 : void RunInternal() override;
112 :
113 : private:
114 : CompilerDispatcher* dispatcher_;
115 :
116 : DISALLOW_COPY_AND_ASSIGN(BackgroundTask);
117 : };
118 :
119 0 : CompilerDispatcher::BackgroundTask::BackgroundTask(
120 : Isolate* isolate, CancelableTaskManager* task_manager,
121 : CompilerDispatcher* dispatcher)
122 8 : : CancelableTask(task_manager), dispatcher_(dispatcher) {}
123 :
124 16 : CompilerDispatcher::BackgroundTask::~BackgroundTask() {}
125 :
126 4 : void CompilerDispatcher::BackgroundTask::RunInternal() {
127 4 : dispatcher_->DoBackgroundWork();
128 4 : }
129 :
130 : class CompilerDispatcher::IdleTask : public CancelableIdleTask {
131 : public:
132 : IdleTask(Isolate* isolate, CancelableTaskManager* task_manager,
133 : CompilerDispatcher* dispatcher);
134 : ~IdleTask() override;
135 :
136 : // CancelableIdleTask implementation.
137 : void RunInternal(double deadline_in_seconds) override;
138 :
139 : private:
140 : CompilerDispatcher* dispatcher_;
141 :
142 : DISALLOW_COPY_AND_ASSIGN(IdleTask);
143 : };
144 :
145 0 : CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
146 : CancelableTaskManager* task_manager,
147 : CompilerDispatcher* dispatcher)
148 27 : : CancelableIdleTask(task_manager), dispatcher_(dispatcher) {}
149 :
150 54 : CompilerDispatcher::IdleTask::~IdleTask() {}
151 :
152 14 : void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
153 14 : dispatcher_->DoIdleWork(deadline_in_seconds);
154 14 : }
155 :
// Constructs a dispatcher bound to |isolate|. Jobs' compile steps may use up
// to |max_stack_size| bytes of stack. The dispatcher starts idle: no tasks
// are scheduled until jobs are enqueued.
CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
                                       size_t max_stack_size)
    : isolate_(isolate),
      platform_(platform),
      max_stack_size_(max_stack_size),
      trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
      tracer_(new CompilerDispatcherTracer(isolate_)),
      task_manager_(new CancelableTaskManager()),
      next_job_id_(0),
      shared_to_unoptimized_job_id_(isolate->heap()),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      abort_(false),
      idle_task_scheduled_(false),
      num_background_tasks_(0),
      main_thread_blocking_on_job_(nullptr),
      block_for_testing_(false),
      semaphore_for_testing_(0) {
  if (trace_compiler_dispatcher_ && !IsEnabled()) {
    PrintF("CompilerDispatcher: dispatcher is disabled\n");
  }
}
177 :
// Synchronously aborts all outstanding jobs and cancels all scheduled tasks
// before tearing down, so no task can run against a destroyed dispatcher.
CompilerDispatcher::~CompilerDispatcher() {
  // To avoid crashing in unit tests due to unfinished jobs.
  AbortAll(BlockingBehavior::kBlock);
  task_manager_->CancelAndWait();
}
183 :
184 28 : bool CompilerDispatcher::CanEnqueue() {
185 28 : if (!IsEnabled()) return false;
186 :
187 28 : if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
188 : return false;
189 : }
190 :
191 : {
192 27 : base::LockGuard<base::Mutex> lock(&mutex_);
193 27 : if (abort_) return false;
194 : }
195 :
196 26 : return true;
197 : }
198 :
199 28 : bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
200 28 : if (!CanEnqueue()) return false;
201 :
202 : // We only handle functions (no eval / top-level code / native) that are
203 : // attached to a script.
204 78 : if (!function->script()->IsScript() || function->is_toplevel() ||
205 : function->native()) {
206 : return false;
207 : }
208 :
209 26 : return true;
210 : }
211 :
// Registers |job| with the dispatcher, considers kicking off a background
// step for it, and makes sure an idle task is pending. Returns the new
// job's id.
CompilerDispatcher::JobId CompilerDispatcher::Enqueue(
    std::unique_ptr<CompilerDispatcherJob> job) {
  DCHECK(!job->IsFinished());
  JobMap::const_iterator it = InsertJob(std::move(job));
  ConsiderJobForBackgroundProcessing(it->second.get());
  ScheduleIdleTaskIfNeeded();
  return it->first;
}
220 :
221 2 : CompilerDispatcher::JobId CompilerDispatcher::EnqueueAndStep(
222 : std::unique_ptr<CompilerDispatcherJob> job) {
223 : DCHECK(!job->IsFinished());
224 4 : JobMap::const_iterator it = InsertJob(std::move(job));
225 2 : if (trace_compiler_dispatcher_) {
226 0 : PrintF("CompilerDispatcher: stepping ");
227 0 : it->second->ShortPrintOnMainThread();
228 0 : PrintF("\n");
229 : }
230 : DoNextStepOnMainThread(isolate_, it->second.get(),
231 2 : ExceptionHandling::kSwallow);
232 2 : ConsiderJobForBackgroundProcessing(it->second.get());
233 2 : RemoveIfFinished(it);
234 : ScheduleIdleTaskIfNeeded();
235 2 : return it->first;
236 : }
237 :
238 25 : bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
239 50 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
240 : "V8.CompilerDispatcherEnqueue");
241 25 : if (!CanEnqueue(function)) return false;
242 23 : if (IsEnqueued(function)) return true;
243 :
244 23 : if (trace_compiler_dispatcher_) {
245 0 : PrintF("CompilerDispatcher: enqueuing ");
246 0 : function->ShortPrint();
247 0 : PrintF(" for parse and compile\n");
248 : }
249 :
250 : std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
251 23 : isolate_, tracer_.get(), function, max_stack_size_));
252 46 : Enqueue(std::move(job));
253 25 : return true;
254 : }
255 :
256 3 : bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
257 6 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
258 : "V8.CompilerDispatcherEnqueueAndStep");
259 3 : if (!CanEnqueue(function)) return false;
260 3 : if (IsEnqueued(function)) return true;
261 :
262 2 : if (trace_compiler_dispatcher_) {
263 0 : PrintF("CompilerDispatcher: enqueuing ");
264 0 : function->ShortPrint();
265 0 : PrintF(" for parse and compile\n");
266 : }
267 :
268 : std::unique_ptr<CompilerDispatcherJob> job(new UnoptimizedCompileJob(
269 2 : isolate_, tracer_.get(), function, max_stack_size_));
270 4 : EnqueueAndStep(std::move(job));
271 3 : return true;
272 : }
273 :
274 28 : bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }
275 :
276 576647 : bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
277 576673 : if (jobs_.empty()) return false;
278 46 : return GetJobFor(function) != jobs_.end();
279 : }
280 :
// Blocks the main thread until |job| is no longer executing on a background
// thread. If the job is merely pending (not yet picked up), it is removed
// from the pending set instead, so no background thread will start it.
void CompilerDispatcher::WaitForJobIfRunningOnBackground(
    CompilerDispatcherJob* job) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherWaitForBackgroundJob");
  RuntimeCallTimerScope runtimeTimer(
      isolate_, &RuntimeCallStats::CompileWaitForDispatcher);

  base::LockGuard<base::Mutex> lock(&mutex_);
  if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
    // Not running; just make sure it also won't start.
    pending_background_jobs_.erase(job);
    return;
  }
  // Ask DoBackgroundWork() to signal us when it is done with this job, then
  // wait on the condition variable (which releases |mutex_| while waiting).
  DCHECK_NULL(main_thread_blocking_on_job_);
  main_thread_blocking_on_job_ = job;
  while (main_thread_blocking_on_job_ != nullptr) {
    main_thread_blocking_signal_.Wait(&mutex_);
  }
  DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
  DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
}
301 :
// Drives |job| to completion on the main thread, first waiting out any
// in-flight background step. Exceptions are left pending on the isolate
// (ExceptionHandling::kThrow). Returns true iff the job succeeded.
bool CompilerDispatcher::FinishNow(CompilerDispatcherJob* job) {
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: finishing ");
    job->ShortPrintOnMainThread();
    PrintF(" now\n");
  }
  WaitForJobIfRunningOnBackground(job);
  while (!job->IsFinished()) {
    DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
  }
  return !job->IsFailed();
}
314 :
315 7 : bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
316 14 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
317 : "V8.CompilerDispatcherFinishNow");
318 7 : JobMap::const_iterator job = GetJobFor(function);
319 7 : CHECK(job != jobs_.end());
320 7 : bool result = FinishNow(job->second.get());
321 7 : RemoveIfFinished(job);
322 7 : return result;
323 : }
324 :
// Completes every tracked job synchronously on the main thread. Jobs that
// are currently being stepped on a background thread are skipped in the
// first pass and finished (potentially after a wait) in the second.
void CompilerDispatcher::FinishAllNow() {
  // First finish all jobs not running in background.
  for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
    CompilerDispatcherJob* job = it->second.get();
    bool is_running_in_background;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      is_running_in_background =
          running_background_jobs_.find(job) != running_background_jobs_.end();
      // Keep background threads from picking this job up; we step it here.
      pending_background_jobs_.erase(job);
    }
    if (!is_running_in_background) {
      while (!job->IsFinished()) {
        DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
      }
      it = RemoveIfFinished(it);
    } else {
      ++it;
    }
  }
  // Potentially wait for jobs that were running in background.
  for (auto it = jobs_.cbegin(); it != jobs_.cend();
       it = RemoveIfFinished(it)) {
    FinishNow(it->second.get());
  }
}
351 :
// Aborts all jobs. When no background task is running (or |blocking| is
// kBlock), this happens synchronously: wait out in-flight background steps,
// reset every job, and clear all bookkeeping. Otherwise the abort is
// asynchronous: set the abort flag, drop pending background work, and let
// AbortTask / finishing background tasks reap the remaining jobs.
void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
  bool background_tasks_running =
      task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
  if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
    for (auto& it : jobs_) {
      WaitForJobIfRunningOnBackground(it.second.get());
      if (trace_compiler_dispatcher_) {
        PrintF("CompilerDispatcher: aborted ");
        it.second->ShortPrintOnMainThread();
        PrintF("\n");
      }
      it.second->ResetOnMainThread(isolate_);
    }
    jobs_.clear();
    shared_to_unoptimized_job_id_.Clear();
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      DCHECK(pending_background_jobs_.empty());
      DCHECK(running_background_jobs_.empty());
      abort_ = false;
    }
    return;
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    abort_ = true;
    pending_background_jobs_.clear();
  }
  AbortInactiveJobs();

  // All running background jobs might already have scheduled idle tasks instead
  // of abort tasks. Schedule a single abort task here to make sure they get
  // processed as soon as possible (and not first when we have idle time).
  ScheduleAbortTask();
}
388 :
// Removes every job that is not currently being worked on by a background
// thread. Once the job map is empty and no background task remains, the
// asynchronous abort is complete and the abort flag is cleared.
void CompilerDispatcher::AbortInactiveJobs() {
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    // Since we schedule two abort tasks per async abort, we might end up
    // here with nothing left to do.
    if (!abort_) return;
  }
  for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
    // Advance before a potential erase so |it| stays valid.
    auto job = it;
    ++it;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (running_background_jobs_.find(job->second.get()) !=
          running_background_jobs_.end()) {
        // Still active on a background thread; it will be reaped later.
        continue;
      }
    }
    if (trace_compiler_dispatcher_) {
      PrintF("CompilerDispatcher: aborted ");
      job->second->ShortPrintOnMainThread();
      PrintF("\n");
    }
    it = RemoveJob(job);
  }
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (num_background_tasks_ == 0) abort_ = false;
  }
}
418 :
// Reacts to a change in memory pressure. On the first transition away from
// kNone, aborts everything: directly (non-blocking) when the isolate is
// locked by this thread, otherwise by entering abort mode and posting a
// MemoryPressureTask to the foreground thread.
void CompilerDispatcher::MemoryPressureNotification(
    v8::MemoryPressureLevel level, bool is_isolate_locked) {
  MemoryPressureLevel previous = memory_pressure_level_.Value();
  memory_pressure_level_.SetValue(level);
  // If we're already under pressure, we haven't accepted new tasks meanwhile
  // and can just return. If we're no longer under pressure, we're also done.
  if (previous != MemoryPressureLevel::kNone ||
      level == MemoryPressureLevel::kNone) {
    return;
  }
  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received memory pressure notification\n");
  }
  if (is_isolate_locked) {
    AbortAll(BlockingBehavior::kDontBlock);
  } else {
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (abort_) return;
      // By going into abort mode here, and clearing the
      // pending_background_jobs_, we keep existing background jobs from
      // picking up more work before the MemoryPressureTask gets executed.
      abort_ = true;
      pending_background_jobs_.clear();
    }
    platform_->CallOnForegroundThread(
        reinterpret_cast<v8::Isolate*>(isolate_),
        new MemoryPressureTask(isolate_, task_manager_.get(), this));
  }
}
449 :
450 30 : CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
451 : Handle<SharedFunctionInfo> shared) const {
452 : JobId* job_id_ptr = shared_to_unoptimized_job_id_.Find(shared);
453 : JobMap::const_iterator job = jobs_.end();
454 30 : if (job_id_ptr) {
455 : job = jobs_.find(*job_id_ptr);
456 : DCHECK(job == jobs_.end() ||
457 : job->second->AsUnoptimizedCompileJob()->IsAssociatedWith(shared));
458 : }
459 30 : return job;
460 : }
461 :
// Posts an IdleTask to the foreground thread, unless the platform does not
// support idle tasks or one is already scheduled. Safe to call from
// background threads.
void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
  if (!platform_->IdleTasksEnabled(v8_isolate)) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (idle_task_scheduled_) return;
    idle_task_scheduled_ = true;
  }
  platform_->CallIdleOnForegroundThread(
      v8_isolate, new IdleTask(isolate_, task_manager_.get(), this));
}
473 :
474 0 : void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
475 27 : if (jobs_.empty()) return;
476 27 : ScheduleIdleTaskFromAnyThread();
477 : }
478 :
479 11 : void CompilerDispatcher::ScheduleAbortTask() {
480 11 : v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
481 : platform_->CallOnForegroundThread(
482 33 : v8_isolate, new AbortTask(isolate_, task_manager_.get(), this));
483 11 : }
484 :
// If |job|'s next step may run off the main thread, adds it to the pending
// background set and possibly spins up another background task for it.
void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
    CompilerDispatcherJob* job) {
  if (!job->CanStepNextOnAnyThread()) return;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    pending_background_jobs_.insert(job);
  }
  ScheduleMoreBackgroundTasksIfNeeded();
}
494 :
// Spawns one more BackgroundTask if there is pending background work and
// the platform still has background threads to spare.
void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
               "V8.CompilerDispatcherScheduleMoreBackgroundTasksIfNeeded");
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (pending_background_jobs_.empty()) return;
    if (platform_->NumberOfAvailableBackgroundThreads() <=
        num_background_tasks_) {
      return;
    }
    // Count the task before posting it so concurrent callers don't
    // over-schedule.
    ++num_background_tasks_;
  }
  platform_->CallOnBackgroundThread(
      new BackgroundTask(isolate_, task_manager_.get(), this),
      v8::Platform::kShortRunningTask);
}
511 :
// Body of a BackgroundTask: repeatedly pops a job from the pending set and
// runs one background step on it until no pending work remains. Wakes up a
// main thread blocked in WaitForJobIfRunningOnBackground() when its job's
// step completes, and hands over to an AbortTask when an abort is pending.
void CompilerDispatcher::DoBackgroundWork() {
  for (;;) {
    CompilerDispatcherJob* job = nullptr;
    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      if (!pending_background_jobs_.empty()) {
        // Move the job from pending to running so the main thread knows it
        // has to wait for this step rather than just un-pending the job.
        auto it = pending_background_jobs_.begin();
        job = *it;
        pending_background_jobs_.erase(it);
        running_background_jobs_.insert(job);
      }
    }
    if (job == nullptr) break;

    // Test hook: lets unit tests hold this thread inside a background step.
    if (V8_UNLIKELY(block_for_testing_.Value())) {
      block_for_testing_.SetValue(false);
      semaphore_for_testing_.Wait();
    }

    if (trace_compiler_dispatcher_) {
      PrintF("CompilerDispatcher: doing background work\n");
    }

    DoNextStepOnBackgroundThread(job);
    // Unconditionally schedule an idle task, as all background steps have to be
    // followed by a main thread step.
    ScheduleIdleTaskFromAnyThread();

    {
      base::LockGuard<base::Mutex> lock(&mutex_);
      running_background_jobs_.erase(job);

      // Release a main thread waiting in WaitForJobIfRunningOnBackground().
      if (main_thread_blocking_on_job_ == job) {
        main_thread_blocking_on_job_ = nullptr;
        main_thread_blocking_signal_.NotifyOne();
      }
    }
  }

  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    --num_background_tasks_;

    if (running_background_jobs_.empty() && abort_) {
      // This is the last background job that finished. The abort task
      // scheduled by AbortAll might already have ran, so schedule another
      // one to be on the safe side.
      ScheduleAbortTask();
    }
  }
  // Don't touch |this| anymore after this point, as it might have been
  // deleted.
}
565 :
// Body of an IdleTask: spends the available idle time (until
// |deadline_in_seconds|) stepping jobs on the main thread. Jobs whose next
// step would not fit into the remaining slot are offered to background
// processing instead; finished jobs are removed. Reschedules itself if
// work that could plausibly fit into a future idle slot remains.
void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
  bool aborted = false;
  {
    base::LockGuard<base::Mutex> lock(&mutex_);
    idle_task_scheduled_ = false;
    aborted = abort_;
  }

  // An abort is in progress; reap jobs instead of stepping them.
  if (aborted) {
    AbortInactiveJobs();
    return;
  }

  // Number of jobs that are unlikely to make progress during any idle callback
  // due to their estimated duration.
  size_t too_long_jobs = 0;

  // Iterate over all available jobs & remaining time. For each job, decide
  // whether to 1) skip it (if it would take too long), 2) erase it (if it's
  // finished), or 3) make progress on it.
  double idle_time_in_seconds =
      deadline_in_seconds - platform_->MonotonicallyIncreasingTime();

  if (trace_compiler_dispatcher_) {
    PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
           idle_time_in_seconds *
               static_cast<double>(base::Time::kMillisecondsPerSecond));
  }
  for (auto job = jobs_.cbegin();
       job != jobs_.cend() && idle_time_in_seconds > 0.0;
       idle_time_in_seconds =
           deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
    // Don't work on jobs that are being worked on by background tasks.
    // Similarly, remove jobs we work on from the set of available background
    // jobs. The lock is held via unique_ptr so it can be dropped early on
    // the paths below that call back into locking functions.
    std::unique_ptr<base::LockGuard<base::Mutex>> lock(
        new base::LockGuard<base::Mutex>(&mutex_));
    if (running_background_jobs_.find(job->second.get()) !=
        running_background_jobs_.end()) {
      ++job;
      continue;
    }
    auto it = pending_background_jobs_.find(job->second.get());
    double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
    if (idle_time_in_seconds <
        (estimate_in_ms /
         static_cast<double>(base::Time::kMillisecondsPerSecond))) {
      // If there's not enough time left, try to estimate whether we would
      // have managed to finish the job in a large idle task to assess
      // whether we should ask for another idle callback.
      if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
      if (it == pending_background_jobs_.end()) {
        // Drop the lock before ConsiderJobForBackgroundProcessing()
        // re-acquires it.
        lock.reset();
        ConsiderJobForBackgroundProcessing(job->second.get());
      }
      ++job;
    } else if (job->second->IsFinished()) {
      DCHECK(it == pending_background_jobs_.end());
      lock.reset();
      job = RemoveJob(job);
      continue;
    } else {
      // Do one step, and keep processing the job (as we don't advance the
      // iterator).
      if (it != pending_background_jobs_.end()) {
        pending_background_jobs_.erase(it);
      }
      lock.reset();
      DoNextStepOnMainThread(isolate_, job->second.get(),
                             ExceptionHandling::kSwallow);
    }
  }
  // Ask for another idle slot if some jobs could still fit into one.
  if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
}
640 :
641 11 : CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
642 : JobMap::const_iterator job) {
643 11 : if (!job->second->IsFinished()) {
644 2 : return job;
645 : }
646 :
647 9 : if (trace_compiler_dispatcher_) {
648 0 : bool result = !job->second->IsFailed();
649 0 : PrintF("CompilerDispatcher: finished working on ");
650 0 : job->second->ShortPrintOnMainThread();
651 0 : PrintF(": %s\n", result ? "success" : "failure");
652 0 : tracer_->DumpStatistics();
653 : }
654 :
655 9 : return RemoveJob(job);
656 : }
657 :
// Takes ownership of |job|, assigns it a fresh id, and — for unoptimized
// compile jobs with a non-null SFI — records the SFI -> job-id mapping used
// by GetJobFor(). Returns the iterator to the new map entry.
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::InsertJob(
    std::unique_ptr<CompilerDispatcherJob> job) {
  bool added;
  JobMap::const_iterator it;
  std::tie(it, added) =
      jobs_.insert(std::make_pair(next_job_id_++, std::move(job)));
  DCHECK(added);

  JobId id = it->first;
  CompilerDispatcherJob* inserted_job = it->second.get();

  // Maps unoptimized jobs' SFIs to their job id.
  if (inserted_job->type() == CompilerDispatcherJob::kUnoptimizedCompile) {
    Handle<SharedFunctionInfo> shared =
        inserted_job->AsUnoptimizedCompileJob()->shared();
    if (!shared.is_null()) {
      shared_to_unoptimized_job_id_.Set(shared, id);
    }
  }

  return it;
}
680 :
// Resets the job at |it|, drops its SFI -> job-id mapping, and erases it
// from the job map. When the last job disappears and no background task is
// active, any pending abort is complete, so the abort flag is cleared.
// Returns the iterator to the next entry.
CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
    CompilerDispatcher::JobMap::const_iterator it) {
  CompilerDispatcherJob* job = it->second.get();
  job->ResetOnMainThread(isolate_);

  // Unmaps unoptimized jobs' SFIs to their job id.
  if (job->type() == CompilerDispatcherJob::kUnoptimizedCompile) {
    Handle<SharedFunctionInfo> shared =
        job->AsUnoptimizedCompileJob()->shared();
    if (!shared.is_null()) {
      JobId deleted_id = shared_to_unoptimized_job_id_.Delete(shared);
      USE(deleted_id);
      DCHECK_EQ(it->first, deleted_id);
    }
  }

  it = jobs_.erase(it);
  if (jobs_.empty()) {
    base::LockGuard<base::Mutex> lock(&mutex_);
    if (num_background_tasks_ == 0) abort_ = false;
  }
  return it;
}
704 :
705 : } // namespace internal
706 : } // namespace v8
|