Line data Source code
1 : // Copyright 2016 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/compiler-dispatcher/compiler-dispatcher.h"
6 :
7 : #include "include/v8-platform.h"
8 : #include "include/v8.h"
9 : #include "src/base/platform/time.h"
10 : #include "src/cancelable-task.h"
11 : #include "src/compilation-info.h"
12 : #include "src/compiler-dispatcher/compiler-dispatcher-job.h"
13 : #include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
14 : #include "src/flags.h"
15 : #include "src/objects-inl.h"
16 :
17 : namespace v8 {
18 : namespace internal {
19 :
20 : namespace {
21 :
22 : enum class ExceptionHandling { kSwallow, kThrow };
23 :
24 161 : bool IsFinished(CompilerDispatcherJob* job) {
25 161 : return job->status() == CompileJobStatus::kDone ||
26 : job->status() == CompileJobStatus::kFailed;
27 : }
28 :
29 38 : bool CanRunOnAnyThread(CompilerDispatcherJob* job) {
30 38 : return job->status() == CompileJobStatus::kReadyToParse ||
31 : job->status() == CompileJobStatus::kReadyToCompile;
32 : }
33 :
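 : // Performs a single state transition of |job| on the main thread and
 : // returns false iff the job ended up in kFailed. With
 : // ExceptionHandling::kSwallow any pending exception is cleared; with
 : // kThrow it is left on the isolate for the caller to handle.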
34 532 : bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
35 : ExceptionHandling exception_handling) {
36 : DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
37 266 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
38 : "V8.CompilerDispatcherForgroundStep");
39 :
40 : // Ensure we are in the correct context for the job.
41 266 : SaveContext save(isolate);
42 133 : if (job->has_context()) {
43 : isolate->set_context(job->context());
44 : } else {
45 : DCHECK(CanRunOnAnyThread(job));
46 : }
47 :
48 133 : switch (job->status()) {
49 : case CompileJobStatus::kInitial:
50 20 : job->PrepareToParseOnMainThread();
51 20 : break;
52 :
53 : case CompileJobStatus::kReadyToParse:
54 19 : job->Parse();
55 19 : break;
56 :
57 : case CompileJobStatus::kParsed:
58 19 : job->FinalizeParsingOnMainThread();
59 19 : break;
60 :
61 : case CompileJobStatus::kReadyToAnalyze:
62 19 : job->AnalyzeOnMainThread();
63 19 : break;
64 :
65 : case CompileJobStatus::kAnalyzed:
66 23 : job->PrepareToCompileOnMainThread();
67 23 : break;
68 :
69 : case CompileJobStatus::kReadyToCompile:
70 14 : job->Compile();
71 14 : break;
72 :
73 : case CompileJobStatus::kCompiled:
74 19 : job->FinalizeCompilingOnMainThread();
75 19 : break;
76 :
77 : case CompileJobStatus::kFailed:
78 : case CompileJobStatus::kDone:
79 : break;
80 : }
81 :
82 : DCHECK_EQ(job->status() == CompileJobStatus::kFailed,
83 : isolate->has_pending_exception());
84 133 : if (job->status() == CompileJobStatus::kFailed &&
85 : exception_handling == ExceptionHandling::kSwallow) {
86 : isolate->clear_pending_exception();
87 : }
88 266 : return job->status() != CompileJobStatus::kFailed;
89 : }
90 :
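 : // Performs the Parse or Compile step off the main thread; the job must be
 : // in a state for which CanRunOnAnyThread() holds.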
91 12 : void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
92 : DCHECK(CanRunOnAnyThread(job));
93 12 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
94 : "V8.CompilerDispatcherBackgroundStep");
95 :
96 6 : switch (job->status()) {
97 : case CompileJobStatus::kReadyToParse:
98 0 : job->Parse();
99 0 : break;
100 :
101 : case CompileJobStatus::kReadyToCompile:
102 6 : job->Compile();
103 6 : break;
104 :
105 : default:
106 0 : UNREACHABLE();
107 6 : }
108 6 : }
109 :
110 : // Theoretically we get at most 50ms of idle time, but it's unlikely that
111 : // we'll get all of it, so be conservative.
112 : const double kMaxIdleTimeToExpectInMs = 40;
113 :
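 : // Foreground task posted by MemoryPressureNotification; it aborts all jobs
 : // without blocking.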
114 : class MemoryPressureTask : public CancelableTask {
115 : public:
116 : MemoryPressureTask(Isolate* isolate, CancelableTaskManager* task_manager,
117 : CompilerDispatcher* dispatcher);
118 : ~MemoryPressureTask() override;
119 :
120 : // CancelableTask implementation.
121 : void RunInternal() override;
122 :
123 : private:
124 : CompilerDispatcher* dispatcher_;
125 :
126 : DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
127 : };
128 :
129 : MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
130 : CancelableTaskManager* task_manager,
131 : CompilerDispatcher* dispatcher)
132 8 : : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
133 :
134 16 : MemoryPressureTask::~MemoryPressureTask() {}
135 :
136 8 : void MemoryPressureTask::RunInternal() {
137 8 : dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
138 8 : }
139 :
140 : } // namespace
141 :
142 : class CompilerDispatcher::AbortTask : public CancelableTask {
143 : public:
144 : AbortTask(Isolate* isolate, CancelableTaskManager* task_manager,
145 : CompilerDispatcher* dispatcher);
146 : ~AbortTask() override;
147 :
148 : // CancelableTask implementation.
149 : void RunInternal() override;
150 :
151 : private:
152 : CompilerDispatcher* dispatcher_;
153 :
154 : DISALLOW_COPY_AND_ASSIGN(AbortTask);
155 : };
156 :
157 0 : CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
158 : CancelableTaskManager* task_manager,
159 : CompilerDispatcher* dispatcher)
160 12 : : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
161 :
162 24 : CompilerDispatcher::AbortTask::~AbortTask() {}
163 :
164 12 : void CompilerDispatcher::AbortTask::RunInternal() {
165 12 : dispatcher_->AbortInactiveJobs();
166 12 : }
167 :
168 : class CompilerDispatcher::BackgroundTask : public CancelableTask {
169 : public:
170 : BackgroundTask(Isolate* isolate, CancelableTaskManager* task_manager,
171 : CompilerDispatcher* dispatcher);
172 : ~BackgroundTask() override;
173 :
174 : // CancelableTask implementation.
175 : void RunInternal() override;
176 :
177 : private:
178 : CompilerDispatcher* dispatcher_;
179 :
180 : DISALLOW_COPY_AND_ASSIGN(BackgroundTask);
181 : };
182 :
183 0 : CompilerDispatcher::BackgroundTask::BackgroundTask(
184 : Isolate* isolate, CancelableTaskManager* task_manager,
185 : CompilerDispatcher* dispatcher)
186 10 : : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
187 :
188 20 : CompilerDispatcher::BackgroundTask::~BackgroundTask() {}
189 :
190 5 : void CompilerDispatcher::BackgroundTask::RunInternal() {
191 5 : dispatcher_->DoBackgroundWork();
192 5 : }
193 :
194 : class CompilerDispatcher::IdleTask : public CancelableIdleTask {
195 : public:
196 : IdleTask(Isolate* isolate, CancelableTaskManager* task_manager,
197 : CompilerDispatcher* dispatcher);
198 : ~IdleTask() override;
199 :
200 : // CancelableIdleTask implementation.
201 : void RunInternal(double deadline_in_seconds) override;
202 :
203 : private:
204 : CompilerDispatcher* dispatcher_;
205 :
206 : DISALLOW_COPY_AND_ASSIGN(IdleTask);
207 : };
208 :
209 0 : CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
210 : CancelableTaskManager* task_manager,
211 : CompilerDispatcher* dispatcher)
212 33 : : CancelableIdleTask(isolate, task_manager), dispatcher_(dispatcher) {}
213 :
214 66 : CompilerDispatcher::IdleTask::~IdleTask() {}
215 :
216 16 : void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
217 16 : dispatcher_->DoIdleWork(deadline_in_seconds);
218 16 : }
219 :
220 60807 : CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
221 : size_t max_stack_size)
222 : : isolate_(isolate),
223 : platform_(platform),
224 : max_stack_size_(max_stack_size),
225 : trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
226 60807 : tracer_(new CompilerDispatcherTracer(isolate_)),
227 : task_manager_(new CancelableTaskManager()),
228 : next_job_id_(0),
229 : shared_to_job_id_(isolate->heap()),
230 : memory_pressure_level_(MemoryPressureLevel::kNone),
231 : abort_(false),
232 : idle_task_scheduled_(false),
233 : num_background_tasks_(0),
234 : main_thread_blocking_on_job_(nullptr),
235 : block_for_testing_(false),
236 486456 : semaphore_for_testing_(0) {
237 60807 : if (trace_compiler_dispatcher_ && !IsEnabled()) {
238 0 : PrintF("CompilerDispatcher: dispatcher is disabled\n");
239 : }
240 60807 : }
241 :
242 177930 : CompilerDispatcher::~CompilerDispatcher() {
243 : // Abort all jobs to avoid crashing in unit tests due to unfinished jobs.
244 59310 : AbortAll(BlockingBehavior::kBlock);
245 59310 : task_manager_->CancelAndWait();
246 59310 : }
247 :
248 34 : bool CompilerDispatcher::CanEnqueue() {
249 34 : if (!IsEnabled()) return false;
250 :
251 : DCHECK(FLAG_ignition);
252 :
253 34 : if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
254 : return false;
255 : }
256 :
257 : {
258 33 : base::LockGuard<base::Mutex> lock(&mutex_);
259 33 : if (abort_) return false;
260 : }
261 :
262 32 : return true;
263 : }
264 :
265 33 : bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
266 33 : if (!CanEnqueue()) return false;
267 :
268 : // We only handle functions (no eval / top-level code / wasm) that are
269 : // attached to a script.
270 93 : if (!function->script()->IsScript() || function->is_toplevel() ||
271 62 : function->asm_function() || function->native()) {
272 : return false;
273 : }
274 :
275 31 : return true;
276 : }
277 :
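 : // Registers |job| under a fresh job id, records the SharedFunctionInfo ->
 : // job id mapping (if any), offers the job for background processing and
 : // makes sure an idle task is scheduled.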
278 28 : CompilerDispatcher::JobId CompilerDispatcher::Enqueue(
279 : std::unique_ptr<CompilerDispatcherJob> job) {
280 : DCHECK(!IsFinished(job.get()));
281 : bool added;
282 : JobMap::const_iterator it;
283 : std::tie(it, added) =
284 56 : jobs_.insert(std::make_pair(next_job_id_++, std::move(job)));
285 : DCHECK(added);
286 28 : if (!it->second->shared().is_null()) {
287 27 : shared_to_job_id_.Set(it->second->shared(), it->first);
288 : }
289 28 : ConsiderJobForBackgroundProcessing(it->second.get());
290 : ScheduleIdleTaskIfNeeded();
291 28 : return it->first;
292 : }
293 :
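 : // Like Enqueue(), but additionally performs the first step on the main
 : // thread right away (swallowing any exception) and drops the job again if
 : // that step already finished it.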
294 3 : CompilerDispatcher::JobId CompilerDispatcher::EnqueueAndStep(
295 : std::unique_ptr<CompilerDispatcherJob> job) {
296 : DCHECK(!IsFinished(job.get()));
297 : bool added;
298 : JobMap::const_iterator it;
299 : std::tie(it, added) =
300 6 : jobs_.insert(std::make_pair(next_job_id_++, std::move(job)));
301 : DCHECK(added);
302 3 : if (!it->second->shared().is_null()) {
303 3 : shared_to_job_id_.Set(it->second->shared(), it->first);
304 : }
305 3 : JobId id = it->first;
306 3 : if (trace_compiler_dispatcher_) {
307 0 : PrintF("CompilerDispatcher: stepping ");
308 0 : it->second->ShortPrint();
309 0 : PrintF("\n");
310 : }
311 : DoNextStepOnMainThread(isolate_, it->second.get(),
312 3 : ExceptionHandling::kSwallow);
313 3 : ConsiderJobForBackgroundProcessing(it->second.get());
314 3 : RemoveIfFinished(it);
315 : ScheduleIdleTaskIfNeeded();
316 3 : return id;
317 : }
318 :
319 25 : bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
320 50 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
321 : "V8.CompilerDispatcherEnqueue");
322 25 : if (!CanEnqueue(function)) return false;
323 23 : if (IsEnqueued(function)) return true;
324 :
325 23 : if (trace_compiler_dispatcher_) {
326 0 : PrintF("CompilerDispatcher: enqueuing ");
327 0 : function->ShortPrint();
328 0 : PrintF(" for parse and compile\n");
329 : }
330 :
331 : std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
332 23 : isolate_, tracer_.get(), function, max_stack_size_));
333 46 : Enqueue(std::move(job));
334 25 : return true;
335 : }
336 :
337 1 : bool CompilerDispatcher::Enqueue(Handle<String> source, int start_position,
338 : int end_position, LanguageMode language_mode,
339 : int function_literal_id, bool native,
340 : bool module, bool is_named_expression,
341 : int compiler_hints,
342 : CompileJobFinishCallback* finish_callback,
343 : JobId* job_id) {
344 2 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
345 : "V8.CompilerDispatcherEnqueue");
346 1 : if (!CanEnqueue()) return false;
347 :
348 1 : if (trace_compiler_dispatcher_) {
349 : PrintF("CompilerDispatcher: enqueuing function at %d for initial parse\n",
350 0 : start_position);
351 : }
352 :
353 : std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
354 : tracer_.get(), max_stack_size_, source, start_position, end_position,
355 : language_mode, function_literal_id, native, module, is_named_expression,
356 2 : isolate_->heap()->HashSeed(), isolate_->allocator(), compiler_hints,
357 2 : isolate_->ast_string_constants(), finish_callback));
358 2 : JobId id = Enqueue(std::move(job));
359 1 : if (job_id != nullptr) {
360 0 : *job_id = id;
361 : }
362 1 : return true;
363 : }
364 :
365 2 : bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
366 4 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
367 : "V8.CompilerDispatcherEnqueueAndStep");
368 2 : if (!CanEnqueue(function)) return false;
369 2 : if (IsEnqueued(function)) return true;
370 :
371 1 : if (trace_compiler_dispatcher_) {
372 0 : PrintF("CompilerDispatcher: enqueuing ");
373 0 : function->ShortPrint();
374 0 : PrintF(" for parse and compile\n");
375 : }
376 :
377 : std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
378 1 : isolate_, tracer_.get(), function, max_stack_size_));
379 2 : EnqueueAndStep(std::move(job));
380 2 : return true;
381 : }
382 :
383 3 : bool CompilerDispatcher::Enqueue(
384 : Handle<Script> script, Handle<SharedFunctionInfo> function,
385 : FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
386 : std::shared_ptr<DeferredHandles> parse_handles,
387 : std::shared_ptr<DeferredHandles> compile_handles) {
388 6 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
389 : "V8.CompilerDispatcherEnqueue");
390 3 : if (!CanEnqueue(function)) return false;
391 3 : if (IsEnqueued(function)) return true;
392 :
393 3 : if (trace_compiler_dispatcher_) {
394 0 : PrintF("CompilerDispatcher: enqueuing ");
395 0 : function->ShortPrint();
396 0 : PrintF(" for compile\n");
397 : }
398 :
399 : std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
400 : isolate_, tracer_.get(), script, function, literal, parse_zone,
401 15 : parse_handles, compile_handles, max_stack_size_));
402 6 : Enqueue(std::move(job));
403 3 : return true;
404 : }
405 :
406 3 : bool CompilerDispatcher::EnqueueAndStep(
407 : Handle<Script> script, Handle<SharedFunctionInfo> function,
408 : FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
409 : std::shared_ptr<DeferredHandles> parse_handles,
410 : std::shared_ptr<DeferredHandles> compile_handles) {
411 6 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
412 : "V8.CompilerDispatcherEnqueueAndStep");
413 3 : if (!CanEnqueue(function)) return false;
414 3 : if (IsEnqueued(function)) return true;
415 :
416 2 : if (trace_compiler_dispatcher_) {
417 0 : PrintF("CompilerDispatcher: enqueuing ");
418 0 : function->ShortPrint();
419 0 : PrintF(" for compile\n");
420 : }
421 :
422 : std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
423 : isolate_, tracer_.get(), script, function, literal, parse_zone,
424 10 : parse_handles, compile_handles, max_stack_size_));
425 4 : EnqueueAndStep(std::move(job));
426 3 : return true;
427 : }
428 :
429 34 : bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }
430 :
431 1486554 : bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
432 1486585 : if (jobs_.empty()) return false;
433 54 : return GetJobFor(function) != jobs_.end();
434 : }
435 :
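 : // If |job| is currently executing on a background thread, blocks the main
 : // thread until that step has completed; otherwise it merely removes the
 : // job from the pending background set so no background task picks it up.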
436 19 : void CompilerDispatcher::WaitForJobIfRunningOnBackground(
437 : CompilerDispatcherJob* job) {
438 38 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
439 : "V8.CompilerDispatcherWaitForBackgroundJob");
440 : RuntimeCallTimerScope runtimeTimer(
441 19 : isolate_, &RuntimeCallStats::CompileWaitForDispatcher);
442 :
443 19 : base::LockGuard<base::Mutex> lock(&mutex_);
444 19 : if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
445 : pending_background_jobs_.erase(job);
446 19 : return;
447 : }
448 : DCHECK_NULL(main_thread_blocking_on_job_);
449 0 : main_thread_blocking_on_job_ = job;
450 0 : while (main_thread_blocking_on_job_ != nullptr) {
451 0 : main_thread_blocking_signal_.Wait(&mutex_);
452 : }
453 : DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
454 0 : DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
455 : }
456 :
457 9 : bool CompilerDispatcher::FinishNow(CompilerDispatcherJob* job) {
458 9 : if (trace_compiler_dispatcher_) {
459 0 : PrintF("CompilerDispatcher: finishing ");
460 0 : job->ShortPrint();
461 0 : PrintF(" now\n");
462 : }
463 9 : WaitForJobIfRunningOnBackground(job);
464 69 : while (!IsFinished(job)) {
465 51 : DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
466 : }
467 9 : return job->status() != CompileJobStatus::kFailed;
468 : }
469 :
470 7 : bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
471 14 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
472 : "V8.CompilerDispatcherFinishNow");
473 7 : JobMap::const_iterator job = GetJobFor(function);
474 7 : CHECK(job != jobs_.end());
475 7 : bool result = FinishNow(job->second.get());
476 7 : if (!job->second->shared().is_null()) {
477 : shared_to_job_id_.Delete(job->second->shared());
478 : }
479 7 : RemoveIfFinished(job);
480 7 : return result;
481 : }
482 :
483 1 : void CompilerDispatcher::FinishAllNow() {
484 6 : for (auto it = jobs_.cbegin(); it != jobs_.cend();
485 : it = RemoveIfFinished(it)) {
486 2 : FinishNow(it->second.get());
487 : }
488 1 : }
489 :
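 : // Aborts all enqueued jobs. When blocking (or when no background task is
 : // running) every job is reset on the main thread before returning;
 : // otherwise the dispatcher enters abort mode and lets abort tasks clean up
 : // the remaining jobs asynchronously.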
490 118610 : void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
491 : bool background_tasks_running =
492 118610 : task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
493 118610 : if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
494 237210 : for (auto& it : jobs_) {
495 10 : WaitForJobIfRunningOnBackground(it.second.get());
496 10 : if (trace_compiler_dispatcher_) {
497 0 : PrintF("CompilerDispatcher: aborted ");
498 0 : it.second->ShortPrint();
499 0 : PrintF("\n");
500 : }
501 10 : it.second->ResetOnMainThread();
502 : }
503 : jobs_.clear();
504 : shared_to_job_id_.Clear();
505 : {
506 118600 : base::LockGuard<base::Mutex> lock(&mutex_);
507 : DCHECK(pending_background_jobs_.empty());
508 : DCHECK(running_background_jobs_.empty());
509 118600 : abort_ = false;
510 : }
511 237210 : return;
512 : }
513 :
514 : {
515 10 : base::LockGuard<base::Mutex> lock(&mutex_);
516 10 : abort_ = true;
517 : pending_background_jobs_.clear();
518 : }
519 10 : AbortInactiveJobs();
520 :
521 : // All running background jobs might already have scheduled idle tasks instead
522 : // of abort tasks. Schedule a single abort task here to make sure they get
523 : // processed as soon as possible, and not only once we next get idle time.
524 10 : ScheduleAbortTask();
525 : }
526 :
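 : // Removes every job that is not currently running on a background thread.
 : // Only does something while the dispatcher is in abort mode.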
527 22 : void CompilerDispatcher::AbortInactiveJobs() {
528 : {
529 22 : base::LockGuard<base::Mutex> lock(&mutex_);
530 : // Since we schedule two abort tasks per async abort, we might end up
531 : // here with nothing left to do.
532 44 : if (!abort_) return;
533 : }
534 32 : for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
535 : auto job = it;
536 : ++it;
537 : {
538 : base::LockGuard<base::Mutex> lock(&mutex_);
539 12 : if (running_background_jobs_.find(job->second.get()) !=
540 : running_background_jobs_.end()) {
541 : continue;
542 : }
543 : }
544 2 : if (trace_compiler_dispatcher_) {
545 0 : PrintF("CompilerDispatcher: aborted ");
546 0 : job->second->ShortPrint();
547 0 : PrintF("\n");
548 : }
549 2 : it = RemoveJob(job);
550 : }
551 13 : if (jobs_.empty()) {
552 : base::LockGuard<base::Mutex> lock(&mutex_);
553 9 : if (num_background_tasks_ == 0) abort_ = false;
554 : }
555 : }
556 :
557 30 : void CompilerDispatcher::MemoryPressureNotification(
558 : v8::MemoryPressureLevel level, bool is_isolate_locked) {
559 : MemoryPressureLevel previous = memory_pressure_level_.Value();
560 : memory_pressure_level_.SetValue(level);
561 : // If we're already under pressure, we haven't accepted new tasks meanwhile
562 : // and can just return. If we're no longer under pressure, we're also done.
563 60 : if (previous != MemoryPressureLevel::kNone ||
564 30 : level == MemoryPressureLevel::kNone) {
565 : return;
566 : }
567 11 : if (trace_compiler_dispatcher_) {
568 0 : PrintF("CompilerDispatcher: received memory pressure notification\n");
569 : }
570 11 : if (is_isolate_locked) {
571 3 : AbortAll(BlockingBehavior::kDontBlock);
572 : } else {
573 : {
574 8 : base::LockGuard<base::Mutex> lock(&mutex_);
575 8 : if (abort_) return;
576 : // By going into abort mode here, and clearing the
577 : // pending_background_jobs_, we at least keep existing background jobs from
578 : // picking up more work before the MemoryPressureTask gets executed.
579 8 : abort_ = true;
580 : pending_background_jobs_.clear();
581 : }
582 : platform_->CallOnForegroundThread(
583 : reinterpret_cast<v8::Isolate*>(isolate_),
584 32 : new MemoryPressureTask(isolate_, task_manager_.get(), this));
585 : }
586 : }
587 :
588 34 : CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
589 : Handle<SharedFunctionInfo> shared) const {
590 : JobId* job_id_ptr = shared_to_job_id_.Find(shared);
591 : JobMap::const_iterator job = jobs_.end();
592 34 : if (job_id_ptr) {
593 : job = jobs_.find(*job_id_ptr);
594 : DCHECK(job == jobs_.end() || job->second->IsAssociatedWith(shared));
595 : }
596 34 : return job;
597 : }
598 :
599 38 : void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
600 38 : v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
601 : DCHECK(platform_->IdleTasksEnabled(v8_isolate));
602 : {
603 38 : base::LockGuard<base::Mutex> lock(&mutex_);
604 76 : if (idle_task_scheduled_) return;
605 33 : idle_task_scheduled_ = true;
606 : }
607 : platform_->CallIdleOnForegroundThread(
608 132 : v8_isolate, new IdleTask(isolate_, task_manager_.get(), this));
609 : }
610 :
611 0 : void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
612 32 : if (jobs_.empty()) return;
613 32 : ScheduleIdleTaskFromAnyThread();
614 : }
615 :
616 12 : void CompilerDispatcher::ScheduleAbortTask() {
617 12 : v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
618 : platform_->CallOnForegroundThread(
619 36 : v8_isolate, new AbortTask(isolate_, task_manager_.get(), this));
620 12 : }
621 :
622 38 : void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
623 : CompilerDispatcherJob* job) {
624 114 : if (!CanRunOnAnyThread(job)) return;
625 : {
626 11 : base::LockGuard<base::Mutex> lock(&mutex_);
627 : pending_background_jobs_.insert(job);
628 : }
629 11 : ScheduleMoreBackgroundTasksIfNeeded();
630 : }
631 :
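 : // Posts another BackgroundTask if there are pending background jobs and
 : // fewer background tasks than available background threads.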
632 11 : void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
633 22 : TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
634 : "V8.CompilerDispatcherScheduleMoreBackgroundTasksIfNeeded");
635 : {
636 11 : base::LockGuard<base::Mutex> lock(&mutex_);
637 11 : if (pending_background_jobs_.empty()) return;
638 11 : if (platform_->NumberOfAvailableBackgroundThreads() <=
639 : num_background_tasks_) {
640 : return;
641 : }
642 10 : ++num_background_tasks_;
643 : }
644 : platform_->CallOnBackgroundThread(
645 10 : new BackgroundTask(isolate_, task_manager_.get(), this),
646 40 : v8::Platform::kShortRunningTask);
647 : }
648 :
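 : // Runs on a background thread: repeatedly picks a pending background job,
 : // performs its next step, schedules an idle task for the follow-up main
 : // thread step, and wakes up a main thread that is blocked on the job. On
 : // exit it may schedule an abort task if an abort is in progress.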
649 5 : void CompilerDispatcher::DoBackgroundWork() {
650 : for (;;) {
651 11 : CompilerDispatcherJob* job = nullptr;
652 : {
653 11 : base::LockGuard<base::Mutex> lock(&mutex_);
654 11 : if (!pending_background_jobs_.empty()) {
655 : auto it = pending_background_jobs_.begin();
656 6 : job = *it;
657 : pending_background_jobs_.erase(it);
658 : running_background_jobs_.insert(job);
659 : }
660 : }
661 11 : if (job == nullptr) break;
662 :
663 6 : if (V8_UNLIKELY(block_for_testing_.Value())) {
664 : block_for_testing_.SetValue(false);
665 2 : semaphore_for_testing_.Wait();
666 : }
667 :
668 6 : if (trace_compiler_dispatcher_) {
669 0 : PrintF("CompilerDispatcher: doing background work\n");
670 : }
671 :
672 6 : DoNextStepOnBackgroundThread(job);
673 : // Unconditionally schedule an idle task, as all background steps have to be
674 : // followed by a main thread step.
675 6 : ScheduleIdleTaskFromAnyThread();
676 :
677 : {
678 : base::LockGuard<base::Mutex> lock(&mutex_);
679 : running_background_jobs_.erase(job);
680 :
681 6 : if (main_thread_blocking_on_job_ == job) {
682 0 : main_thread_blocking_on_job_ = nullptr;
683 0 : main_thread_blocking_signal_.NotifyOne();
684 : }
685 : }
686 6 : }
687 :
688 : {
689 : base::LockGuard<base::Mutex> lock(&mutex_);
690 5 : --num_background_tasks_;
691 :
692 5 : if (running_background_jobs_.empty() && abort_) {
693 : // This is the last background job that finished. The abort task
694 : // scheduled by AbortAll might already have run, so schedule another
695 : // one to be on the safe side.
696 2 : ScheduleAbortTask();
697 : }
698 : }
699 : // Don't touch |this| anymore after this point, as it might have been
700 : // deleted.
701 5 : }
702 :
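 : // Runs during idle time on the main thread. Walks the job list while idle
 : // time remains: jobs running in the background are skipped, jobs whose next
 : // step would not fit are offered for background processing, finished jobs
 : // are removed, and everything else is stepped on the main thread. Asks for
 : // another idle callback if jobs remain that fit within the idle budget.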
703 16 : void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
704 : bool aborted = false;
705 : {
706 16 : base::LockGuard<base::Mutex> lock(&mutex_);
707 16 : idle_task_scheduled_ = false;
708 16 : aborted = abort_;
709 : }
710 :
711 16 : if (aborted) {
712 0 : AbortInactiveJobs();
713 16 : return;
714 : }
715 :
716 : // Number of jobs that are unlikely to make progress during any idle callback
717 : // due to their estimated duration.
718 : size_t too_long_jobs = 0;
719 :
720 : // Iterate over all available jobs & remaining time. For each job, decide
721 : // whether to 1) skip it (if it would take too long), 2) erase it (if it's
722 : // finished), or 3) make progress on it.
723 : double idle_time_in_seconds =
724 16 : deadline_in_seconds - platform_->MonotonicallyIncreasingTime();
725 :
726 16 : if (trace_compiler_dispatcher_) {
727 : PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
728 : idle_time_in_seconds *
729 0 : static_cast<double>(base::Time::kMillisecondsPerSecond));
730 : }
731 224 : for (auto job = jobs_.cbegin();
732 112 : job != jobs_.cend() && idle_time_in_seconds > 0.0;
733 : idle_time_in_seconds =
734 96 : deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
735 : // Don't work on jobs that are being worked on by background tasks.
736 : // Similarly, remove jobs we work on from the set of available background
737 : // jobs.
738 : std::unique_ptr<base::LockGuard<base::Mutex>> lock(
739 96 : new base::LockGuard<base::Mutex>(&mutex_));
740 192 : if (running_background_jobs_.find(job->second.get()) !=
741 : running_background_jobs_.end()) {
742 : ++job;
743 : continue;
744 : }
745 192 : auto it = pending_background_jobs_.find(job->second.get());
746 96 : double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
747 96 : if (idle_time_in_seconds <
748 96 : (estimate_in_ms /
749 : static_cast<double>(base::Time::kMillisecondsPerSecond))) {
750 : // If there's not enough time left, estimate whether the job could have
751 : // finished within a full-length idle task, to decide whether we should
752 : // ask for another idle callback.
753 7 : if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
754 7 : if (it == pending_background_jobs_.end()) {
755 : lock.reset();
756 7 : ConsiderJobForBackgroundProcessing(job->second.get());
757 : }
758 : ++job;
759 89 : } else if (IsFinished(job->second.get())) {
760 : DCHECK(it == pending_background_jobs_.end());
761 : lock.reset();
762 10 : job = RemoveJob(job);
763 10 : continue;
764 : } else {
765 : // Do one step, and keep processing the job (as we don't advance the
766 : // iterator).
767 79 : if (it != pending_background_jobs_.end()) {
768 : pending_background_jobs_.erase(it);
769 : }
770 : lock.reset();
771 : DoNextStepOnMainThread(isolate_, job->second.get(),
772 79 : ExceptionHandling::kSwallow);
773 : }
774 : }
775 16 : if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
776 : }
777 :
778 12 : CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveIfFinished(
779 : JobMap::const_iterator job) {
780 12 : if (!IsFinished(job->second.get())) {
781 3 : return job;
782 : }
783 :
784 9 : if (trace_compiler_dispatcher_) {
785 : bool result = job->second->status() != CompileJobStatus::kFailed;
786 0 : PrintF("CompilerDispatcher: finished working on ");
787 0 : job->second->ShortPrint();
788 0 : PrintF(": %s\n", result ? "success" : "failure");
789 0 : tracer_->DumpStatistics();
790 : }
791 :
792 9 : return RemoveJob(job);
793 : }
794 :
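 : // Resets |job| on the main thread, drops its SharedFunctionInfo mapping and
 : // erases it from the job map; leaves abort mode once the map is empty and
 : // no background task is running.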
795 21 : CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
796 : CompilerDispatcher::JobMap::const_iterator job) {
797 21 : job->second->ResetOnMainThread();
798 21 : if (!job->second->shared().is_null()) {
799 : shared_to_job_id_.Delete(job->second->shared());
800 : }
801 42 : job = jobs_.erase(job);
802 21 : if (jobs_.empty()) {
803 17 : base::LockGuard<base::Mutex> lock(&mutex_);
804 17 : if (num_background_tasks_ == 0) abort_ = false;
805 : }
806 21 : return job;
807 : }
808 :
809 : } // namespace internal
810 : } // namespace v8