// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
#include "src/compiler.h"
#include "src/counters.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

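// Deletes a finished or abandoned compilation job. If restore_function_code
// is set, the closure's code is reset to the SharedFunctionInfo's code and a
// pending optimization marker is cleared.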
void DisposeCompilationJob(OptimizedCompilationJob* job,
                           bool restore_function_code) {
  if (restore_function_code) {
    Handle<JSFunction> function = job->compilation_info()->closure();
    function->set_code(function->shared()->GetCode());
    if (function->IsInOptimizationQueue()) {
      function->ClearOptimizationMarker();
    }
    // TODO(mvstanton): We can't call EnsureFeedbackVector here due to
    // allocation, but we probably shouldn't call set_code either, as this
    // sometimes runs on the worker thread!
    // JSFunction::EnsureFeedbackVector(function);
  }
  delete job;
}

}  // namespace

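// Background task that takes one job from the dispatcher's input queue and
// executes it on a worker thread. Construction bumps the dispatcher's ref
// count; RunInternal signals ref_count_zero_ once the last task finishes.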
class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
 public:
  explicit CompileTask(Isolate* isolate,
                       OptimizingCompileDispatcher* dispatcher)
      : CancelableTask(isolate),
        isolate_(isolate),
        worker_thread_runtime_call_stats_(
            isolate->counters()->worker_thread_runtime_call_stats()),
        dispatcher_(dispatcher) {
    base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
    ++dispatcher_->ref_count_;
  }

  ~CompileTask() override = default;

 private:
  // v8::Task overrides.
  void RunInternal() override {
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    {
      WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
          worker_thread_runtime_call_stats_);
      RuntimeCallTimerScope runtimeTimer(
          runtime_call_stats_scope.Get(),
          RuntimeCallCounterId::kRecompileConcurrent);

      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                   "V8.RecompileConcurrent");

      if (dispatcher_->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher_->recompilation_delay_));
      }

      dispatcher_->CompileNext(dispatcher_->NextInput(true));
    }
    {
      base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
      if (--dispatcher_->ref_count_ == 0) {
        dispatcher_->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;
  WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats_;
  OptimizingCompileDispatcher* dispatcher_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};

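// The dispatcher may only be destroyed once all background tasks have
// finished and the input queue has been drained.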
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
}

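// Takes the next job off the circular input queue. If check_if_flushing is
// set and the dispatcher is flushing, the job is disposed (restoring the
// function's code) and nullptr is returned instead.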
OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
    bool check_if_flushing) {
  base::MutexGuard access_input_queue_(&input_queue_mutex_);
  if (input_queue_length_ == 0) return nullptr;
  OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
  DCHECK_NOT_NULL(job);
  input_queue_shift_ = InputQueueIndex(1);
  input_queue_length_--;
  if (check_if_flushing) {
    if (mode_ == FLUSH) {
      AllowHandleDereference allow_handle_dereference;
      DisposeCompilationJob(job, true);
      return nullptr;
    }
  }
  return job;
}

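// Executes the given job on the calling thread and moves it to the output
// queue, then requests a stack-guard interrupt so the main thread installs
// the result.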
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job) {
  if (!job) return;

  // The function may have already been optimized by OSR.  Simply continue.
  CompilationJob::Status status = job->ExecuteJob();
  USE(status);  // Prevent an unused-variable error.

  // Use a mutex to make sure that functions marked for install
  // are always also queued.
  base::MutexGuard access_output_queue_(&output_queue_mutex_);
  output_queue_.push(job);
  isolate_->stack_guard()->RequestInstallCode();
}

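// Discards every job in the output queue without installing it.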
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }

    DisposeCompilationJob(job, restore_function_code);
  }
}

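// Flushes both queues. The non-blocking variant discards queued jobs without
// waiting for in-flight tasks; the blocking variant switches to FLUSH mode,
// waits until all running tasks have completed, then drains the output queue.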
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
  if (blocking_behavior == BlockingBehavior::kDontBlock) {
    if (FLAG_block_concurrent_recompilation) Unblock();
    base::MutexGuard access_input_queue_(&input_queue_mutex_);
    while (input_queue_length_ > 0) {
      OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
      DCHECK_NOT_NULL(job);
      input_queue_shift_ = InputQueueIndex(1);
      input_queue_length_--;
      DisposeCompilationJob(job, true);
    }
    FlushOutputQueue(true);
    if (FLAG_trace_concurrent_recompilation) {
      PrintF("  ** Flushed concurrent recompilation queues (not blocking).\n");
    }
    return;
  }
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }
  FlushOutputQueue(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF("  ** Flushed concurrent recompilation queues.\n");
  }
}

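// Waits for all background tasks to finish and discards any remaining output.
// When a recompilation delay is configured, the remaining queued jobs are
// instead compiled and installed synchronously.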
void OptimizingCompileDispatcher::Stop() {
  mode_ = FLUSH;
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::MutexGuard lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    mode_ = COMPILE;
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    FlushOutputQueue(false);
  }
}

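// Runs on the main thread: finalizes every finished job unless the function
// has already been optimized in the meantime, in which case the job is
// discarded.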
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompilationJob* job = nullptr;
    {
      base::MutexGuard access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    OptimizedCompilationInfo* info = job->compilation_info();
    Handle<JSFunction> function(*info->closure(), isolate_);
    if (function->HasOptimizedCode()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF("  ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeCompilationJob(job, false);
    } else {
      Compiler::FinalizeOptimizedCompilationJob(job, isolate_);
    }
  }
}

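// Appends the job to the circular input queue and either posts a CompileTask
// to a worker thread or, when FLAG_block_concurrent_recompilation is set,
// holds it back until Unblock() is called.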
void OptimizingCompileDispatcher::QueueForOptimization(
    OptimizedCompilationJob* job) {
  DCHECK(IsQueueAvailable());
  {
    // Add job to the back of the input queue.
    base::MutexGuard access_input_queue(&input_queue_mutex_);
    DCHECK_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    blocked_jobs_++;
  } else {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<CompileTask>(isolate_, this));
  }
}

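// Schedules a CompileTask for every job that was held back while
// FLAG_block_concurrent_recompilation was set.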
void OptimizingCompileDispatcher::Unblock() {
  while (blocked_jobs_ > 0) {
    V8::GetCurrentPlatform()->CallOnWorkerThread(
        base::make_unique<CompileTask>(isolate_, this));
    blocked_jobs_--;
  }
}

}  // namespace internal
}  // namespace v8