Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/profiler/cpu-profiler.h"
6 :
7 : #include "src/debug/debug.h"
8 : #include "src/deoptimizer.h"
9 : #include "src/frames-inl.h"
10 : #include "src/locked-queue-inl.h"
11 : #include "src/log-inl.h"
12 : #include "src/profiler/cpu-profiler-inl.h"
13 : #include "src/vm-state-inl.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 :
// Stack size for the dedicated profiler-events processing thread.
static const int kProfilerStackSize = 64 * KB;
19 :
20 330 : class CpuSampler : public sampler::Sampler {
21 : public:
22 : CpuSampler(Isolate* isolate, ProfilerEventsProcessor* processor)
23 : : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
24 330 : processor_(processor) {}
25 :
26 232021 : void SampleStack(const v8::RegisterState& regs) override {
27 232021 : TickSample* sample = processor_->StartTickSample();
28 464042 : if (sample == nullptr) return;
29 232021 : Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
30 232021 : sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
31 462561 : if (is_counting_samples_ && !sample->timestamp.IsNull()) {
32 229551 : if (sample->state == JS) ++js_sample_count_;
33 229551 : if (sample->state == EXTERNAL) ++external_sample_count_;
34 : }
35 232021 : processor_->FinishTickSample();
36 : }
37 :
38 : private:
39 : ProfilerEventsProcessor* processor_;
40 : };
41 :
// Sets up the background processing thread together with the sampler that
// feeds it. Profiling depth is raised immediately so the platform sampler
// stays active for the whole lifetime of the processor.
ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
                                                 ProfileGenerator* generator,
                                                 base::TimeDelta period)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      sampler_(new CpuSampler(isolate, this)),
      running_(1),
      period_(period),
      last_code_event_id_(0),
      last_processed_code_event_id_(0) {
  sampler_->IncreaseProfilingDepth();
}
54 :
ProfilerEventsProcessor::~ProfilerEventsProcessor() {
  // Balances the IncreaseProfilingDepth() call made in the constructor.
  sampler_->DecreaseProfilingDepth();
}
58 :
// Tags |event| with a monotonically increasing id and queues it for the
// processor thread. Tick samples are matched against these ids (see
// ProcessOneSample) so they are only resolved after every earlier code
// event has been applied to the code map.
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
  event.generic.order = last_code_event_id_.Increment(1);
  events_buffer_.Enqueue(event);
}
63 :
64 :
// Records a synthetic tick at a deopt site. The register state is
// reconstructed from the saved C-entry frame pointer and the deoptimizer's
// fp-to-sp delta instead of coming from a real sampler interrupt; |from| is
// used as the pc.
void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
                                            int fp_to_sp_delta) {
  TickSampleEventRecord record(last_code_event_id_.Value());
  RegisterState regs;
  Address fp = isolate->c_entry_fp(isolate->thread_local_top());
  regs.sp = fp - fp_to_sp_delta;
  regs.fp = fp;
  regs.pc = from;
  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false, false);
  ticks_from_vm_buffer_.Enqueue(record);
}
76 :
// Queues a tick sample for the current stack of |isolate|, synthesizing the
// register state from the topmost stack frame. Called when a profiling
// session starts and from explicit CollectSample() requests.
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
                                              bool update_stats) {
  TickSampleEventRecord record(last_code_event_id_.Value());
  RegisterState regs;
  StackFrameIterator it(isolate);
  if (!it.done()) {
    StackFrame* frame = it.frame();
    regs.sp = frame->sp();
    regs.fp = frame->fp();
    regs.pc = frame->pc();
  }
  // NOTE(review): with an empty stack, regs keeps RegisterState's default
  // values — presumably null pointers; confirm against RegisterState.
  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}
92 :
93 :
// Signals the processor thread to stop and blocks until it has exited.
// The atomic exchange makes repeated calls a no-op after the first one.
void ProfilerEventsProcessor::StopSynchronously() {
  if (!base::NoBarrier_AtomicExchange(&running_, 0)) return;
  Join();
}
98 :
99 :
// Applies one pending code event to the generator's code map. Returns true
// if an event was dequeued (even when its type was skipped), false when the
// events queue was empty. Skipped records do not advance
// last_processed_code_event_id_.
bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}
119 :
120 : ProfilerEventsProcessor::SampleProcessingResult
121 870168 : ProfilerEventsProcessor::ProcessOneSample() {
122 : TickSampleEventRecord record1;
123 1262123 : if (ticks_from_vm_buffer_.Peek(&record1) &&
124 391955 : (record1.order == last_processed_code_event_id_)) {
125 : TickSampleEventRecord record;
126 596 : ticks_from_vm_buffer_.Dequeue(&record);
127 596 : generator_->RecordTickSample(record.sample);
128 : return OneSampleProcessed;
129 : }
130 :
131 869572 : const TickSampleEventRecord* record = ticks_buffer_.Peek();
132 869572 : if (record == NULL) {
133 577633 : if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
134 332425 : return FoundSampleForNextCodeEvent;
135 : }
136 291939 : if (record->order != last_processed_code_event_id_) {
137 : return FoundSampleForNextCodeEvent;
138 : }
139 231866 : generator_->RecordTickSample(record->sample);
140 231866 : ticks_buffer_.Remove();
141 231866 : return OneSampleProcessed;
142 : }
143 :
144 :
// Main loop of the processor thread: drains code events and tick samples,
// paces the sampler at |period_|, and flushes all remaining records once
// running_ is cleared by StopSynchronously().
void ProfilerEventsProcessor::Run() {
  // running_ is an atomic used as a boolean flag; !! normalizes it to bool.
  while (!!base::NoBarrier_Load(&running_)) {
    base::TimeTicks nextSampleTime =
        base::TimeTicks::HighResolutionNow() + period_;
    base::TimeTicks now;
    SampleProcessingResult result;
    // Keep processing existing events until we need to do next sample
    // or the ticks buffer is empty.
    do {
      result = ProcessOneSample();
      if (result == FoundSampleForNextCodeEvent) {
        // All ticks of the current last_processed_code_event_id_ are
        // processed, proceed to the next code event.
        ProcessCodeEvent();
      }
      now = base::TimeTicks::HighResolutionNow();
    } while (result != NoSamplesInQueue && now < nextSampleTime);

    if (nextSampleTime > now) {
#if V8_OS_WIN
      // Do not use Sleep on Windows as it is very imprecise.
      // Could be up to 16ms jitter, which is unacceptable for the purpose.
      while (base::TimeTicks::HighResolutionNow() < nextSampleTime) {
      }
#else
      base::OS::Sleep(nextSampleTime - now);
#endif
    }

    // Schedule next sample. sampler_ is NULL in tests.
    if (sampler_) sampler_->DoSample();
  }

  // Process remaining tick events.
  do {
    SampleProcessingResult result;
    do {
      result = ProcessOneSample();
    } while (result == OneSampleProcessed);
  } while (ProcessCodeEvent());
}
186 :
187 :
// Allocates with explicit alignment: plain operator new does not honor
// extended alignment requirements pre-C++17, so AlignedAlloc is used with
// the class's own alignment.
void* ProfilerEventsProcessor::operator new(size_t size) {
  return AlignedAlloc(size, V8_ALIGNOF(ProfilerEventsProcessor));
}
191 :
192 :
// Matches the AlignedAlloc in operator new above.
void ProfilerEventsProcessor::operator delete(void* ptr) {
  AlignedFree(ptr);
}
196 :
197 :
198 196 : int CpuProfiler::GetProfilesCount() {
199 : // The count of profiles doesn't depend on a security token.
200 196 : return profiles_->profiles()->length();
201 : }
202 :
203 :
204 61 : CpuProfile* CpuProfiler::GetProfile(int index) {
205 61 : return profiles_->profiles()->at(index);
206 : }
207 :
208 :
// Stops the sampling processor (if one is running) and then drops every
// collected profile by rebuilding the collection.
void CpuProfiler::DeleteAllProfiles() {
  if (is_profiling_) StopProcessor();
  ResetProfiles();
}
213 :
214 :
// Removes |profile| from the collection and destroys it. When it was the
// last profile and no session is active, the accessory data is released too.
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  profiles_->RemoveProfile(profile);
  delete profile;
  if (profiles_->profiles()->is_empty() && !is_profiling_) {
    // If this was the last profile, clean up all accessory data as well.
    ResetProfiles();
  }
}
223 :
// Receives code events from the ProfilerListener. Plain code events are
// forwarded to the processor thread; deopt events additionally record a
// synthetic stack sample at the deopt site (after enqueueing the event, so
// the sample's code-event id covers it).
void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
  switch (evt_rec.generic.type) {
    case CodeEventRecord::CODE_CREATION:
    case CodeEventRecord::CODE_MOVE:
    case CodeEventRecord::CODE_DISABLE_OPT:
      processor_->Enqueue(evt_rec);
      break;
    case CodeEventRecord::CODE_DEOPT: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      Address pc = reinterpret_cast<Address>(rec->pc);
      int fp_to_sp_delta = rec->fp_to_sp_delta;
      processor_->Enqueue(evt_rec);
      processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
      break;
    }
    default:
      UNREACHABLE();
  }
}
243 :
// Creates a profiler for |isolate| with the sampling interval taken from
// the --cpu_profiler_sampling_interval flag. No sampling machinery is
// started until StartProfiling() is called.
CpuProfiler::CpuProfiler(Isolate* isolate)
    : isolate_(isolate),
      sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(new CpuProfilesCollection(isolate)),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
}
252 :
// Test-only constructor: injects pre-built profiles collection, generator
// and processor instead of creating them lazily on StartProfiling().
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
                         ProfileGenerator* test_generator,
                         ProfilerEventsProcessor* test_processor)
    : isolate_(isolate),
      sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(test_profiles),
      generator_(test_generator),
      processor_(test_processor),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
}
265 :
CpuProfiler::~CpuProfiler() {
  // Profiling must already have been stopped before destruction.
  DCHECK(!is_profiling_);
}
269 :
// Adjusts the tick period for future sessions; must not be called while a
// profiling session is in progress.
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
  DCHECK(!is_profiling_);
  sampling_interval_ = value;
}
274 :
// Replaces the profiles collection with a fresh one, dropping every
// collected profile, and re-attaches this profiler to it.
void CpuProfiler::ResetProfiles() {
  profiles_.reset(new CpuProfilesCollection(isolate_));
  profiles_->set_cpu_profiler(this);
}
279 :
// Registers one "native V8Runtime" CodeEntry per runtime-call-stats counter
// in the code map, keyed by the counter object's address (size 1). The
// entries are owned by static_entries_, which is cleared first so repeated
// sessions do not accumulate stale entries.
void CpuProfiler::CreateEntriesForRuntimeCallStats() {
  static_entries_.clear();
  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
  CodeMap* code_map = generator_->code_map();
  for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
    RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
    DCHECK(counter->name());
    std::unique_ptr<CodeEntry> entry(
        new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
                      CodeEntry::kEmptyNamePrefix, "native V8Runtime"));
    code_map->AddCode(reinterpret_cast<Address>(counter), entry.get(), 1);
    static_entries_.push_back(std::move(entry));
  }
}
294 :
295 6 : void CpuProfiler::CollectSample() {
296 6 : if (processor_) {
297 6 : processor_->AddCurrentStack(isolate_);
298 : }
299 6 : }
300 :
// Begins collecting a new profile named |title|; the sampling processor is
// started lazily when the first profile becomes active.
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
  if (profiles_->StartProfiling(title, record_samples)) {
    StartProcessorIfNotStarted();
  }
}
306 :
307 :
// String-titled variant: resolves |title| via the collection's name table,
// then notifies the debugger feature tracker that the profiler was used.
void CpuProfiler::StartProfiling(String* title, bool record_samples) {
  StartProfiling(profiles_->GetName(title), record_samples);
  isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
}
312 :
313 :
// Lazily creates and starts the profiling machinery (generator, processor
// thread, profiler listener). If the processor is already running, only a
// sample of the current stack is recorded so the new profile starts with
// one.
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_) {
    processor_->AddCurrentStack(isolate_);
    return;
  }
  Logger* logger = isolate_->logger();
  // Disable logging when using the new implementation.
  saved_is_logging_ = logger->is_logging_;
  logger->is_logging_ = false;
  generator_.reset(new ProfileGenerator(profiles_.get()));
  processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
                                               sampling_interval_));
  CreateEntriesForRuntimeCallStats();
  logger->SetUpProfilerListener();
  ProfilerListener* profiler_listener = logger->profiler_listener();
  profiler_listener->AddObserver(this);
  is_profiling_ = true;
  isolate_->set_is_profiling(true);
  // Enumerate stuff we already have in the heap.
  DCHECK(isolate_->heap()->HasBeenSetUp());
  if (!FLAG_prof_browser_mode) {
    logger->LogCodeObjects();
  }
  logger->LogCompiledFunctions();
  logger->LogAccessorCallbacks();
  LogBuiltins();
  // Enable stack sampling.
  processor_->AddCurrentStack(isolate_);
  processor_->StartSynchronously();
}
344 :
// Finishes the profile named |title| and returns it, or nullptr when no
// profiling session is active. The processor is stopped first if this was
// the last outstanding profile.
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  if (!is_profiling_) return nullptr;
  StopProcessorIfLastProfile(title);
  return profiles_->StopProfiling(title);
}
350 :
351 187 : CpuProfile* CpuProfiler::StopProfiling(String* title) {
352 187 : return StopProfiling(profiles_->GetName(title));
353 : }
354 :
355 230 : void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
356 460 : if (!profiles_->IsLastProfile(title)) return;
357 198 : StopProcessor();
358 : }
359 :
// Tears down the profiling session, mirroring StartProcessorIfNotStarted():
// detach the listener, stop the processor thread synchronously, drop the
// processor and generator, and restore the logger's previous logging state.
void CpuProfiler::StopProcessor() {
  Logger* logger = isolate_->logger();
  is_profiling_ = false;
  isolate_->set_is_profiling(false);
  ProfilerListener* profiler_listener = logger->profiler_listener();
  profiler_listener->RemoveObserver(this);
  processor_->StopSynchronously();
  logger->TearDownProfilerListener();
  processor_.reset();
  generator_.reset();
  logger->is_logging_ = saved_is_logging_;
}
372 :
373 :
// Enqueues a REPORT_BUILTIN code event (start address + builtin id) for
// every builtin so the processor's code map can cover builtin code.
void CpuProfiler::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  DCHECK(builtins->is_initialized());
  for (int i = 0; i < Builtins::builtin_count; i++) {
    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Builtins::Name id = static_cast<Builtins::Name>(i);
    rec->start = builtins->builtin(id)->address();
    rec->builtin_id = id;
    processor_->Enqueue(evt_rec);
  }
}
386 :
387 : } // namespace internal
388 : } // namespace v8
|