Line data Source code
1 : // Copyright 2012 the V8 project authors. All rights reserved.
2 : // Use of this source code is governed by a BSD-style license that can be
3 : // found in the LICENSE file.
4 :
5 : #include "src/profiler/cpu-profiler.h"
6 :
7 : #include "src/debug/debug.h"
8 : #include "src/deoptimizer.h"
9 : #include "src/frames-inl.h"
10 : #include "src/locked-queue-inl.h"
11 : #include "src/log-inl.h"
12 : #include "src/profiler/cpu-profiler-inl.h"
13 : #include "src/vm-state-inl.h"
14 :
15 : namespace v8 {
16 : namespace internal {
17 :
18 : static const int kProfilerStackSize = 64 * KB;
19 :
// Sampler that captures the VM stack on each tick and hands the raw
// TickSample to the ProfilerEventsProcessor's ticks buffer. SampleStack()
// runs in the sampler's interrupt context, so it must not allocate.
class CpuSampler : public sampler::Sampler {
 public:
  CpuSampler(Isolate* isolate, ProfilerEventsProcessor* processor)
      : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
        processor_(processor) {}

  void SampleStack(const v8::RegisterState& regs) override {
    // StartTickSample() returns nullptr when the ticks buffer has no free
    // slot; the tick is then simply dropped.
    TickSample* sample = processor_->StartTickSample();
    if (sample == nullptr) return;
    Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
    sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
    if (is_counting_samples_ && !sample->timestamp.IsNull()) {
      if (sample->state == JS) ++js_sample_count_;
      if (sample->state == EXTERNAL) ++external_sample_count_;
    }
    // Publishes the slot filled above back to the processor.
    processor_->FinishTickSample();
  }

 private:
  ProfilerEventsProcessor* processor_;  // Not owned.
};
41 :
// Constructs the processor in the "running" state (running_ == 1); the
// actual thread is launched later via StartSynchronously(). Owns the
// CpuSampler and raises its profiling depth for the processor's lifetime.
ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
                                                 ProfileGenerator* generator,
                                                 base::TimeDelta period)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      generator_(generator),
      sampler_(new CpuSampler(isolate, this)),
      running_(1),
      period_(period),
      last_code_event_id_(0),
      last_processed_code_event_id_(0) {
  sampler_->IncreaseProfilingDepth();
}
54 :
ProfilerEventsProcessor::~ProfilerEventsProcessor() {
  // Balances the IncreaseProfilingDepth() call in the constructor.
  sampler_->DecreaseProfilingDepth();
}
58 :
// Tags |event| with a monotonically increasing order id and queues it for
// the processor thread. The order id lets ProcessOneSample() hold back ticks
// until all code events they may reference have been applied.
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
  event.generic.order = last_code_event_id_.Increment(1);
  events_buffer_.Enqueue(event);
}
63 :
64 :
// Synthesizes a stack sample at a deoptimization point: reconstructs the
// register state from the C entry frame pointer and |fp_to_sp_delta|, with
// |from| as the pc, then queues the record on the VM-side ticks buffer.
void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
                                            int fp_to_sp_delta) {
  TickSampleEventRecord record(last_code_event_id_.Value());
  RegisterState regs;
  Address fp = isolate->c_entry_fp(isolate->thread_local_top());
  regs.sp = fp - fp_to_sp_delta;
  regs.fp = fp;
  regs.pc = from;
  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false, false);
  ticks_from_vm_buffer_.Enqueue(record);
}
76 :
// Records a sample of the current stack from VM-side code (not from the
// sampler interrupt). Used at profile start and for explicit sample
// collection.
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
                                              bool update_stats) {
  TickSampleEventRecord record(last_code_event_id_.Value());
  RegisterState regs;
  StackFrameIterator it(isolate);
  if (!it.done()) {
    StackFrame* frame = it.frame();
    regs.sp = frame->sp();
    regs.fp = frame->fp();
    regs.pc = frame->pc();
  }
  // NOTE(review): when there is no frame, |regs| keeps its default state —
  // presumably null registers that Init() tolerates; confirm in RegisterState.
  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}
92 :
93 :
// Signals the processor thread to exit its Run() loop and joins it. The
// atomic exchange makes a repeated call a safe no-op: only the caller that
// flips running_ from 1 to 0 performs the Join().
void ProfilerEventsProcessor::StopSynchronously() {
  if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
  Join();
}
98 :
99 :
// Dequeues one code event and applies it to the generator's code map,
// advancing last_processed_code_event_id_. Returns true when an event was
// consumed (or skipped), false when the events queue is empty.
bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    switch (record.generic.type) {
// Expands to one case per code event type, dispatching to the matching
// record member's UpdateCodeMap().
#define PROFILER_TYPE_CASE(type, clss)                          \
      case CodeEventRecord::type:                               \
        record.clss##_.UpdateCodeMap(generator_->code_map());   \
        break;

      CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
      default: return true;  // Skip record.
    }
    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}
119 :
// Processes at most one tick sample, preferring VM-generated ticks over
// sampler ticks. A tick is only recorded once its order id matches
// last_processed_code_event_id_, i.e. every code event it might reference
// has already been applied to the code map.
ProfilerEventsProcessor::SampleProcessingResult
ProfilerEventsProcessor::ProcessOneSample() {
  // VM-side ticks (deopt stacks, AddCurrentStack) are checked first.
  TickSampleEventRecord record1;
  if (ticks_from_vm_buffer_.Peek(&record1) &&
      (record1.order == last_processed_code_event_id_)) {
    TickSampleEventRecord record;
    ticks_from_vm_buffer_.Dequeue(&record);
    generator_->RecordTickSample(record.sample);
    return OneSampleProcessed;
  }

  const TickSampleEventRecord* record = ticks_buffer_.Peek();
  if (record == nullptr) {
    if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
    // A VM tick is waiting on a not-yet-processed code event.
    return FoundSampleForNextCodeEvent;
  }
  if (record->order != last_processed_code_event_id_) {
    // This sampler tick refers to newer code events; process those first.
    return FoundSampleForNextCodeEvent;
  }
  generator_->RecordTickSample(record->sample);
  ticks_buffer_.Remove();
  return OneSampleProcessed;
}
143 :
144 :
// Processor thread main loop: interleaves draining of code events and tick
// samples with periodically triggering the sampler, until StopSynchronously()
// clears |running_|. After the loop exits, all remaining queued events and
// ticks are flushed so no sample is lost at shutdown.
void ProfilerEventsProcessor::Run() {
  while (!!base::Relaxed_Load(&running_)) {
    base::TimeTicks nextSampleTime =
        base::TimeTicks::HighResolutionNow() + period_;
    base::TimeTicks now;
    SampleProcessingResult result;
    // Keep processing existing events until we need to do next sample
    // or the ticks buffer is empty.
    do {
      result = ProcessOneSample();
      if (result == FoundSampleForNextCodeEvent) {
        // All ticks of the current last_processed_code_event_id_ are
        // processed, proceed to the next code event.
        ProcessCodeEvent();
      }
      now = base::TimeTicks::HighResolutionNow();
    } while (result != NoSamplesInQueue && now < nextSampleTime);

    if (nextSampleTime > now) {
#if V8_OS_WIN
      // Do not use Sleep on Windows as it is very imprecise.
      // Could be up to 16ms jitter, which is unacceptable for the purpose.
      while (base::TimeTicks::HighResolutionNow() < nextSampleTime) {
      }
#else
      base::OS::Sleep(nextSampleTime - now);
#endif
    }

    // Schedule next sample. sampler_ is nullptr in tests.
    if (sampler_) sampler_->DoSample();
  }

  // Process remaining tick events.
  do {
    SampleProcessingResult result;
    do {
      result = ProcessOneSample();
    } while (result == OneSampleProcessed);
  } while (ProcessCodeEvent());
}
186 :
187 :
// Allocates storage honoring the class's required alignment, which the
// default operator new may not guarantee.
void* ProfilerEventsProcessor::operator new(size_t size) {
  return AlignedAlloc(size, V8_ALIGNOF(ProfilerEventsProcessor));
}
191 :
192 :
// Frees storage obtained through the class-specific aligned operator new.
void ProfilerEventsProcessor::operator delete(void* ptr) {
  AlignedFree(ptr);
}
196 :
197 :
198 164 : int CpuProfiler::GetProfilesCount() {
199 : // The count of profiles doesn't depend on a security token.
200 328 : return static_cast<int>(profiles_->profiles()->size());
201 : }
202 :
203 :
204 50 : CpuProfile* CpuProfiler::GetProfile(int index) {
205 100 : return profiles_->profiles()->at(index);
206 : }
207 :
208 :
// Stops any in-progress profiling session, then discards every collected
// profile by resetting the collection.
void CpuProfiler::DeleteAllProfiles() {
  if (is_profiling_) StopProcessor();
  ResetProfiles();
}
213 :
214 :
// Removes |profile| from the collection and frees it. Takes ownership of
// |profile| for the purpose of deletion.
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  profiles_->RemoveProfile(profile);
  delete profile;
  if (profiles_->profiles()->empty() && !is_profiling_) {
    // If this was the last profile, clean up all accessory data as well.
    ResetProfiles();
  }
}
223 :
// Observer callback from the ProfilerListener: forwards code events to the
// processor thread's queue. Deopt events additionally capture the stack at
// the deoptimization point so the tick is attributed to the pre-deopt code.
void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
  switch (evt_rec.generic.type) {
    case CodeEventRecord::CODE_CREATION:
    case CodeEventRecord::CODE_MOVE:
    case CodeEventRecord::CODE_DISABLE_OPT:
      processor_->Enqueue(evt_rec);
      break;
    case CodeEventRecord::CODE_DEOPT: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      Address pc = reinterpret_cast<Address>(rec->pc);
      int fp_to_sp_delta = rec->fp_to_sp_delta;
      // Enqueue first so the deopt stack's order id covers this event.
      processor_->Enqueue(evt_rec);
      processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
      break;
    }
    default:
      UNREACHABLE();
  }
}
243 :
// Production constructor: owns a fresh CpuProfilesCollection. The generator
// and processor thread are created lazily when profiling actually starts.
// The sampling interval defaults to FLAG_cpu_profiler_sampling_interval.
CpuProfiler::CpuProfiler(Isolate* isolate)
    : isolate_(isolate),
      sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(new CpuProfilesCollection(isolate)),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
}
252 :
// Test-only constructor: takes ownership of externally created collection,
// generator, and processor so tests can inject mocks/stubs.
CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
                         ProfileGenerator* test_generator,
                         ProfilerEventsProcessor* test_processor)
    : isolate_(isolate),
      sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      profiles_(test_profiles),
      generator_(test_generator),
      processor_(test_processor),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
}
265 :
CpuProfiler::~CpuProfiler() {
  // All profiling sessions must have been stopped before destruction.
  DCHECK(!is_profiling_);
}
269 :
// Sets the tick period used when the processor is (re)started. Changing the
// interval while a session is active is not supported.
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
  DCHECK(!is_profiling_);
  sampling_interval_ = value;
}
274 :
// Replaces the profiles collection with a fresh one, dropping all collected
// data, and re-links the new collection to this profiler.
void CpuProfiler::ResetProfiles() {
  profiles_.reset(new CpuProfilesCollection(isolate_));
  profiles_->set_cpu_profiler(this);
}
279 :
// Registers a synthetic CodeEntry for every runtime call stats counter so
// ticks attributed to a counter resolve to a named "native V8Runtime" entry.
// The counter's own address is used as the code-map key (size 1).
void CpuProfiler::CreateEntriesForRuntimeCallStats() {
  static_entries_.clear();
  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
  CodeMap* code_map = generator_->code_map();
  for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
    // counters[i] is a pointer-to-member into the stats object.
    RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
    DCHECK(counter->name());
    std::unique_ptr<CodeEntry> entry(
        new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
                      CodeEntry::kEmptyNamePrefix, "native V8Runtime"));
    code_map->AddCode(reinterpret_cast<Address>(counter), entry.get(), 1);
    // static_entries_ keeps the entries alive for the code map's lifetime.
    static_entries_.push_back(std::move(entry));
  }
}
294 :
295 5 : void CpuProfiler::CollectSample() {
296 5 : if (processor_) {
297 5 : processor_->AddCurrentStack(isolate_);
298 : }
299 5 : }
300 :
// Begins a profiling session under |title|; the sampling machinery is only
// spun up when the collection accepts the new profile.
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
  if (profiles_->StartProfiling(title, record_samples)) {
    TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
    StartProcessorIfNotStarted();
  }
}
307 :
308 :
// String-title overload: converts the title via the collection's GetName()
// and records profiler usage with the debug feature tracker.
void CpuProfiler::StartProfiling(String* title, bool record_samples) {
  StartProfiling(profiles_->GetName(title), record_samples);
  isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
}
313 :
314 :
// Lazily creates the generator and processor thread on first profile start.
// If the processor already runs, only records the current stack so the new
// profile begins with a sample. The setup order matters: code objects must
// be enumerated into the queue before sampling starts.
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_) {
    processor_->AddCurrentStack(isolate_);
    return;
  }
  Logger* logger = isolate_->logger();
  // Disable logging when using the new implementation.
  saved_is_logging_ = logger->is_logging_;
  logger->is_logging_ = false;
  generator_.reset(new ProfileGenerator(profiles_.get()));
  processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
                                               sampling_interval_));
  CreateEntriesForRuntimeCallStats();
  logger->SetUpProfilerListener();
  ProfilerListener* profiler_listener = logger->profiler_listener();
  profiler_listener->AddObserver(this);
  is_profiling_ = true;
  isolate_->set_is_profiling(true);
  // Enumerate stuff we already have in the heap.
  DCHECK(isolate_->heap()->HasBeenSetUp());
  if (!FLAG_prof_browser_mode) {
    logger->LogCodeObjects();
  }
  logger->LogCompiledFunctions();
  logger->LogAccessorCallbacks();
  LogBuiltins();
  // Enable stack sampling.
  processor_->AddCurrentStack(isolate_);
  processor_->StartSynchronously();
}
345 :
// Ends the session named |title| and returns its profile, or nullptr when
// no profiling is active. Shuts down the processor thread first if this was
// the last running profile.
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  if (!is_profiling_) return nullptr;
  StopProcessorIfLastProfile(title);
  return profiles_->StopProfiling(title);
}
351 :
// String-title overload; delegates to the const char* version.
CpuProfile* CpuProfiler::StopProfiling(String* title) {
  return StopProfiling(profiles_->GetName(title));
}
355 :
356 192 : void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
357 384 : if (!profiles_->IsLastProfile(title)) return;
358 165 : StopProcessor();
359 : }
360 :
// Tears down the profiling machinery in roughly the reverse order of
// StartProcessorIfNotStarted() and restores the saved logger state.
void CpuProfiler::StopProcessor() {
  Logger* logger = isolate_->logger();
  is_profiling_ = false;
  isolate_->set_is_profiling(false);
  ProfilerListener* profiler_listener = logger->profiler_listener();
  profiler_listener->RemoveObserver(this);
  // Joins the processor thread before releasing generator_/processor_.
  processor_->StopSynchronously();
  logger->TearDownProfilerListener();
  processor_.reset();
  generator_.reset();
  logger->is_logging_ = saved_is_logging_;
}
373 :
374 :
// Enqueues a REPORT_BUILTIN code event for every builtin so that ticks
// landing in builtin code can be attributed by the processor's code map.
void CpuProfiler::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  DCHECK(builtins->is_initialized());
  for (int i = 0; i < Builtins::builtin_count; i++) {
    CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Builtins::Name id = static_cast<Builtins::Name>(i);
    rec->start = builtins->builtin(id)->address();
    rec->builtin_id = id;
    processor_->Enqueue(evt_rec);
  }
}
387 :
388 : } // namespace internal
389 : } // namespace v8
|