LCOV - code coverage report
Current view: top level - src/profiler - cpu-profiler.cc (source / functions) Hit Total Coverage
Test: app.info Lines: 218 223 97.8 %
Date: 2019-04-19 Functions: 41 44 93.2 %

          Line data    Source code
       1             : // Copyright 2012 the V8 project authors. All rights reserved.
       2             : // Use of this source code is governed by a BSD-style license that can be
       3             : // found in the LICENSE file.
       4             : 
       5             : #include "src/profiler/cpu-profiler.h"
       6             : 
       7             : #include <unordered_map>
       8             : #include <utility>
       9             : 
      10             : #include "src/base/lazy-instance.h"
      11             : #include "src/base/template-utils.h"
      12             : #include "src/debug/debug.h"
      13             : #include "src/frames-inl.h"
      14             : #include "src/locked-queue-inl.h"
      15             : #include "src/log.h"
      16             : #include "src/profiler/cpu-profiler-inl.h"
      17             : #include "src/vm-state-inl.h"
      18             : #include "src/wasm/wasm-engine.h"
      19             : 
      20             : namespace v8 {
      21             : namespace internal {
      22             : 
      23             : static const int kProfilerStackSize = 64 * KB;
      24             : 
      25         807 : class CpuSampler : public sampler::Sampler {
      26             :  public:
      27             :   CpuSampler(Isolate* isolate, SamplingEventsProcessor* processor)
      28             :       : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
      29         812 :         processor_(processor) {}
      30             : 
      31       63078 :   void SampleStack(const v8::RegisterState& regs) override {
      32       63078 :     TickSample* sample = processor_->StartTickSample();
      33       63078 :     if (sample == nullptr) return;
      34             :     Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
      35       63078 :     sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
      36      117732 :     if (is_counting_samples_ && !sample->timestamp.IsNull()) {
      37       54654 :       if (sample->state == JS) ++js_sample_count_;
      38       54654 :       if (sample->state == EXTERNAL) ++external_sample_count_;
      39             :     }
      40       63078 :     processor_->FinishTickSample();
      41             :   }
      42             : 
      43             :  private:
      44             :   SamplingEventsProcessor* processor_;
      45             : };
      46             : 
      47         812 : ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
      48             :                                                  ProfileGenerator* generator)
      49             :     : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      50             :       generator_(generator),
      51             :       running_(1),
      52             :       last_code_event_id_(0),
      53             :       last_processed_code_event_id_(0),
      54        2436 :       isolate_(isolate) {}
      55             : 
      56         812 : SamplingEventsProcessor::SamplingEventsProcessor(Isolate* isolate,
      57             :                                                  ProfileGenerator* generator,
      58             :                                                  base::TimeDelta period,
      59             :                                                  bool use_precise_sampling)
      60             :     : ProfilerEventsProcessor(isolate, generator),
      61             :       sampler_(new CpuSampler(isolate, this)),
      62             :       period_(period),
      63        2436 :       use_precise_sampling_(use_precise_sampling) {
      64         812 :   sampler_->Start();
      65         812 : }
      66             : 
      67        4035 : SamplingEventsProcessor::~SamplingEventsProcessor() { sampler_->Stop(); }
      68             : 
      69             : ProfilerEventsProcessor::~ProfilerEventsProcessor() = default;
      70             : 
      71     1825174 : void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
      72     1825174 :   event.generic.order = ++last_code_event_id_;
      73     1825174 :   events_buffer_.Enqueue(event);
      74     1825174 : }
      75             : 
      76           6 : void ProfilerEventsProcessor::AddDeoptStack(Address from, int fp_to_sp_delta) {
      77             :   TickSampleEventRecord record(last_code_event_id_);
      78             :   RegisterState regs;
      79           6 :   Address fp = isolate_->c_entry_fp(isolate_->thread_local_top());
      80           6 :   regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
      81           6 :   regs.fp = reinterpret_cast<void*>(fp);
      82           6 :   regs.pc = reinterpret_cast<void*>(from);
      83             :   record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, false,
      84           6 :                      false);
      85           6 :   ticks_from_vm_buffer_.Enqueue(record);
      86           6 : }
      87             : 
      88         963 : void ProfilerEventsProcessor::AddCurrentStack(bool update_stats) {
      89             :   TickSampleEventRecord record(last_code_event_id_);
      90             :   RegisterState regs;
      91         963 :   StackFrameIterator it(isolate_);
      92         963 :   if (!it.done()) {
      93             :     StackFrame* frame = it.frame();
      94         199 :     regs.sp = reinterpret_cast<void*>(frame->sp());
      95         199 :     regs.fp = reinterpret_cast<void*>(frame->fp());
      96         199 :     regs.pc = reinterpret_cast<void*>(frame->pc());
      97             :   }
      98         963 :   record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, update_stats,
      99         963 :                      false);
     100         963 :   ticks_from_vm_buffer_.Enqueue(record);
     101         963 : }
     102             : 
     103          34 : void ProfilerEventsProcessor::AddSample(TickSample sample) {
     104             :   TickSampleEventRecord record(last_code_event_id_);
     105          34 :   record.sample = sample;
     106          34 :   ticks_from_vm_buffer_.Enqueue(record);
     107          34 : }
     108             : 
     109         817 : void ProfilerEventsProcessor::StopSynchronously() {
     110        1634 :   if (!base::Relaxed_AtomicExchange(&running_, 0)) return;
     111             :   {
     112         812 :     base::MutexGuard guard(&running_mutex_);
     113         812 :     running_cond_.NotifyOne();
     114             :   }
     115         812 :   Join();
     116             : }
     117             : 
     118             : 
     119     1825983 : bool ProfilerEventsProcessor::ProcessCodeEvent() {
     120             :   CodeEventsContainer record;
     121     1825983 :   if (events_buffer_.Dequeue(&record)) {
     122     1825171 :     switch (record.generic.type) {
     123             : #define PROFILER_TYPE_CASE(type, clss)                          \
     124             :       case CodeEventRecord::type:                               \
     125             :         record.clss##_.UpdateCodeMap(generator_->code_map());   \
     126             :         break;
     127             : 
     128     1825174 :       CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
     129             : 
     130             : #undef PROFILER_TYPE_CASE
     131             :       default: return true;  // Skip record.
     132             :     }
     133     1825166 :     last_processed_code_event_id_ = record.generic.order;
     134     1825166 :     return true;
     135             :   }
     136             :   return false;
     137             : }
     138             : 
     139      663888 : void ProfilerEventsProcessor::CodeEventHandler(
     140             :     const CodeEventsContainer& evt_rec) {
     141      663888 :   switch (evt_rec.generic.type) {
     142             :     case CodeEventRecord::CODE_CREATION:
     143             :     case CodeEventRecord::CODE_MOVE:
     144             :     case CodeEventRecord::CODE_DISABLE_OPT:
     145      663882 :       Enqueue(evt_rec);
     146      663882 :       break;
     147             :     case CodeEventRecord::CODE_DEOPT: {
     148             :       const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
     149           6 :       Address pc = rec->pc;
     150           6 :       int fp_to_sp_delta = rec->fp_to_sp_delta;
     151           6 :       Enqueue(evt_rec);
     152           6 :       AddDeoptStack(pc, fp_to_sp_delta);
     153           6 :       break;
     154             :     }
     155             :     case CodeEventRecord::NONE:
     156             :     case CodeEventRecord::REPORT_BUILTIN:
     157           0 :       UNREACHABLE();
     158             :   }
     159      663888 : }
     160             : 
     161             : ProfilerEventsProcessor::SampleProcessingResult
     162     1954825 : SamplingEventsProcessor::ProcessOneSample() {
     163             :   TickSampleEventRecord record1;
     164     3780269 :   if (ticks_from_vm_buffer_.Peek(&record1) &&
     165     1825444 :       (record1.order == last_processed_code_event_id_)) {
     166             :     TickSampleEventRecord record;
     167        1003 :     ticks_from_vm_buffer_.Dequeue(&record);
     168        1003 :     generator_->RecordTickSample(record.sample);
     169             :     return OneSampleProcessed;
     170             :   }
     171             : 
     172     1953827 :   const TickSampleEventRecord* record = ticks_buffer_.Peek();
     173     1953829 :   if (record == nullptr) {
     174     1013583 :     if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
     175      947704 :     return FoundSampleForNextCodeEvent;
     176             :   }
     177      940246 :   if (record->order != last_processed_code_event_id_) {
     178             :     return FoundSampleForNextCodeEvent;
     179             :   }
     180       62893 :   generator_->RecordTickSample(record->sample);
     181             :   ticks_buffer_.Remove();
     182       62893 :   return OneSampleProcessed;
     183             : }
     184             : 
     185         812 : void SamplingEventsProcessor::Run() {
     186         812 :   base::MutexGuard guard(&running_mutex_);
     187      199048 :   while (!!base::Relaxed_Load(&running_)) {
     188             :     base::TimeTicks nextSampleTime =
     189      131616 :         base::TimeTicks::HighResolutionNow() + period_;
     190             :     base::TimeTicks now;
     191             :     SampleProcessingResult result;
     192             :     // Keep processing existing events until we need to do next sample
     193             :     // or the ticks buffer is empty.
     194      545700 :     do {
     195      545700 :       result = ProcessOneSample();
     196      545700 :       if (result == FoundSampleForNextCodeEvent) {
     197             :         // All ticks of the current last_processed_code_event_id_ are
     198             :         // processed, proceed to the next code event.
     199      418008 :         ProcessCodeEvent();
     200             :       }
     201      545700 :       now = base::TimeTicks::HighResolutionNow();
     202      545700 :     } while (result != NoSamplesInQueue && now < nextSampleTime);
     203             : 
     204       65808 :     if (nextSampleTime > now) {
     205             : #if V8_OS_WIN
     206             :       if (use_precise_sampling_ &&
     207             :           nextSampleTime - now < base::TimeDelta::FromMilliseconds(100)) {
     208             :         // Do not use Sleep on Windows as it is very imprecise, with up to 16ms
     209             :         // jitter, which is unacceptable for short profile intervals.
     210             :         while (base::TimeTicks::HighResolutionNow() < nextSampleTime) {
     211             :         }
     212             :       } else  // NOLINT
     213             : #else
     214             :       USE(use_precise_sampling_);
     215             : #endif  // V8_OS_WIN
     216             :       {
     217             :         // Allow another thread to interrupt the delay between samples in the
     218             :         // event of profiler shutdown.
     219      194845 :         while (now < nextSampleTime &&
     220      194845 :                running_cond_.WaitFor(&running_mutex_, nextSampleTime - now)) {
     221             :           // If true was returned, we got interrupted before the timeout
     222             :           // elapsed. If this was not due to a change in running state, a
     223             :           // spurious wakeup occurred (thus we should continue to wait).
     224         201 :           if (!base::Relaxed_Load(&running_)) {
     225             :             break;
     226             :           }
     227           0 :           now = base::TimeTicks::HighResolutionNow();
     228             :         }
     229             :       }
     230             :     }
     231             : 
     232             :     // Schedule next sample.
     233       65807 :     sampler_->DoSample();
     234             :   }
     235             : 
     236             :   // Process remaining tick events.
     237     1407978 :   do {
     238             :     SampleProcessingResult result;
     239             :     do {
     240     1409132 :       result = ProcessOneSample();
     241     1409132 :     } while (result == OneSampleProcessed);
     242     1407978 :   } while (ProcessCodeEvent());
     243         812 : }
     244             : 
     245          34 : void* SamplingEventsProcessor::operator new(size_t size) {
     246         812 :   return AlignedAlloc(size, alignof(SamplingEventsProcessor));
     247             : }
     248             : 
     249         807 : void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }
     250             : 
     251         155 : int CpuProfiler::GetProfilesCount() {
     252             :   // The count of profiles doesn't depend on a security token.
     253         155 :   return static_cast<int>(profiles_->profiles()->size());
     254             : }
     255             : 
     256             : 
     257          41 : CpuProfile* CpuProfiler::GetProfile(int index) {
     258          82 :   return profiles_->profiles()->at(index).get();
     259             : }
     260             : 
     261             : 
     262          50 : void CpuProfiler::DeleteAllProfiles() {
     263          50 :   if (is_profiling_) StopProcessor();
     264          50 :   ResetProfiles();
     265          50 : }
     266             : 
     267             : 
     268         770 : void CpuProfiler::DeleteProfile(CpuProfile* profile) {
     269         770 :   profiles_->RemoveProfile(profile);
     270         770 :   if (profiles_->profiles()->empty() && !is_profiling_) {
     271             :     // If this was the last profile, clean up all accessory data as well.
     272         630 :     ResetProfiles();
     273             :   }
     274         770 : }
     275             : 
     276             : namespace {
     277             : 
     278         267 : class CpuProfilersManager {
     279             :  public:
     280         292 :   void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
     281         292 :     base::MutexGuard lock(&mutex_);
     282             :     profilers_.emplace(isolate, profiler);
     283         292 :   }
     284             : 
     285         292 :   void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
     286         292 :     base::MutexGuard lock(&mutex_);
     287             :     auto range = profilers_.equal_range(isolate);
     288         292 :     for (auto it = range.first; it != range.second; ++it) {
     289         292 :       if (it->second != profiler) continue;
     290             :       profilers_.erase(it);
     291         292 :       return;
     292             :     }
     293           0 :     UNREACHABLE();
     294             :   }
     295             : 
     296          10 :   void CallCollectSample(Isolate* isolate) {
     297          10 :     base::MutexGuard lock(&mutex_);
     298             :     auto range = profilers_.equal_range(isolate);
     299          20 :     for (auto it = range.first; it != range.second; ++it) {
     300          10 :       it->second->CollectSample();
     301             :     }
     302          10 :   }
     303             : 
     304             :  private:
     305             :   std::unordered_multimap<Isolate*, CpuProfiler*> profilers_;
     306             :   base::Mutex mutex_;
     307             : };
     308             : 
     309         594 : DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager)
     310             : 
     311             : }  // namespace
     312             : 
     313         273 : CpuProfiler::CpuProfiler(Isolate* isolate)
     314         273 :     : CpuProfiler(isolate, new CpuProfilesCollection(isolate), nullptr,
     315         546 :                   nullptr) {}
     316             : 
     317         292 : CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
     318             :                          ProfileGenerator* test_generator,
     319             :                          ProfilerEventsProcessor* test_processor)
     320             :     : isolate_(isolate),
     321             :       sampling_interval_(base::TimeDelta::FromMicroseconds(
     322         292 :           FLAG_cpu_profiler_sampling_interval)),
     323             :       profiles_(test_profiles),
     324             :       generator_(test_generator),
     325             :       processor_(test_processor),
     326         876 :       is_profiling_(false) {
     327             :   profiles_->set_cpu_profiler(this);
     328         292 :   GetProfilersManager()->AddProfiler(isolate, this);
     329         292 : }
     330             : 
     331         584 : CpuProfiler::~CpuProfiler() {
     332             :   DCHECK(!is_profiling_);
     333         292 :   GetProfilersManager()->RemoveProfiler(isolate_, this);
     334         292 : }
     335             : 
     336         575 : void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
     337             :   DCHECK(!is_profiling_);
     338         575 :   sampling_interval_ = value;
     339         575 : }
     340             : 
     341           5 : void CpuProfiler::set_use_precise_sampling(bool value) {
     342             :   DCHECK(!is_profiling_);
     343           5 :   use_precise_sampling_ = value;
     344           5 : }
     345             : 
     346         680 : void CpuProfiler::ResetProfiles() {
     347         680 :   profiles_.reset(new CpuProfilesCollection(isolate_));
     348             :   profiles_->set_cpu_profiler(this);
     349             :   profiler_listener_.reset();
     350         680 :   generator_.reset();
     351         680 : }
     352             : 
     353         763 : void CpuProfiler::CreateEntriesForRuntimeCallStats() {
     354         763 :   RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
     355             :   CodeMap* code_map = generator_->code_map();
     356     1752611 :   for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
     357             :     RuntimeCallCounter* counter = rcs->GetCounter(i);
     358             :     DCHECK(counter->name());
     359             :     auto entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
     360      875924 :                                "native V8Runtime");
     361      875924 :     code_map->AddCode(reinterpret_cast<Address>(counter), entry, 1);
     362             :   }
     363         763 : }
     364             : 
     365             : // static
     366          10 : void CpuProfiler::CollectSample(Isolate* isolate) {
     367          10 :   GetProfilersManager()->CallCollectSample(isolate);
     368          10 : }
     369             : 
     370           0 : void CpuProfiler::CollectSample() {
     371          10 :   if (processor_) {
     372          10 :     processor_->AddCurrentStack();
     373             :   }
     374           0 : }
     375             : 
     376         928 : void CpuProfiler::StartProfiling(const char* title, bool record_samples,
     377             :                                  ProfilingMode mode) {
     378        1856 :   if (profiles_->StartProfiling(title, record_samples, mode)) {
     379        2784 :     TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
     380         928 :     StartProcessorIfNotStarted();
     381             :   }
     382         928 : }
     383             : 
     384         853 : void CpuProfiler::StartProfiling(String title, bool record_samples,
     385             :                                  ProfilingMode mode) {
     386        1706 :   StartProfiling(profiles_->GetName(title), record_samples, mode);
     387        1706 :   isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
     388         853 : }
     389             : 
     390         928 : void CpuProfiler::StartProcessorIfNotStarted() {
     391         928 :   if (processor_) {
     392         150 :     processor_->AddCurrentStack();
     393         150 :     return;
     394             :   }
     395         778 :   isolate_->wasm_engine()->EnableCodeLogging(isolate_);
     396         778 :   Logger* logger = isolate_->logger();
     397             :   // Disable logging when using the new implementation.
     398         778 :   saved_is_logging_ = logger->is_logging();
     399             :   logger->set_is_logging(false);
     400             : 
     401             :   bool codemap_needs_initialization = false;
     402         778 :   if (!generator_) {
     403         763 :     generator_.reset(new ProfileGenerator(profiles_.get()));
     404             :     codemap_needs_initialization = true;
     405         763 :     CreateEntriesForRuntimeCallStats();
     406             :   }
     407        1556 :   processor_.reset(new SamplingEventsProcessor(
     408         778 :       isolate_, generator_.get(), sampling_interval_, use_precise_sampling_));
     409         778 :   if (profiler_listener_) {
     410          15 :     profiler_listener_->set_observer(processor_.get());
     411             :   } else {
     412         763 :     profiler_listener_.reset(new ProfilerListener(isolate_, processor_.get()));
     413             :   }
     414         778 :   logger->AddCodeEventListener(profiler_listener_.get());
     415         778 :   is_profiling_ = true;
     416         778 :   isolate_->set_is_profiling(true);
     417             :   // Enumerate stuff we already have in the heap.
     418             :   DCHECK(isolate_->heap()->HasBeenSetUp());
     419         778 :   if (codemap_needs_initialization) {
     420         763 :     if (!FLAG_prof_browser_mode) {
     421          25 :       logger->LogCodeObjects();
     422             :     }
     423         763 :     logger->LogCompiledFunctions();
     424         763 :     logger->LogAccessorCallbacks();
     425         763 :     LogBuiltins();
     426             :   }
     427             :   // Enable stack sampling.
     428         778 :   processor_->AddCurrentStack();
     429         778 :   processor_->StartSynchronously();
     430             : }
     431             : 
     432         883 : CpuProfile* CpuProfiler::StopProfiling(const char* title) {
     433         883 :   if (!is_profiling_) return nullptr;
     434         883 :   StopProcessorIfLastProfile(title);
     435         883 :   return profiles_->StopProfiling(title);
     436             : }
     437             : 
     438         818 : CpuProfile* CpuProfiler::StopProfiling(String title) {
     439         818 :   return StopProfiling(profiles_->GetName(title));
     440             : }
     441             : 
     442         883 : void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
     443         883 :   if (!profiles_->IsLastProfile(title)) return;
     444         743 :   StopProcessor();
     445             : }
     446             : 
     447         778 : void CpuProfiler::StopProcessor() {
     448         778 :   Logger* logger = isolate_->logger();
     449         778 :   is_profiling_ = false;
     450             :   isolate_->set_is_profiling(false);
     451         778 :   logger->RemoveCodeEventListener(profiler_listener_.get());
     452         778 :   processor_->StopSynchronously();
     453             :   processor_.reset();
     454         778 :   logger->set_is_logging(saved_is_logging_);
     455         778 : }
     456             : 
     457             : 
     458         763 : void CpuProfiler::LogBuiltins() {
     459         763 :   Builtins* builtins = isolate_->builtins();
     460             :   DCHECK(builtins->is_initialized());
     461     2323335 :   for (int i = 0; i < Builtins::builtin_count; i++) {
     462             :     CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
     463             :     ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
     464             :     Builtins::Name id = static_cast<Builtins::Name>(i);
     465     2322572 :     rec->instruction_start = builtins->builtin(id)->InstructionStart();
     466     1161286 :     rec->builtin_id = id;
     467     1161286 :     processor_->Enqueue(evt_rec);
     468             :   }
     469         763 : }
     470             : 
     471             : }  // namespace internal
     472      122036 : }  // namespace v8

Generated by: LCOV version 1.10