// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PROFILER_PROFILE_GENERATOR_H_
#define V8_PROFILER_PROFILE_GENERATOR_H_

#include <atomic>
#include <deque>
#include <limits>
#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
#include "src/execution/vm-state.h"
#include "src/logging/code-events.h"
#include "src/profiler/strings-storage.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

struct TickSample;

// Provides a mapping from the offsets within generated code or a bytecode array
// to the source line and inlining id.
class V8_EXPORT_PRIVATE SourcePositionTable : public Malloced {
 public:
  SourcePositionTable() = default;
  SourcePositionTable(const SourcePositionTable&) = delete;
  SourcePositionTable& operator=(const SourcePositionTable&) = delete;

  void SetPosition(int pc_offset, int line, int inlining_id);
  int GetSourceLineNumber(int pc_offset) const;
  int GetInliningId(int pc_offset) const;

  size_t Size() const;
  void print() const;

 private:
  struct SourcePositionTuple {
    bool operator<(const SourcePositionTuple& other) const {
      return pc_offset < other.pc_offset;
    }
    int pc_offset;
    int line_number;
    int inlining_id;
  };
  // This is logically a map, but we store it as a vector of tuples, sorted by
  // the pc offset, so that we can save space and look up items using binary
  // search.
  std::vector<SourcePositionTuple> pc_offsets_to_lines_;
};
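
// Usage sketch (illustrative, not part of the API): positions are recorded
// per pc offset and queried back via binary search over the sorted vector:
//
//   SourcePositionTable table;
//   table.SetPosition(/*pc_offset=*/0, /*line=*/10, /*inlining_id=*/0);
//   table.SetPosition(/*pc_offset=*/48, /*line=*/12, /*inlining_id=*/0);
//   int line = table.GetSourceLineNumber(48);  // -> 12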

struct CodeEntryAndLineNumber;

class CodeEntry {
 public:
  enum class CodeType { JS, WASM, OTHER };

  // CodeEntry may reference strings (|name|, |resource_name|) managed by a
  // StringsStorage instance. These must be freed via ReleaseStrings.
  inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
                   const char* resource_name = CodeEntry::kEmptyResourceName,
                   int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
                   int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
                   std::unique_ptr<SourcePositionTable> line_info = nullptr,
                   bool is_shared_cross_origin = false,
                   CodeType code_type = CodeType::JS);
  CodeEntry(const CodeEntry&) = delete;
  CodeEntry& operator=(const CodeEntry&) = delete;
  ~CodeEntry() {
    // No live handles should be associated with the CodeEntry at the time of
    // destruction.
    DCHECK(!heap_object_location_);
    DCHECK_EQ(ref_count_, 0UL);
  }

  const char* name() const { return name_; }
  const char* resource_name() const { return resource_name_; }
  int line_number() const { return line_number_; }
  int column_number() const { return column_number_; }
  const SourcePositionTable* line_info() const { return line_info_.get(); }
  int script_id() const { return script_id_; }
  void set_script_id(int script_id) { script_id_ = script_id; }
  int position() const { return position_; }
  void set_position(int position) { position_ = position; }
  void set_bailout_reason(const char* bailout_reason) {
    EnsureRareData()->bailout_reason_ = bailout_reason;
  }
  const char* bailout_reason() const {
    return rare_data_ ? rare_data_->bailout_reason_ : kEmptyBailoutReason;
  }

  void set_deopt_info(const char* deopt_reason, int deopt_id,
                      std::vector<CpuProfileDeoptFrame> inlined_frames);

  size_t EstimatedSize() const;
  CpuProfileDeoptInfo GetDeoptInfo();
  bool has_deopt_info() const {
    return rare_data_ && rare_data_->deopt_id_ != kNoDeoptimizationId;
  }
  void clear_deopt_info() {
    if (!rare_data_) return;
    // TODO(alph): Clear rare_data_ if that was the only field in use.
    rare_data_->deopt_reason_ = kNoDeoptReason;
    rare_data_->deopt_id_ = kNoDeoptimizationId;
  }

  const char* code_type_string() const {
    switch (CodeTypeField::decode(bit_field_)) {
      case CodeType::JS:
        return "JS";
      case CodeType::WASM:
        return "wasm";
      case CodeType::OTHER:
        return "other";
    }
  }

  // Returns the start address of the instruction segment represented by this
  // CodeEntry. Used as a key in the containing CodeMap.
  Address instruction_start() const { return instruction_start_; }
  void set_instruction_start(Address address) { instruction_start_ = address; }

  Address** heap_object_location_address() { return &heap_object_location_; }

  void FillFunctionInfo(SharedFunctionInfo shared);

  void SetBuiltinId(Builtin id);
  Builtin builtin() const { return BuiltinField::decode(bit_field_); }

  bool is_shared_cross_origin() const {
    return SharedCrossOriginField::decode(bit_field_);
  }

  // Returns whether the lifetime of this CodeEntry is reference-counted and
  // managed by a CodeMap.
  bool is_ref_counted() const { return RefCountedField::decode(bit_field_); }

  uint32_t GetHash() const;
  bool IsSameFunctionAs(const CodeEntry* entry) const;

  int GetSourceLine(int pc_offset) const;

  struct Equals {
    bool operator()(const CodeEntry* lhs, const CodeEntry* rhs) const {
      return lhs->IsSameFunctionAs(rhs);
    }
  };
  struct Hasher {
    std::size_t operator()(CodeEntry* e) const { return e->GetHash(); }
  };

  void SetInlineStacks(
      std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries,
      std::unordered_map<int, std::vector<CodeEntryAndLineNumber>>
          inline_stacks);
  const std::vector<CodeEntryAndLineNumber>* GetInlineStack(
      int pc_offset) const;

  CodeEventListener::LogEventsAndTags tag() const {
    return TagField::decode(bit_field_);
  }

  V8_EXPORT_PRIVATE static const char* const kEmptyResourceName;
  static const char* const kEmptyBailoutReason;
  static const char* const kNoDeoptReason;

  V8_EXPORT_PRIVATE static const char* const kProgramEntryName;
  V8_EXPORT_PRIVATE static const char* const kIdleEntryName;
  V8_EXPORT_PRIVATE static const char* const kGarbageCollectorEntryName;
  // Used to represent frames for which we have no reliable way to
  // detect the associated function.
  V8_EXPORT_PRIVATE static const char* const kUnresolvedFunctionName;
  V8_EXPORT_PRIVATE static const char* const kRootEntryName;

  V8_EXPORT_PRIVATE static CodeEntry* program_entry();
  V8_EXPORT_PRIVATE static CodeEntry* idle_entry();
  V8_EXPORT_PRIVATE static CodeEntry* gc_entry();
  V8_EXPORT_PRIVATE static CodeEntry* unresolved_entry();
  V8_EXPORT_PRIVATE static CodeEntry* root_entry();

  // Releases strings owned by this CodeEntry, which may be allocated in the
  // provided StringsStorage instance. The instance is not stored directly
  // with the CodeEntry, in order to reduce the memory footprint.
  // Must be called before destruction.
  void ReleaseStrings(StringsStorage& strings);

  void print() const;

 private:
  friend class CodeEntryStorage;

  struct RareData {
    const char* deopt_reason_ = kNoDeoptReason;
    const char* bailout_reason_ = kEmptyBailoutReason;
    int deopt_id_ = kNoDeoptimizationId;
    std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks_;
    std::unordered_set<CodeEntry*, Hasher, Equals> inline_entries_;
    std::vector<CpuProfileDeoptFrame> deopt_inlined_frames_;
  };

  RareData* EnsureRareData();

  void mark_ref_counted() {
    bit_field_ = RefCountedField::update(bit_field_, true);
    ref_count_ = 1;
  }

  size_t AddRef() {
    DCHECK(is_ref_counted());
    DCHECK_LT(ref_count_, std::numeric_limits<size_t>::max());
    ref_count_++;
    return ref_count_;
  }

  size_t DecRef() {
    DCHECK(is_ref_counted());
    DCHECK_GT(ref_count_, 0UL);
    ref_count_--;
    return ref_count_;
  }

  using TagField = base::BitField<CodeEventListener::LogEventsAndTags, 0, 8>;
  using BuiltinField = base::BitField<Builtin, 8, 20>;
  static_assert(Builtins::kBuiltinCount <= BuiltinField::kNumValues,
                "builtin_count exceeds size of bitfield");
  using RefCountedField = base::BitField<bool, 28, 1>;
  using CodeTypeField = base::BitField<CodeType, 29, 2>;
  using SharedCrossOriginField = base::BitField<bool, 31, 1>;
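  // Together, these fields define the 32-bit layout of bit_field_:
  //   bits  0..7   TagField
  //   bits  8..27  BuiltinField
  //   bit   28     RefCountedField
  //   bits 29..30  CodeTypeField
  //   bit   31     SharedCrossOriginField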

  std::uint32_t bit_field_;
  std::atomic<std::size_t> ref_count_ = {0};
  const char* name_;
  const char* resource_name_;
  int line_number_;
  int column_number_;
  int script_id_;
  int position_;
  std::unique_ptr<SourcePositionTable> line_info_;
  std::unique_ptr<RareData> rare_data_;
  Address instruction_start_ = kNullAddress;
  Address* heap_object_location_ = nullptr;
};

struct CodeEntryAndLineNumber {
  CodeEntry* code_entry;
  int line_number;
};

using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;

// Filters stack frames from sources other than a target native context.
class ContextFilter {
 public:
  explicit ContextFilter(Address native_context_address = kNullAddress)
      : native_context_address_(native_context_address) {}

  // Invoked when a native context has changed address.
  void OnMoveEvent(Address from_address, Address to_address);

  bool Accept(Address native_context_address) const {
    if (native_context_address_ == kNullAddress) return true;
    return (native_context_address & ~kHeapObjectTag) ==
           native_context_address_;
  }

  // Update the context's tracked address based on VM-thread events.
  void set_native_context_address(Address address) {
    native_context_address_ = address;
  }
  Address native_context_address() const { return native_context_address_; }

 private:
  Address native_context_address_;
};
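
// Sketch of the filtering rule (illustrative; the addresses are
// placeholders): a sampled address may still carry the heap-object tag, which
// Accept() masks off before comparing:
//
//   ContextFilter filter(untagged_context_address);
//   filter.Accept(untagged_context_address | kHeapObjectTag);  // true
//   ContextFilter pass_all;           // kNullAddress: no context tracked
//   pass_all.Accept(any_address);     // always true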

class ProfileTree;

class V8_EXPORT_PRIVATE ProfileNode {
 public:
  inline ProfileNode(ProfileTree* tree, CodeEntry* entry, ProfileNode* parent,
                     int line_number = 0);
  ~ProfileNode();
  ProfileNode(const ProfileNode&) = delete;
  ProfileNode& operator=(const ProfileNode&) = delete;

  ProfileNode* FindChild(
      CodeEntry* entry,
      int line_number = v8::CpuProfileNode::kNoLineNumberInfo);
  ProfileNode* FindOrAddChild(CodeEntry* entry, int line_number = 0);
  void IncrementSelfTicks() { ++self_ticks_; }
  void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; }
  void IncrementLineTicks(int src_line);

  CodeEntry* entry() const { return entry_; }
  unsigned self_ticks() const { return self_ticks_; }
  const std::vector<ProfileNode*>* children() const { return &children_list_; }
  unsigned id() const { return id_; }
  ProfileNode* parent() const { return parent_; }
  int line_number() const {
    return line_number_ != 0 ? line_number_ : entry_->line_number();
  }
  CpuProfileNode::SourceType source_type() const;

  unsigned int GetHitLineCount() const {
    return static_cast<unsigned int>(line_ticks_.size());
  }
  bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
                    unsigned int length) const;
  void CollectDeoptInfo(CodeEntry* entry);
  const std::vector<CpuProfileDeoptInfo>& deopt_infos() const {
    return deopt_infos_;
  }
  Isolate* isolate() const;

  void Print(int indent) const;

 private:
  struct Equals {
    bool operator()(CodeEntryAndLineNumber lhs,
                    CodeEntryAndLineNumber rhs) const {
      return lhs.code_entry->IsSameFunctionAs(rhs.code_entry) &&
             lhs.line_number == rhs.line_number;
    }
  };
  struct Hasher {
    std::size_t operator()(CodeEntryAndLineNumber pair) const {
      return pair.code_entry->GetHash() ^ ComputeUnseededHash(pair.line_number);
    }
  };

  ProfileTree* tree_;
  CodeEntry* entry_;
  unsigned self_ticks_;
  std::unordered_map<CodeEntryAndLineNumber, ProfileNode*, Hasher, Equals>
      children_;
  int line_number_;
  std::vector<ProfileNode*> children_list_;
  ProfileNode* parent_;
  unsigned id_;
  // maps line number --> number of ticks
  std::unordered_map<int, int> line_ticks_;

  std::vector<CpuProfileDeoptInfo> deopt_infos_;
};
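
// Reading line ticks out of a node (illustrative sketch; node is a
// placeholder ProfileNode*): the caller allocates the buffer and passes its
// capacity:
//
//   unsigned count = node->GetHitLineCount();
//   std::vector<v8::CpuProfileNode::LineTick> ticks(count);
//   if (count > 0 && node->GetLineTicks(ticks.data(), count)) {
//     // ticks[i].line and ticks[i].hit_count are now populated.
//   }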

class CodeEntryStorage;

class V8_EXPORT_PRIVATE ProfileTree {
 public:
  explicit ProfileTree(Isolate* isolate, CodeEntryStorage* storage = nullptr);
  ~ProfileTree();
  ProfileTree(const ProfileTree&) = delete;
  ProfileTree& operator=(const ProfileTree&) = delete;

  using ProfilingMode = v8::CpuProfilingMode;

  ProfileNode* AddPathFromEnd(
      const std::vector<CodeEntry*>& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true);
  ProfileNode* AddPathFromEnd(
      const ProfileStackTrace& path,
      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
      bool update_stats = true,
      ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
  ProfileNode* root() const { return root_; }
  unsigned next_node_id() { return next_node_id_++; }

  void Print() const { root_->Print(0); }

  Isolate* isolate() const { return isolate_; }

  void EnqueueNode(const ProfileNode* node) { pending_nodes_.push_back(node); }
  size_t pending_nodes_count() const { return pending_nodes_.size(); }
  std::vector<const ProfileNode*> TakePendingNodes() {
    return std::move(pending_nodes_);
  }

  CodeEntryStorage* code_entries() { return code_entries_; }

 private:
  template <typename Callback>
  void TraverseDepthFirst(Callback* callback);

  std::vector<const ProfileNode*> pending_nodes_;

  unsigned next_node_id_;
  Isolate* isolate_;
  CodeEntryStorage* const code_entries_;
  ProfileNode* root_;
};
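
// Sketch of AddPathFromEnd (illustrative; leaf_entry/caller_entry are
// placeholder CodeEntry pointers): sampled stacks are stored leaf-first, so
// the path is attached starting from its end, i.e. the root-most frame:
//
//   ProfileStackTrace path = {{leaf_entry, /*line_number=*/7},
//                             {caller_entry, /*line_number=*/3}};
//   ProfileNode* leaf = tree.AddPathFromEnd(path);  // root->caller->leaf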

class CpuProfiler;

class CpuProfile {
 public:
  struct SampleInfo {
    ProfileNode* node;
    base::TimeTicks timestamp;
    int line;
    StateTag state_tag;
    EmbedderStateTag embedder_state_tag;
  };

  V8_EXPORT_PRIVATE CpuProfile(
      CpuProfiler* profiler, ProfilerId id, const char* title,
      CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);
  CpuProfile(const CpuProfile&) = delete;
  CpuProfile& operator=(const CpuProfile&) = delete;

  // Checks whether the given TickSample should be (sub)sampled, given the
  // sampling interval of the profiler that recorded it.
  V8_EXPORT_PRIVATE bool CheckSubsample(base::TimeDelta sampling_interval);
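  // E.g. a profile requesting a 1000us interval that is fed 500us ticks keeps
  // roughly every second sample; next_sample_delta_ below tracks the time
  // remaining until the next recorded sample.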
  // Add pc -> ... -> main() call path to the profile.
  void AddPath(base::TimeTicks timestamp, const ProfileStackTrace& path,
               int src_line, bool update_stats,
               base::TimeDelta sampling_interval, StateTag state,
               EmbedderStateTag embedder_state);
  void FinishProfile();

  const char* title() const { return title_; }
  const ProfileTree* top_down() const { return &top_down_; }

  int samples_count() const { return static_cast<int>(samples_.size()); }
  const SampleInfo& sample(int index) const { return samples_[index]; }

  int64_t sampling_interval_us() const {
    return options_.sampling_interval_us();
  }

  base::TimeTicks start_time() const { return start_time_; }
  base::TimeTicks end_time() const { return end_time_; }
  CpuProfiler* cpu_profiler() const { return profiler_; }
  ContextFilter& context_filter() { return context_filter_; }
  ProfilerId id() const { return id_; }

  void UpdateTicksScale();

  V8_EXPORT_PRIVATE void Print() const;

 private:
  void StreamPendingTraceEvents();

  const char* title_;
  const CpuProfilingOptions options_;
  std::unique_ptr<DiscardedSamplesDelegate> delegate_;
  ContextFilter context_filter_;
  base::TimeTicks start_time_;
  base::TimeTicks end_time_;
  std::deque<SampleInfo> samples_;
  ProfileTree top_down_;
  CpuProfiler* const profiler_;
  size_t streaming_next_sample_;
  const ProfilerId id_;
  // Number of microseconds' worth of profiler ticks that should elapse before
  // the next sample is recorded.
  base::TimeDelta next_sample_delta_;
};

class CpuProfileMaxSamplesCallbackTask : public v8::Task {
 public:
  explicit CpuProfileMaxSamplesCallbackTask(
      std::unique_ptr<DiscardedSamplesDelegate> delegate)
      : delegate_(std::move(delegate)) {}

  void Run() override { delegate_->Notify(); }

 private:
  std::unique_ptr<DiscardedSamplesDelegate> delegate_;
};

class V8_EXPORT_PRIVATE CodeMap {
 public:
  explicit CodeMap(CodeEntryStorage& storage);
  ~CodeMap();
  CodeMap(const CodeMap&) = delete;
  CodeMap& operator=(const CodeMap&) = delete;

  // Adds the given CodeEntry to the CodeMap. The CodeMap takes ownership of
  // the CodeEntry.
  void AddCode(Address addr, CodeEntry* entry, unsigned size);
  void MoveCode(Address from, Address to);
  // Attempts to remove the given CodeEntry from the CodeMap.
  // Returns true iff the entry was found and removed.
  bool RemoveCode(CodeEntry*);
  void ClearCodesInRange(Address start, Address end);
  CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
  void Print();
  size_t size() const { return code_map_.size(); }

  size_t GetEstimatedMemoryUsage() const;

  CodeEntryStorage& code_entries() { return code_entries_; }

  void Clear();

 private:
  struct CodeEntryMapInfo {
    CodeEntry* entry;
    unsigned size;
  };

  std::multimap<Address, CodeEntryMapInfo> code_map_;
  CodeEntryStorage& code_entries_;
};
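
// Usage sketch (illustrative; code_map/sample_pc are placeholders): resolve a
// sampled pc to its CodeEntry and the pc offset used for source line lookup:
//
//   Address start = kNullAddress;
//   if (CodeEntry* entry = code_map.FindEntry(sample_pc, &start)) {
//     int line = entry->GetSourceLine(static_cast<int>(sample_pc - start));
//   }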

// Manages the lifetime of CodeEntry objects and stores resources shared
// between them.
class V8_EXPORT_PRIVATE CodeEntryStorage {
 public:
  template <typename... Args>
  static CodeEntry* Create(Args&&... args) {
    CodeEntry* const entry = new CodeEntry(std::forward<Args>(args)...);
    entry->mark_ref_counted();
    return entry;
  }

  void AddRef(CodeEntry*);
  void DecRef(CodeEntry*);

  StringsStorage& strings() { return function_and_resource_names_; }

 private:
  StringsStorage function_and_resource_names_;
};
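
// Lifecycle sketch (illustrative; assumes DecRef reclaims the entry and its
// strings once the count reaches zero, per the ref-counting interface above):
//
//   CodeEntryStorage storage;
//   CodeEntry* entry = CodeEntryStorage::Create(
//       CodeEventListener::FUNCTION_TAG, storage.strings().GetCopy("f"));
//   storage.AddRef(entry);   // a second owner, e.g. another CodeMap
//   storage.DecRef(entry);
//   storage.DecRef(entry);   // last reference; entry is reclaimed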

class V8_EXPORT_PRIVATE CpuProfilesCollection {
 public:
  explicit CpuProfilesCollection(Isolate* isolate);
  CpuProfilesCollection(const CpuProfilesCollection&) = delete;
  CpuProfilesCollection& operator=(const CpuProfilesCollection&) = delete;

  void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
  CpuProfilingResult StartProfiling(
      const char* title = nullptr, CpuProfilingOptions options = {},
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  CpuProfile* StopProfiling(ProfilerId id);
  bool IsLastProfileLeft(ProfilerId id);
  CpuProfile* Lookup(const char* title);

  std::vector<std::unique_ptr<CpuProfile>>* profiles() {
    return &finished_profiles_;
  }
  const char* GetName(Name name) { return resource_names_.GetName(name); }
  void RemoveProfile(CpuProfile* profile);

  // Finds a common sampling interval that divides each CpuProfile's interval
  // (after each is rounded up to the nearest multiple of the CpuProfiler's
  // base sampling interval). Returns 0 if no profiles are attached.
  base::TimeDelta GetCommonSamplingInterval() const;
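  // Illustrative: with profiles at 1000us and 1500us atop a 500us base
  // interval, both requested intervals are already multiples of 500us, and
  // their greatest common divisor, 500us, becomes the sampler interval; each
  // profile then subsamples via CpuProfile::CheckSubsample. (That a gcd is
  // used is an inference from this contract, not a guarantee.)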

  // Called from profile generator thread.
  void AddPathToCurrentProfiles(
      base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
      bool update_stats, base::TimeDelta sampling_interval, StateTag state,
      EmbedderStateTag embedder_state_tag,
      Address native_context_address = kNullAddress,
      Address native_embedder_context_address = kNullAddress);

  // Called from profile generator thread.
  void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);

  // Limits the number of profiles that can be simultaneously collected.
  static const int kMaxSimultaneousProfiles = 100;

 private:
  StringsStorage resource_names_;
  std::vector<std::unique_ptr<CpuProfile>> finished_profiles_;
  CpuProfiler* profiler_;

  // Accessed by VM thread and profile generator thread.
  std::vector<std::unique_ptr<CpuProfile>> current_profiles_;
  base::Semaphore current_profiles_semaphore_;
  ProfilerId last_id_;
  Isolate* isolate_;
};
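
// Typical flow (sketch; isolate/profiler are assumed to be set up elsewhere):
//
//   CpuProfilesCollection profiles(isolate);
//   profiles.set_cpu_profiler(profiler);
//   CpuProfilingResult result = profiles.StartProfiling("title");
//   // ... sampler thread: profiles.AddPathToCurrentProfiles(...) per tick ...
//   CpuProfile* profile = profiles.StopProfiling(result.id);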

}  // namespace internal
}  // namespace v8

#endif  // V8_PROFILER_PROFILE_GENERATOR_H_