1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
|
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/profiler/stack_sampler_impl.h"

#include <algorithm>
#include <iterator>
#include <utility>

#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/profiler/metadata_recorder.h"
#include "base/profiler/profile_builder.h"
#include "base/profiler/sample_metadata.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/stack_copier.h"
#include "base/profiler/suspendable_thread_delegate.h"
#include "base/profiler/unwinder.h"
#include "build/build_config.h"
// IMPORTANT NOTE: Some functions within this implementation are invoked while
// the target thread is suspended so it must not do any allocation from the
// heap, including indirectly via use of DCHECK/CHECK or other logging
// statements. Otherwise this code can deadlock on heap locks acquired by the
// target thread before it was suspended. These functions are commented with "NO
// HEAP ALLOCATIONS".
namespace base {
namespace {
// Notifies the unwinders about the stack capture, and records metadata, while
// the thread is suspended.
class StackCopierDelegate : public StackCopier::Delegate {
 public:
  // All three pointers are unowned and must outlive this delegate.
  StackCopierDelegate(
      const base::circular_deque<std::unique_ptr<Unwinder>>* unwinders,
      ProfileBuilder* profile_builder,
      MetadataRecorder::MetadataProvider* metadata_provider)
      : unwinders_(unwinders),
        profile_builder_(profile_builder),
        metadata_provider_(metadata_provider) {}

  StackCopierDelegate(const StackCopierDelegate&) = delete;
  StackCopierDelegate& operator=(const StackCopierDelegate&) = delete;

  // StackCopier::Delegate:
  // IMPORTANT NOTE: to avoid deadlock this function must not invoke any
  // non-reentrant code that is also invoked by the target thread. In
  // particular, it may not perform any heap allocation or deallocation,
  // including indirectly via use of DCHECK/CHECK or other logging statements.
  void OnStackCopy() override {
    for (const auto& unwinder : *unwinders_)
      unwinder->OnStackCapture();
    profile_builder_->RecordMetadata(*metadata_provider_);
  }

 private:
  // Declared `* const` for consistency with the other members: none of these
  // pointers is ever reseated after construction.
  const base::circular_deque<std::unique_ptr<Unwinder>>* const unwinders_;
  ProfileBuilder* const profile_builder_;
  const MetadataRecorder::MetadataProvider* const metadata_provider_;
};
} // namespace
// |core_unwinders| is iterated backward since |core_unwinders| is passed in
// increasing priority order while |unwinders_| is stored in decreasing priority
// order.
StackSamplerImpl::StackSamplerImpl(
    std::unique_ptr<StackCopier> stack_copier,
    std::vector<std::unique_ptr<Unwinder>> core_unwinders,
    ModuleCache* module_cache,
    StackSamplerTestDelegate* test_delegate)
    : stack_copier_(std::move(stack_copier)),
      // Reverse iteration flips the caller's increasing-priority vector into
      // the decreasing-priority deque; move iterators transfer ownership of
      // each unwinder rather than copying.
      unwinders_(std::make_move_iterator(core_unwinders.rbegin()),
                 std::make_move_iterator(core_unwinders.rend())),
      module_cache_(module_cache),
      test_delegate_(test_delegate) {
  // At least one core unwinder is required to walk any stack.
  DCHECK(!unwinders_.empty());
  // Seed each unwinder with the modules currently known to the cache so it can
  // resolve instruction pointers starting from the very first sample.
  for (const auto& unwinder : unwinders_)
    unwinder->AddInitialModules(module_cache_);
}
// Defaulted: the owned members (|stack_copier_| and the unwinders) release
// their resources through their own destructors.
StackSamplerImpl::~StackSamplerImpl() = default;
void StackSamplerImpl::AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) {
unwinder->AddInitialModules(module_cache_);
unwinders_.push_front(std::move(unwinder));
}
// Captures and records a single sample of the target thread's stack into
// |profile_builder|. |stack_buffer| receives the copied stack bytes and must
// be non-null. On copy failure an empty frame list is recorded with the best
// available timestamp.
void StackSamplerImpl::RecordStackFrames(StackBuffer* stack_buffer,
                                         ProfileBuilder* profile_builder) {
  DCHECK(stack_buffer);
  RegisterContext thread_context;
  uintptr_t stack_top;
  TimeTicks timestamp;
  bool copy_stack_succeeded;
  {
    // Make this scope as small as possible because |metadata_provider| is
    // holding a lock.
    MetadataRecorder::MetadataProvider metadata_provider(
        GetSampleMetadataRecorder());
    StackCopierDelegate delegate(&unwinders_, profile_builder,
                                 &metadata_provider);
    // BUGFIX: this argument had been corrupted to `×tamp` (an HTML
    // `&times;` entity decoded into the source); restored to `&timestamp`.
    copy_stack_succeeded = stack_copier_->CopyStack(
        stack_buffer, &stack_top, &timestamp, &thread_context, &delegate);
  }
  if (!copy_stack_succeeded) {
    // Record an empty sample; fall back to the current time if CopyStack()
    // failed before it produced a timestamp.
    profile_builder->OnSampleCompleted(
        {}, timestamp.is_null() ? TimeTicks::Now() : timestamp);
    return;
  }
  // Refresh the unwinders' module state now that the target thread is running
  // again (safe to allocate here).
  for (const auto& unwinder : unwinders_)
    unwinder->UpdateModules(module_cache_);
  if (test_delegate_)
    test_delegate_->OnPreStackWalk();
  profile_builder->OnSampleCompleted(
      WalkStack(module_cache_, &thread_context, stack_top, unwinders_),
      timestamp);
}
// static
// Test-only shim that exposes the private WalkStack() implementation.
std::vector<Frame> StackSamplerImpl::WalkStackForTesting(
    ModuleCache* module_cache,
    RegisterContext* thread_context,
    uintptr_t stack_top,
    const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) {
  std::vector<Frame> frames =
      WalkStack(module_cache, thread_context, stack_top, unwinders);
  return frames;
}
// static
// Walks the copied stack starting from |thread_context|, delegating each
// frame to the highest-priority unwinder that claims it, and returns the
// recovered frames. |stack_top| bounds the valid stack region. Note:
// std::find_if requires <algorithm>, which this file now includes explicitly
// rather than relying on a transitive include.
std::vector<Frame> StackSamplerImpl::WalkStack(
    ModuleCache* module_cache,
    RegisterContext* thread_context,
    uintptr_t stack_top,
    const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) {
  std::vector<Frame> stack;
  // Reserve enough memory for most stacks, to avoid repeated
  // allocations. Approximately 99.9% of recorded stacks are 128 frames or
  // fewer.
  stack.reserve(128);
  // Record the first frame from the context values.
  stack.emplace_back(RegisterContextInstructionPointer(thread_context),
                     module_cache->GetModuleForAddress(
                         RegisterContextInstructionPointer(thread_context)));
  size_t prior_stack_size;
  UnwindResult result;
  do {
    // Choose an authoritative unwinder for the current module. Use the first
    // unwinder that thinks it can unwind from the current frame.
    const auto unwinder =
        std::find_if(unwinders.begin(), unwinders.end(),
                     [&stack](const std::unique_ptr<Unwinder>& unwinder) {
                       return unwinder->CanUnwindFrom(stack.back());
                     });
    // No unwinder claims the current frame: stop and return what we have.
    if (unwinder == unwinders.end())
      return stack;
    prior_stack_size = stack.size();
    result = (*unwinder)->TryUnwind(thread_context, stack_top, module_cache,
                                    &stack);
    // The unwinder with the lowest priority should be the only one that
    // returns COMPLETED since the stack starts in native code.
    DCHECK(result != UnwindResult::COMPLETED ||
           unwinder->get() == unwinders.back().get());
  } while (result != UnwindResult::ABORTED &&
           result != UnwindResult::COMPLETED &&
           // Give up if the authoritative unwinder for the module was unable
           // to unwind.
           stack.size() > prior_stack_size);
  return stack;
}
} // namespace base
|