path: root/deps/jemalloc/include/jemalloc/internal/prof_inlines_b.h
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
#define JEMALLOC_INTERNAL_PROF_INLINES_B_H

#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
	 * threads to notice state changes.
	 */
	return prof_gdump_val;
}

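/*
 * Fetch this thread's profiling data (tdata).  With create set, lazily
 * initialize it (or reinitialize it if it has expired); the result may
 * still be NULL, e.g. if the tsd is not in a nominal state.
 */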
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = tsd_prof_tdata_get(tsd);
	if (create) {
		if (unlikely(tdata == NULL)) {
			if (tsd_nominal(tsd)) {
				tdata = prof_tdata_init(tsd);
				tsd_prof_tdata_set(tsd, tdata);
			}
		} else if (unlikely(tdata->expired)) {
			tdata = prof_tdata_reinit(tsd, tdata);
			tsd_prof_tdata_set(tsd, tdata);
		}
		assert(tdata == NULL || tdata->attached);
	}

	return tdata;
}

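/*
 * The tctx and allocation-time accessors below are thin wrappers that
 * delegate to the arena layer, which keeps the per-object profiling
 * metadata.
 */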
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
    alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_tctx_reset(tsdn, ptr, tctx);
}

JEMALLOC_ALWAYS_INLINE nstime_t
prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
	cassert(config_prof);
	assert(ptr != NULL);

	return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
}

JEMALLOC_ALWAYS_INLINE void
prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
    nstime_t t) {
	cassert(config_prof);
	assert(ptr != NULL);

	arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
}

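/*
 * Compare usize against the thread's remaining bytes_until_sample.
 * With update set, the counter is actually decremented; otherwise it is
 * only inspected.  Returns true if the sample threshold has not been
 * crossed (i.e. do not sample this allocation).
 */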
JEMALLOC_ALWAYS_INLINE bool
prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
	ssize_t check = update ? 0 : usize;

	int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
	if (update) {
		bytes_until_sample -= usize;
		if (tsd_nominal(tsd)) {
			tsd_bytes_until_sample_set(tsd, bytes_until_sample);
		}
	}
	if (likely(bytes_until_sample >= check)) {
		return true;
	}

	return false;
}

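/*
 * Slow-path companion to prof_sample_check() that boots tdata on demand
 * and, with update set, computes the next sample threshold.  Returns
 * true if the allocation should not be sampled; if tdata_out is
 * non-NULL, it receives the thread's tdata (or NULL).
 */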
JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
			 prof_tdata_t **tdata_out) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Fastpath: no need to load tdata */
	if (likely(prof_sample_check(tsd, usize, update))) {
		return true;
	}

	/* Record whether tdata had already been booted for this thread. */
	bool booted = (tsd_prof_tdata_get(tsd) != NULL);
	tdata = prof_tdata_get(tsd, true);
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL)) {
		return true;
	}

	/*
	 * If this was the first creation of tdata, then
	 * prof_tdata_get() reset bytes_until_sample, so decrement and
	 * check it again.
	 */
	if (!booted && prof_sample_check(tsd, usize, update)) {
		return true;
	}

	if (tsd_reentrancy_level_get(tsd) > 0) {
		return true;
	}
	/* Compute new sample threshold. */
	if (update) {
		prof_sample_threshold_update(tdata);
	}
	return !tdata->active;
}

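/*
 * Decide whether an allocation of usize bytes gets sampled.  Returns
 * the sentinel (prof_tctx_t *)1U for unsampled allocations; otherwise
 * captures a backtrace and returns the tctx it maps to.
 */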
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;

	assert(usize == sz_s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
	}

	return ret;
}

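/*
 * Record a completed allocation.  Sampled objects ((uintptr_t)tctx >
 * 1U) are registered against their tctx; all others are tagged with the
 * 1U sentinel.
 */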
JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, ptr, usize, tctx);
	} else {
		prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
}
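
/*
 * Typical caller sequence, as a sketch only (the real fast path lives
 * in jemalloc.c and is more involved; alloc_ctx here stands for a
 * context obtained by the caller):
 *
 *	prof_tctx_t *tctx = prof_alloc_prep(tsd, usize, prof_active, true);
 *	void *p = ...;  (allocate; sampled iff (uintptr_t)tctx > 1U)
 *	if (p == NULL) {
 *		prof_alloc_rollback(tsd, tctx, true);
 *	} else {
 *		prof_malloc(tsd_tsdn(tsd), p, usize, &alloc_ctx, tctx);
 *	}
 */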

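/*
 * Fix up sampling state across a reallocation: register the new object
 * if it is sampled, and release the old tctx if the old object was.
 */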
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);

	if (prof_active && !updated && ptr != NULL) {
		assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
			/*
			 * Don't sample.  The usize passed to prof_alloc_prep()
			 * was larger than what actually got allocated, so a
			 * backtrace was captured for this allocation, even
			 * though its actual usize was insufficient to cross the
			 * sample threshold.
			 */
			prof_alloc_rollback(tsd, tctx, true);
			tctx = (prof_tctx_t *)(uintptr_t)1U;
		}
	}

	sampled = ((uintptr_t)tctx > (uintptr_t)1U);
	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
	moved = (ptr != old_ptr);

	if (unlikely(sampled)) {
		prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
	} else if (moved) {
		prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
		    (prof_tctx_t *)(uintptr_t)1U);
	} else if (unlikely(old_sampled)) {
		/*
		 * prof_tctx_set() would work for the !moved case as well, but
		 * prof_tctx_reset() is slightly cheaper, and the proper thing
		 * to do here in the presence of explicit knowledge re: moved
		 * state.
		 */
		prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
	} else {
		assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
		    (uintptr_t)1U);
	}

	/*
	 * The prof_free_sampled_object() call must come after the
	 * prof_malloc_sample_object() call, because tctx and old_tctx may be
	 * the same, in which case reversing the call order could cause the tctx
	 * to be prematurely destroyed as a side effect of momentarily zeroed
	 * counters.
	 */
	if (unlikely(old_sampled)) {
		prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
	}
}

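/*
 * Record a deallocation; only sampled objects carry a real tctx that
 * must be released.
 */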
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, ptr, usize, tctx);
	}
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */