#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/sc.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

/*
 * On architectures that support 64-bit atomics, we use atomic updates for our
 * 64-bit values.  Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Reads and updates must hold the arena stats mutex. */
typedef uint64_t arena_stats_u64_t;
#endif
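
/*
 * Illustrative access pattern (a sketch, not part of this header's API
 * surface): on the fallback path a consistent read requires the arena stats
 * mutex, while with 64-bit atomics the lock/unlock calls defined below
 * compile to no-ops and the read is a single relaxed atomic load:
 *
 *	arena_stats_lock(tsdn, astats);
 *	uint64_t npurge = arena_stats_read_u64(tsdn, astats,
 *	    &astats->decay_dirty.npurge);
 *	arena_stats_unlock(tsdn, astats);
 */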

typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	arena_stats_u64_t	nmalloc;
	arena_stats_u64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	arena_stats_u64_t	nrequests; /* Partially derived. */
	/*
	 * Number of tcache fills / flushes for large size classes (similarly,
	 * only periodically merged).  Note that there is currently no
	 * batch-fill for large tcache bins (objects are filled one at a
	 * time); flushes, however, may be batched.
	 */
	arena_stats_u64_t	nfills; /* Partially derived. */
	arena_stats_u64_t	nflushes; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t		curlextents; /* Derived. */
};
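
/*
 * Sketch of the merge-time arithmetic implied by the comments above (the
 * authoritative bookkeeping lives in the arena stats merge code): large
 * allocations served directly bump nmalloc but not nrequests, and
 * tcache-served requests reach nrequests only when a tcache merge happens,
 * so the value reported to consumers is assembled from both counters,
 * roughly:
 *
 *	reported_nrequests = nmalloc + nrequests;
 */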

typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
	/* Total number of purge sweeps. */
	arena_stats_u64_t	npurge;
	/* Total number of madvise calls made. */
	arena_stats_u64_t	nmadvise;
	/* Total number of pages purged. */
	arena_stats_u64_t	purged;
};
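
/*
 * Worked example (numbers illustrative): one decay sweep that purges three
 * dirty ranges of 16, 32, and 16 pages adds 1 to npurge, 3 to nmadvise (one
 * call per contiguous range), and 64 to purged.
 */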

typedef struct arena_stats_extents_s arena_stats_extents_t;
struct arena_stats_extents_s {
	/*
	 * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
	 * We track both bytes and # of extents: two extents in the same bucket
	 * may have different sizes if adjacent size classes differ by more than
	 * a page, so bytes cannot always be derived from # of extents.
	 */
	atomic_zu_t ndirty;
	atomic_zu_t dirty_bytes;
	atomic_zu_t nmuzzy;
	atomic_zu_t muzzy_bytes;
	atomic_zu_t nretained;
	atomic_zu_t retained_bytes;
};
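
/*
 * Concrete case of the comment above (sizes illustrative): a bucket spanning
 * [160 KiB, 192 KiB) may simultaneously hold a 160 KiB and a 176 KiB dirty
 * extent, so ndirty == 2 cannot by itself recover dirty_bytes == 336 KiB;
 * hence both counters are maintained.
 */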

/*
 * Arena stats.  Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t		mtx;
#endif

	/* Number of bytes currently mapped, excluding retained memory. */
	atomic_zu_t		mapped; /* Partially derived. */

	/*
	 * Number of unused virtual memory bytes currently retained.  Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the mapped statistic (above).
	 */
	atomic_zu_t		retained; /* Derived. */

	/* Number of extent_t structs allocated by base, but not being used. */
	atomic_zu_t		extent_avail;

	arena_stats_decay_t	decay_dirty;
	arena_stats_decay_t	decay_muzzy;

	atomic_zu_t		base; /* Derived. */
	atomic_zu_t		internal;
	atomic_zu_t		resident; /* Derived. */
	atomic_zu_t		metadata_thp;

	atomic_zu_t		allocated_large; /* Derived. */
	arena_stats_u64_t	nmalloc_large; /* Derived. */
	arena_stats_u64_t	ndalloc_large; /* Derived. */
	arena_stats_u64_t	nfills_large; /* Derived. */
	arena_stats_u64_t	nflushes_large; /* Derived. */
	arena_stats_u64_t	nrequests_large; /* Derived. */

	/* Bytes of VM space that had to be leaked (undocumented).  Normally 0. */
	atomic_zu_t		abandoned_vm;

	/* Number of bytes cached in tcache associated with this arena. */
	atomic_zu_t		tcache_bytes; /* Derived. */

	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

	/* One element for each large size class. */
	arena_stats_large_t	lstats[SC_NSIZES - SC_NBINS];

	/* Arena uptime. */
	nstime_t		uptime;
};
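
/*
 * Sketch of how a "derived" field gets its value (approximating the stats
 * merge logic; the field names follow the struct above): rather than being
 * maintained on the allocation fast path, e.g. retained is recomputed from
 * the retained extents when a stats merge is requested:
 *
 *	atomic_store_zu(&astats->retained,
 *	    extents_npages_get(&arena->extents_retained) << LG_PAGE,
 *	    ATOMIC_RELAXED);
 */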

static inline bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}
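
/*
 * Typical call site (sketch): the stats struct arrives zeroed (e.g. from a
 * zeroed base allocation), which is what the debug loop above asserts, and
 * the return value follows the "true on error" convention:
 *
 *	if (arena_stats_init(tsdn, &arena->stats)) {
 *		return NULL;	// mutex init failed (non-atomic-u64 builds)
 *	}
 */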

static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}

static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}

static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p -= x;
	assert(*p + x >= *p);
#endif
}
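
/*
 * The canonical update pattern pairs these helpers with the (possibly no-op)
 * lock; e.g. bumping a non-derived counter from the arena (a sketch; see
 * arena_stats_mapped_add() at the bottom of this header for the same shape
 * with a size_t counter):
 *
 *	arena_stats_lock(tsdn, astats);
 *	arena_stats_add_u64(tsdn, astats, &astats->decay_dirty.nmadvise, 1);
 *	arena_stats_unlock(tsdn, astats);
 */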

/*
 * Non-atomically sets *dst += src.  *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
	*dst += src;
#endif
}
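
/*
 * Merge-time usage (sketch; `dst` is a hypothetical snapshot owned by the
 * single merging thread, which is what makes the non-atomic accumulate
 * safe):
 *
 *	uint64_t npurge = arena_stats_read_u64(tsdn, astats,
 *	    &astats->decay_dirty.npurge);
 *	arena_stats_accum_u64(&dst->decay_dirty.npurge, npurge);
 */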

static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}

static inline void
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
	arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
	arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
	arena_stats_unlock(tsdn, arena_stats);
}
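
/*
 * Caller sketch (the intended caller is the tcache large-bin flush path;
 * `bin_nrequests` is an illustrative name for the requests that bin served
 * since its last merge): the whole batch is folded in with one locked
 * update, which is why nflushes advances by 1 per flush rather than per
 * object:
 *
 *	arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, szind,
 *	    bin_nrequests);
 */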

static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */