#ifndef JEMALLOC_INTERNAL_ARENA_STATS_H
#define JEMALLOC_INTERNAL_ARENA_STATS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/sc.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

/*
 * On architectures that support 64-bit atomics, we use atomic updates for our
 * 64-bit values. Otherwise, we use a plain uint64_t and synchronize
 * externally.
 */
#ifdef JEMALLOC_ATOMIC_U64
typedef atomic_u64_t arena_stats_u64_t;
#else
/* Must hold the arena stats mutex while reading or writing. */
typedef uint64_t arena_stats_u64_t;
#endif
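
/*
 * Illustrative sketch (not part of the original header): the helpers defined
 * below hide the configuration difference, so callers can use one pattern on
 * both kinds of builds, e.g.:
 *
 *	arena_stats_lock(tsdn, arena_stats);
 *	arena_stats_add_u64(tsdn, arena_stats, &counter, 1);
 *	arena_stats_unlock(tsdn, arena_stats);
 *
 * With JEMALLOC_ATOMIC_U64, the lock/unlock calls compile away and the add is
 * a relaxed atomic RMW; without it, the add is a plain increment protected by
 * the arena stats mutex. (counter is a hypothetical arena_stats_u64_t field.)
 */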

typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	arena_stats_u64_t nmalloc;
	arena_stats_u64_t ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	arena_stats_u64_t nrequests; /* Partially derived. */
	/*
	 * Number of tcache fills / flushes for large size classes (likewise
	 * only periodically merged). Note that there is currently no large
	 * tcache batch-fill (i.e. fills happen one at a time); flushes,
	 * however, may be batched.
	 */
	arena_stats_u64_t nfills; /* Partially derived. */
	arena_stats_u64_t nflushes; /* Partially derived. */

	/* Current number of allocations of this size class. */
	size_t curlextents; /* Derived. */
};

typedef struct arena_stats_decay_s arena_stats_decay_t;
struct arena_stats_decay_s {
	/* Total number of purge sweeps. */
	arena_stats_u64_t npurge;
	/* Total number of madvise calls made. */
	arena_stats_u64_t nmadvise;
	/* Total number of pages purged. */
	arena_stats_u64_t purged;
};

typedef struct arena_stats_extents_s arena_stats_extents_t;
struct arena_stats_extents_s {
	/*
	 * Stats for a given index in the range [0, SC_NPSIZES] in an
	 * extents_t. We track both bytes and # of extents: two extents in the
	 * same bucket may have different sizes if adjacent size classes differ
	 * by more than a page, so bytes cannot always be derived from # of
	 * extents.
	 */
	atomic_zu_t ndirty;
	atomic_zu_t dirty_bytes;
	atomic_zu_t nmuzzy;
	atomic_zu_t muzzy_bytes;
	atomic_zu_t nretained;
	atomic_zu_t retained_bytes;
};

/*
 * Arena stats. Note that fields marked "derived" are not directly maintained
 * within the arena code; rather their values are derived during stats merge
 * requests.
 */
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_t mtx;
#endif

	/* Number of bytes currently mapped, excluding retained memory. */
	atomic_zu_t mapped; /* Partially derived. */

	/*
	 * Number of unused virtual memory bytes currently retained. Retained
	 * bytes are technically mapped (though always decommitted or purged),
	 * but they are excluded from the mapped statistic (above).
	 */
	atomic_zu_t retained; /* Derived. */

	/* Number of extent_t structs allocated by base, but not being used. */
	atomic_zu_t extent_avail;

	arena_stats_decay_t decay_dirty;
	arena_stats_decay_t decay_muzzy;

	atomic_zu_t base; /* Derived. */
	atomic_zu_t internal;
	atomic_zu_t resident; /* Derived. */
	atomic_zu_t metadata_thp;

	atomic_zu_t allocated_large; /* Derived. */
	arena_stats_u64_t nmalloc_large; /* Derived. */
	arena_stats_u64_t ndalloc_large; /* Derived. */
	arena_stats_u64_t nfills_large; /* Derived. */
	arena_stats_u64_t nflushes_large; /* Derived. */
	arena_stats_u64_t nrequests_large; /* Derived. */

	/* VM space had to be leaked (undocumented). Normally 0. */
	atomic_zu_t abandoned_vm;

	/* Number of bytes cached in tcache associated with this arena. */
	atomic_zu_t tcache_bytes; /* Derived. */

	mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];

	/* One element for each large size class. */
	arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];

	/* Arena uptime. */
	nstime_t uptime;
};
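
/*
 * Illustrative note (not part of the original header): lstats is indexed by
 * large size classes only, so a global szind_t must be rebased by SC_NBINS
 * before use, e.g.:
 *
 *	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
 *
 * arena_stats_large_flush_nrequests_add() below uses exactly this indexing.
 */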

static inline bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}
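
/*
 * Usage sketch (illustrative, not part of the original header): the caller
 * must pass zero-initialized memory, since arena_stats_init() relies on the
 * zeroing rather than clearing each field itself:
 *
 *	arena_stats_t *stats = ...;	// must be zero-initialized
 *	if (arena_stats_init(tsdn, stats)) {
 *		// Mutex initialization failed (only possible on builds
 *		// without JEMALLOC_ATOMIC_U64); propagate the error.
 *	}
 */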

static inline void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static inline void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

static inline uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}
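
/*
 * Usage sketch (illustrative, not part of the original header): callers wrap
 * reads in arena_stats_lock()/arena_stats_unlock(), which is a no-op on
 * JEMALLOC_ATOMIC_U64 builds and satisfies the mutex-owner assertion on the
 * fallback builds (i is a hypothetical large size class index):
 *
 *	arena_stats_lock(tsdn, arena_stats);
 *	uint64_t nmalloc = arena_stats_read_u64(tsdn, arena_stats,
 *	    &arena_stats->lstats[i].nmalloc);
 *	arena_stats_unlock(tsdn, arena_stats);
 */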

static inline void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}

static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
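	/* r is the pre-subtraction value; r - x wraps iff x > r (underflow). */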
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p -= x;
	assert(*p + x >= *p);
#endif
}

/*
 * Non-atomically sets *dst += src. *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static inline void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
	*dst += src;
#endif
}
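
/*
 * Usage sketch (illustrative, not part of the original header): a stats-merge
 * pass that already synchronizes the destination can fold per-size-class
 * counters into an arena-wide total without a fetch_add, e.g.:
 *
 *	uint64_t nmalloc = arena_stats_read_u64(tsdn, arena_stats,
 *	    &arena_stats->lstats[i].nmalloc);
 *	arena_stats_accum_u64(&arena_stats->nmalloc_large, nmalloc);
 */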

static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
    atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
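	/* As above: r is the pre-subtraction value, so this catches underflow. */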
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static inline void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}

static inline void
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
	arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
	arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
	arena_stats_unlock(tsdn, arena_stats);
}
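
/*
 * Usage sketch (illustrative, not part of the original header): a tcache
 * flush of a large bin would report its batched requests in one call, e.g.:
 *
 *	arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, szind,
 *	    tbin_nrequests);
 *
 * (arena, szind, and tbin_nrequests are hypothetical names here.)
 */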

static inline void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}

#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */