#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/inspect.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/peak_event.h"
#include "jemalloc/internal/prof_data.h"
#include "jemalloc/internal/prof_log.h"
#include "jemalloc/internal/prof_recent.h"
#include "jemalloc/internal/prof_stats.h"
#include "jemalloc/internal/prof_sys.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats->*
 */
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
static ctl_stats_t *ctl_stats;
static ctl_arenas_t *ctl_arenas;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node) {
	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index) {
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node) {
	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
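
/*
 * Illustrative sketch (not a real call site): ctl_node_t's leading "named"
 * flag discriminates the two node layouts, so a tree walk probes for a named
 * child table first and falls back to the indexed form.  "node" here is a
 * hypothetical interior node; this mirrors the pattern used by ctl_lookup()
 * and ctl_lookupbymib() later in this file.
 *
 *	if (ctl_named_node(node->children) != NULL) {
 *		const ctl_named_node_t *child = ctl_named_children(node, j);
 *		... walk named children by table position ...
 *	} else {
 *		const ctl_indexed_node_t *inode =
 *		    ctl_indexed_node(node->children);
 *		... resolve the child via inode->index(...) ...
 *	}
 */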
52
53/******************************************************************************/
54/* Function prototypes for non-inline static functions. */
55
56#define CTL_PROTO(n) \
57static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
58 void *oldp, size_t *oldlenp, void *newp, size_t newlen);
59
60#define INDEX_PROTO(n) \
61static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
62 const size_t *mib, size_t miblen, size_t i);
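
/*
 * For reference, CTL_PROTO(version) below expands (modulo whitespace) to:
 *
 *	static int version_ctl(tsd_t *tsd, const size_t *mib,
 *	    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
 *	    size_t newlen);
 *
 * and INDEX_PROTO(arena_i) declares arena_i_index() with the
 * (tsdn, mib, miblen, i) signature used by indexed tree nodes.
 */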

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_peak_read)
CTL_PROTO(thread_peak_reset)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(thread_idle)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_opt_safety_checks)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_cache_oblivious)
CTL_PROTO(opt_debug_double_free_max_scan)
CTL_PROTO(opt_trust_madvise)
CTL_PROTO(opt_confirm_conf)
CTL_PROTO(opt_hpa)
CTL_PROTO(opt_hpa_slab_max_alloc)
CTL_PROTO(opt_hpa_hugification_threshold)
CTL_PROTO(opt_hpa_hugify_delay_ms)
CTL_PROTO(opt_hpa_min_purge_interval_ms)
CTL_PROTO(opt_hpa_dirty_mult)
CTL_PROTO(opt_hpa_sec_nshards)
CTL_PROTO(opt_hpa_sec_max_alloc)
CTL_PROTO(opt_hpa_sec_max_bytes)
CTL_PROTO(opt_hpa_sec_bytes_after_flush)
CTL_PROTO(opt_hpa_sec_batch_fill_extra)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_mutex_max_spin)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_stats_interval)
CTL_PROTO(opt_stats_interval_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_experimental_infallible_new)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_tcache_max)
CTL_PROTO(opt_tcache_nslots_small_min)
CTL_PROTO(opt_tcache_nslots_small_max)
CTL_PROTO(opt_tcache_nslots_large)
CTL_PROTO(opt_lg_tcache_nslots_mul)
CTL_PROTO(opt_tcache_gc_incr_bytes)
CTL_PROTO(opt_tcache_gc_delay_bytes)
CTL_PROTO(opt_lg_tcache_flush_small_div)
CTL_PROTO(opt_lg_tcache_flush_large_div)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_leak_error)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(opt_prof_recent_alloc_max)
CTL_PROTO(opt_prof_stats)
CTL_PROTO(opt_prof_sys_thread_name)
CTL_PROTO(opt_prof_time_res)
CTL_PROTO(opt_lg_san_uaf_align)
CTL_PROTO(opt_zero_realloc)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_oversize_threshold)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_slab_size)
CTL_PROTO(arenas_bin_i_nshards)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_dirty_decay_ms)
CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_create)
CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_prefix)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
CTL_PROTO(prof_stats_bins_i_live)
CTL_PROTO(prof_stats_bins_i_accum)
INDEX_PROTO(prof_stats_bins_i)
CTL_PROTO(prof_stats_lextents_i_live)
CTL_PROTO(prof_stats_lextents_i_accum)
INDEX_PROTO(prof_stats_lextents_i)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_small_nfills)
CTL_PROTO(stats_arenas_i_small_nflushes)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_large_nfills)
CTL_PROTO(stats_arenas_i_large_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs)
CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_extents_j_ndirty)
CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
CTL_PROTO(stats_arenas_i_extents_j_nretained)
CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)

/* We have a set of stats for full slabs. */
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)

/* A parallel set for the empty slabs. */
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)

/*
 * And one for the slabs that are neither empty nor full, but indexed by how
 * full they are.
 */
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)

INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_dirty_decay_ms)
CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_extent_avail)
CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
CTL_PROTO(experimental_hooks_prof_backtrace)
CTL_PROTO(experimental_hooks_prof_dump)
CTL_PROTO(experimental_hooks_safety_check_abort)
CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)
CTL_PROTO(experimental_prof_recent_alloc_max)
CTL_PROTO(experimental_prof_recent_alloc_dump)
CTL_PROTO(experimental_batch_alloc)
CTL_PROTO(experimental_arenas_create_ext)

#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
CTL_PROTO(stats_##n##_num_wait) \
CTL_PROTO(stats_##n##_num_spin_acq) \
CTL_PROTO(stats_##n##_num_owner_switch) \
CTL_PROTO(stats_##n##_total_wait_time) \
CTL_PROTO(stats_##n##_max_wait_time) \
CTL_PROTO(stats_##n##_max_num_thds)

/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN

CTL_PROTO(stats_mutexes_reset)

/******************************************************************************/
/* mallctl tree. */

#define NAME(n) {true}, n
#define CHILD(t, c) \
	sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
	(ctl_node_t *)c##_node, \
	NULL
#define CTL(c) 0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i) {false}, i##_index
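
/*
 * A worked expansion of the constructor macros above, for orientation:
 *
 *	{NAME("epoch"), CTL(epoch)}
 *
 * yields a ctl_named_node_t initializer equivalent to
 *
 *	{{true}, "epoch", 0, NULL, epoch_ctl}
 *
 * i.e. a terminal named node (no children, epoch_ctl as its handler), while
 * {NAME("thread"), CHILD(named, thread)} yields an interior node whose
 * nchildren/children fields describe thread_node and whose ctl handler is
 * NULL.
 */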

static const ctl_named_node_t thread_tcache_node[] = {
	{NAME("enabled"), CTL(thread_tcache_enabled)},
	{NAME("flush"), CTL(thread_tcache_flush)}
};

static const ctl_named_node_t thread_peak_node[] = {
	{NAME("read"), CTL(thread_peak_read)},
	{NAME("reset"), CTL(thread_peak_reset)},
};

static const ctl_named_node_t thread_prof_node[] = {
	{NAME("name"), CTL(thread_prof_name)},
	{NAME("active"), CTL(thread_prof_active)}
};

static const ctl_named_node_t thread_node[] = {
	{NAME("arena"), CTL(thread_arena)},
	{NAME("allocated"), CTL(thread_allocated)},
	{NAME("allocatedp"), CTL(thread_allocatedp)},
	{NAME("deallocated"), CTL(thread_deallocated)},
	{NAME("deallocatedp"), CTL(thread_deallocatedp)},
	{NAME("tcache"), CHILD(named, thread_tcache)},
	{NAME("peak"), CHILD(named, thread_peak)},
	{NAME("prof"), CHILD(named, thread_prof)},
	{NAME("idle"), CTL(thread_idle)}
};

static const ctl_named_node_t config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"), CTL(config_debug)},
	{NAME("fill"), CTL(config_fill)},
	{NAME("lazy_lock"), CTL(config_lazy_lock)},
	{NAME("malloc_conf"), CTL(config_malloc_conf)},
	{NAME("opt_safety_checks"), CTL(config_opt_safety_checks)},
	{NAME("prof"), CTL(config_prof)},
	{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"), CTL(config_stats)},
	{NAME("utrace"), CTL(config_utrace)},
	{NAME("xmalloc"), CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"), CTL(opt_abort)},
	{NAME("abort_conf"), CTL(opt_abort_conf)},
	{NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
	{NAME("trust_madvise"), CTL(opt_trust_madvise)},
	{NAME("confirm_conf"), CTL(opt_confirm_conf)},
	{NAME("hpa"), CTL(opt_hpa)},
	{NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
	{NAME("hpa_hugification_threshold"),
	    CTL(opt_hpa_hugification_threshold)},
	{NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
	{NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
	{NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
	{NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
	{NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
	{NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
	{NAME("hpa_sec_bytes_after_flush"),
	    CTL(opt_hpa_sec_bytes_after_flush)},
	{NAME("hpa_sec_batch_fill_extra"),
	    CTL(opt_hpa_sec_batch_fill_extra)},
	{NAME("metadata_thp"), CTL(opt_metadata_thp)},
	{NAME("retain"), CTL(opt_retain)},
	{NAME("dss"), CTL(opt_dss)},
	{NAME("narenas"), CTL(opt_narenas)},
	{NAME("percpu_arena"), CTL(opt_percpu_arena)},
	{NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
	{NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
	{NAME("background_thread"), CTL(opt_background_thread)},
	{NAME("max_background_threads"), CTL(opt_max_background_threads)},
	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"), CTL(opt_stats_print)},
	{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
	{NAME("stats_interval"), CTL(opt_stats_interval)},
	{NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
	{NAME("junk"), CTL(opt_junk)},
	{NAME("zero"), CTL(opt_zero)},
	{NAME("utrace"), CTL(opt_utrace)},
	{NAME("xmalloc"), CTL(opt_xmalloc)},
	{NAME("experimental_infallible_new"),
	    CTL(opt_experimental_infallible_new)},
	{NAME("tcache"), CTL(opt_tcache)},
	{NAME("tcache_max"), CTL(opt_tcache_max)},
	{NAME("tcache_nslots_small_min"),
	    CTL(opt_tcache_nslots_small_min)},
	{NAME("tcache_nslots_small_max"),
	    CTL(opt_tcache_nslots_small_max)},
	{NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)},
	{NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)},
	{NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)},
	{NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)},
	{NAME("lg_tcache_flush_small_div"),
	    CTL(opt_lg_tcache_flush_small_div)},
	{NAME("lg_tcache_flush_large_div"),
	    CTL(opt_lg_tcache_flush_large_div)},
	{NAME("thp"), CTL(opt_thp)},
	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
	{NAME("prof"), CTL(opt_prof)},
	{NAME("prof_prefix"), CTL(opt_prof_prefix)},
	{NAME("prof_active"), CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"), CTL(opt_prof_gdump)},
	{NAME("prof_final"), CTL(opt_prof_final)},
	{NAME("prof_leak"), CTL(opt_prof_leak)},
	{NAME("prof_leak_error"), CTL(opt_prof_leak_error)},
	{NAME("prof_accum"), CTL(opt_prof_accum)},
	{NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)},
	{NAME("prof_stats"), CTL(opt_prof_stats)},
	{NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
	{NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
	{NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
	{NAME("zero_realloc"), CTL(opt_zero_realloc)},
	{NAME("debug_double_free_max_scan"),
	    CTL(opt_debug_double_free_max_scan)}
};

static const ctl_named_node_t tcache_node[] = {
	{NAME("create"), CTL(tcache_create)},
	{NAME("flush"), CTL(tcache_flush)},
	{NAME("destroy"), CTL(tcache_destroy)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("initialized"), CTL(arena_i_initialized)},
	{NAME("decay"), CTL(arena_i_decay)},
	{NAME("purge"), CTL(arena_i_purge)},
	{NAME("reset"), CTL(arena_i_reset)},
	{NAME("destroy"), CTL(arena_i_destroy)},
	{NAME("dss"), CTL(arena_i_dss)},
	/*
	 * Undocumented for now, since we anticipate an arena API in flux after
	 * we cut the last 5-series release.
	 */
	{NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
	{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
	{NAME("retain_grow_limit"), CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""), CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"), CTL(arenas_bin_i_size)},
	{NAME("nregs"), CTL(arenas_bin_i_nregs)},
	{NAME("slab_size"), CTL(arenas_bin_i_slab_size)},
	{NAME("nshards"), CTL(arenas_bin_i_nshards)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""), CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lextent_i_node[] = {
	{NAME("size"), CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
	{NAME(""), CHILD(named, arenas_lextent_i)}
};

static const ctl_indexed_node_t arenas_lextent_node[] = {
	{INDEX(arenas_lextent_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"), CTL(arenas_narenas)},
	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
	{NAME("quantum"), CTL(arenas_quantum)},
	{NAME("page"), CTL(arenas_page)},
	{NAME("tcache_max"), CTL(arenas_tcache_max)},
	{NAME("nbins"), CTL(arenas_nbins)},
	{NAME("nhbins"), CTL(arenas_nhbins)},
	{NAME("bin"), CHILD(indexed, arenas_bin)},
	{NAME("nlextents"), CTL(arenas_nlextents)},
	{NAME("lextent"), CHILD(indexed, arenas_lextent)},
	{NAME("create"), CTL(arenas_create)},
	{NAME("lookup"), CTL(arenas_lookup)}
};

static const ctl_named_node_t prof_stats_bins_i_node[] = {
	{NAME("live"), CTL(prof_stats_bins_i_live)},
	{NAME("accum"), CTL(prof_stats_bins_i_accum)}
};

static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
	{NAME(""), CHILD(named, prof_stats_bins_i)}
};

static const ctl_indexed_node_t prof_stats_bins_node[] = {
	{INDEX(prof_stats_bins_i)}
};

static const ctl_named_node_t prof_stats_lextents_i_node[] = {
	{NAME("live"), CTL(prof_stats_lextents_i_live)},
	{NAME("accum"), CTL(prof_stats_lextents_i_accum)}
};

static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
	{NAME(""), CHILD(named, prof_stats_lextents_i)}
};

static const ctl_indexed_node_t prof_stats_lextents_node[] = {
	{INDEX(prof_stats_lextents_i)}
};

static const ctl_named_node_t prof_stats_node[] = {
	{NAME("bins"), CHILD(indexed, prof_stats_bins)},
	{NAME("lextents"), CHILD(indexed, prof_stats_lextents)},
};

static const ctl_named_node_t prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"), CTL(prof_active)},
	{NAME("dump"), CTL(prof_dump)},
	{NAME("gdump"), CTL(prof_gdump)},
	{NAME("prefix"), CTL(prof_prefix)},
	{NAME("reset"), CTL(prof_reset)},
	{NAME("interval"), CTL(prof_interval)},
	{NAME("lg_sample"), CTL(lg_prof_sample)},
	{NAME("log_start"), CTL(prof_log_start)},
	{NAME("log_stop"), CTL(prof_log_stop)},
	{NAME("stats"), CHILD(named, prof_stats)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)},
	{NAME("nfills"), CTL(stats_arenas_i_small_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_small_nflushes)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)},
	{NAME("nfills"), CTL(stats_arenas_i_large_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_large_nflushes)}
};

#define MUTEX_PROF_DATA_NODE(prefix) \
static const ctl_named_node_t stats_##prefix##_node[] = { \
	{NAME("num_ops"), \
	    CTL(stats_##prefix##_num_ops)}, \
	{NAME("num_wait"), \
	    CTL(stats_##prefix##_num_wait)}, \
	{NAME("num_spin_acq"), \
	    CTL(stats_##prefix##_num_spin_acq)}, \
	{NAME("num_owner_switch"), \
	    CTL(stats_##prefix##_num_owner_switch)}, \
	{NAME("total_wait_time"), \
	    CTL(stats_##prefix##_total_wait_time)}, \
	{NAME("max_wait_time"), \
	    CTL(stats_##prefix##_max_wait_time)}, \
	{NAME("max_num_thds"), \
	    CTL(stats_##prefix##_max_num_thds)} \
	/* Note that the number of currently waiting threads is not provided. */ \
};

MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
	{NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
	{NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
	{NAME("nonfull_slabs"), CTL(stats_arenas_i_bins_j_nonfull_slabs)},
	{NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};

static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
	{NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
	{NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
	{NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
	{NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
	{INDEX(stats_arenas_i_lextents_j)}
};

static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
	{NAME("ndirty"), CTL(stats_arenas_i_extents_j_ndirty)},
	{NAME("nmuzzy"), CTL(stats_arenas_i_extents_j_nmuzzy)},
	{NAME("nretained"), CTL(stats_arenas_i_extents_j_nretained)},
	{NAME("dirty_bytes"), CTL(stats_arenas_i_extents_j_dirty_bytes)},
	{NAME("muzzy_bytes"), CTL(stats_arenas_i_extents_j_muzzy_bytes)},
	{NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
};

static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i_extents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
	{INDEX(stats_arenas_i_extents_j)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};

static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
	{NAME("npageslabs_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
	{NAME("npageslabs_huge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
	{NAME("nactive_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
	{NAME("nactive_huge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
	{NAME("ndirty_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
	{NAME("ndirty_huge"),
	    CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
};

static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
	{NAME("npageslabs_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
	{NAME("npageslabs_huge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
	{NAME("nactive_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
	{NAME("nactive_huge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
	{NAME("ndirty_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
	{NAME("ndirty_huge"),
	    CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
};

static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
	{NAME("npageslabs_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
	{NAME("npageslabs_huge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
	{NAME("nactive_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
	{NAME("nactive_huge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
	{NAME("ndirty_nonhuge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
	{NAME("ndirty_huge"),
	    CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
};

static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
	{NAME(""),
	    CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};

static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
{
	{INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
};

static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
	{NAME("full_slabs"), CHILD(named,
	    stats_arenas_i_hpa_shard_full_slabs)},
	{NAME("empty_slabs"), CHILD(named,
	    stats_arenas_i_hpa_shard_empty_slabs)},
	{NAME("nonfull_slabs"), CHILD(indexed,
	    stats_arenas_i_hpa_shard_nonfull_slabs)},

	{NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
	{NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
	{NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
	{NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
	{NAME("uptime"), CTL(stats_arenas_i_uptime)},
	{NAME("dss"), CTL(stats_arenas_i_dss)},
	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
	{NAME("pactive"), CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
	{NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
	{NAME("mapped"), CTL(stats_arenas_i_mapped)},
	{NAME("retained"), CTL(stats_arenas_i_retained)},
	{NAME("extent_avail"), CTL(stats_arenas_i_extent_avail)},
	{NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
	{NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
	{NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
	{NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"), CTL(stats_arenas_i_base)},
	{NAME("internal"), CTL(stats_arenas_i_internal)},
	{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
	{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
	{NAME("tcache_stashed_bytes"),
	    CTL(stats_arenas_i_tcache_stashed_bytes)},
	{NAME("resident"), CTL(stats_arenas_i_resident)},
	{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
	{NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)},
	{NAME("small"), CHILD(named, stats_arenas_i_small)},
	{NAME("large"), CHILD(named, stats_arenas_i_large)},
	{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
	{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
	{NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)},
	{NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""), CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_background_thread_node[] = {
	{NAME("num_threads"), CTL(stats_background_thread_num_threads)},
	{NAME("num_runs"), CTL(stats_background_thread_num_runs)},
	{NAME("run_interval"), CTL(stats_background_thread_run_interval)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	{NAME("reset"), CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE

static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"), CTL(stats_allocated)},
	{NAME("active"), CTL(stats_active)},
	{NAME("metadata"), CTL(stats_metadata)},
	{NAME("metadata_thp"), CTL(stats_metadata_thp)},
	{NAME("resident"), CTL(stats_resident)},
	{NAME("mapped"), CTL(stats_mapped)},
	{NAME("retained"), CTL(stats_retained)},
	{NAME("background_thread"),
	    CHILD(named, stats_background_thread)},
	{NAME("mutexes"), CHILD(named, stats_mutexes)},
	{NAME("arenas"), CHILD(indexed, stats_arenas)},
	{NAME("zero_reallocs"), CTL(stats_zero_reallocs)},
};

static const ctl_named_node_t experimental_hooks_node[] = {
	{NAME("install"), CTL(experimental_hooks_install)},
	{NAME("remove"), CTL(experimental_hooks_remove)},
	{NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
	{NAME("prof_dump"), CTL(experimental_hooks_prof_dump)},
	{NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)},
};

static const ctl_named_node_t experimental_thread_node[] = {
	{NAME("activity_callback"),
	    CTL(experimental_thread_activity_callback)}
};

static const ctl_named_node_t experimental_utilization_node[] = {
	{NAME("query"), CTL(experimental_utilization_query)},
	{NAME("batch_query"), CTL(experimental_utilization_batch_query)}
};

static const ctl_named_node_t experimental_arenas_i_node[] = {
	{NAME("pactivep"), CTL(experimental_arenas_i_pactivep)}
};
static const ctl_named_node_t super_experimental_arenas_i_node[] = {
	{NAME(""), CHILD(named, experimental_arenas_i)}
};

static const ctl_indexed_node_t experimental_arenas_node[] = {
	{INDEX(experimental_arenas_i)}
};

static const ctl_named_node_t experimental_prof_recent_node[] = {
	{NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)},
	{NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)},
};

static const ctl_named_node_t experimental_node[] = {
	{NAME("hooks"), CHILD(named, experimental_hooks)},
	{NAME("utilization"), CHILD(named, experimental_utilization)},
	{NAME("arenas"), CHILD(indexed, experimental_arenas)},
	{NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
	{NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
	{NAME("batch_alloc"), CTL(experimental_batch_alloc)},
	{NAME("thread"), CHILD(named, experimental_thread)}
};

static const ctl_named_node_t root_node[] = {
	{NAME("version"), CTL(version)},
	{NAME("epoch"), CTL(epoch)},
	{NAME("background_thread"), CTL(background_thread)},
	{NAME("max_background_threads"), CTL(max_background_threads)},
	{NAME("thread"), CHILD(named, thread)},
	{NAME("config"), CHILD(named, config)},
	{NAME("opt"), CHILD(named, opt)},
	{NAME("tcache"), CHILD(named, tcache)},
	{NAME("arena"), CHILD(indexed, arena)},
	{NAME("arenas"), CHILD(named, arenas)},
	{NAME("prof"), CHILD(named, prof)},
	{NAME("stats"), CHILD(named, stats)},
	{NAME("experimental"), CHILD(named, experimental)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""), CHILD(named, root)}
};

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

/******************************************************************************/

/*
 * Sets *dst to *dst + *src, non-atomically. This is safe, since everything
 * is synchronized by the ctl mutex.
 */
static void
ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
	locked_inc_u64_unsynchronized(dst,
	    locked_read_u64_unsynchronized(src));
}

static void
ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
}

/******************************************************************************/

static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate) {
	unsigned a;

	switch (i) {
	case MALLCTL_ARENAS_ALL:
		a = 0;
		break;
	case MALLCTL_ARENAS_DESTROYED:
		a = 1;
		break;
	default:
		if (compat && i == ctl_arenas->narenas) {
			/*
			 * Provide deprecated backward compatibility for
			 * accessing the merged stats at index narenas rather
			 * than via MALLCTL_ARENAS_ALL. This is scheduled for
			 * removal in 6.0.0.
			 */
			a = 0;
		} else if (validate && i >= ctl_arenas->narenas) {
			a = UINT_MAX;
		} else {
			/*
			 * This function should never be called for an index
			 * more than one past the range of indices that have
			 * initialized ctl data.
			 */
			assert(i < ctl_arenas->narenas || (!validate && i ==
			    ctl_arenas->narenas));
			a = (unsigned)i + 2;
		}
		break;
	}

	return a;
}
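
/*
 * The mapping this implements, concretely (with N == ctl_arenas->narenas):
 *
 *	MALLCTL_ARENAS_ALL       -> 0
 *	MALLCTL_ARENAS_DESTROYED -> 1
 *	arena i, 0 <= i < N      -> i + 2
 *	i == N (compat mode)     -> 0, the deprecated alias for ALL
 *
 * so the ctl_arenas->arenas array carries two summary slots ahead of the
 * per-arena ones.
 */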

static unsigned
arenas_i2a(size_t i) {
	return arenas_i2a_impl(i, true, false);
}

static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
	ctl_arena_t *ret;

	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			struct container_s {
				ctl_arena_t ctl_arena;
				ctl_arena_stats_t astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
			    b0get(), sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return ret;
}

static ctl_arena_t *
arenas_i(size_t i) {
	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
	assert(ret != NULL);
	return ret;
}

static void
ctl_arena_clear(ctl_arena_t *ctl_arena) {
	ctl_arena->nthreads = 0;
	ctl_arena->dss = dss_prec_names[dss_prec_limit];
	ctl_arena->dirty_decay_ms = -1;
	ctl_arena->muzzy_decay_ms = -1;
	ctl_arena->pactive = 0;
	ctl_arena->pdirty = 0;
	ctl_arena->pmuzzy = 0;
	if (config_stats) {
		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
		ctl_arena->astats->allocated_small = 0;
		ctl_arena->astats->nmalloc_small = 0;
		ctl_arena->astats->ndalloc_small = 0;
		ctl_arena->astats->nrequests_small = 0;
		ctl_arena->astats->nfills_small = 0;
		ctl_arena->astats->nflushes_small = 0;
		memset(ctl_arena->astats->bstats, 0, SC_NBINS *
		    sizeof(bin_stats_data_t));
		memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
		    sizeof(arena_stats_large_t));
		memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
		    sizeof(pac_estats_t));
		memset(&ctl_arena->astats->hpastats, 0,
		    sizeof(hpa_shard_stats_t));
		memset(&ctl_arena->astats->secstats, 0,
		    sizeof(sec_stats_t));
	}
}

static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats, ctl_arena->astats->estats,
		    &ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);

		for (i = 0; i < SC_NBINS; i++) {
			bin_stats_t *bstats =
			    &ctl_arena->astats->bstats[i].stats_data;
			ctl_arena->astats->allocated_small += bstats->curregs *
			    sz_index2size(i);
			ctl_arena->astats->nmalloc_small += bstats->nmalloc;
			ctl_arena->astats->ndalloc_small += bstats->ndalloc;
			ctl_arena->astats->nrequests_small += bstats->nrequests;
			ctl_arena->astats->nfills_small += bstats->nfills;
			ctl_arena->astats->nflushes_small += bstats->nflushes;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
	}
}

static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			sdstats->astats.mapped += astats->astats.mapped;
			sdstats->astats.pa_shard_stats.pac_stats.retained
			    += astats->astats.pa_shard_stats.pac_stats.retained;
			sdstats->astats.pa_shard_stats.edata_avail
			    += astats->astats.pa_shard_stats.edata_avail;
		}

		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
		    &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);

		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
		ctl_accum_locked_u64(
		    &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
		    &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);

#define OP(mtx) malloc_mutex_prof_merge( \
		    &(sdstats->astats.mutex_prof_data[ \
		    arena_prof_mutex_##mtx]), \
		    &(astats->astats.mutex_prof_data[ \
		    arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
		if (!destroyed) {
			sdstats->astats.base += astats->astats.base;
			sdstats->astats.resident += astats->astats.resident;
			sdstats->astats.metadata_thp += astats->astats.metadata_thp;
			ctl_accum_atomic_zu(&sdstats->astats.internal,
			    &astats->astats.internal);
		} else {
			assert(atomic_load_zu(
			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
		}

		if (!destroyed) {
			sdstats->allocated_small += astats->allocated_small;
		} else {
			assert(astats->allocated_small == 0);
		}
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;
		sdstats->nfills_small += astats->nfills_small;
		sdstats->nflushes_small += astats->nflushes_small;

		if (!destroyed) {
			sdstats->astats.allocated_large +=
			    astats->astats.allocated_large;
		} else {
			assert(astats->astats.allocated_large == 0);
		}
		sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
		sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
		sdstats->astats.nrequests_large
		    += astats->astats.nrequests_large;
		sdstats->astats.nflushes_large += astats->astats.nflushes_large;
		ctl_accum_atomic_zu(
		    &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
		    &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);

		sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
		sdstats->astats.tcache_stashed_bytes +=
		    astats->astats.tcache_stashed_bytes;

		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}

		/* Merge bin stats. */
		for (i = 0; i < SC_NBINS; i++) {
			bin_stats_t *bstats = &astats->bstats[i].stats_data;
			bin_stats_t *merged = &sdstats->bstats[i].stats_data;
			merged->nmalloc += bstats->nmalloc;
			merged->ndalloc += bstats->ndalloc;
			merged->nrequests += bstats->nrequests;
			if (!destroyed) {
				merged->curregs += bstats->curregs;
			} else {
				assert(bstats->curregs == 0);
			}
			merged->nfills += bstats->nfills;
			merged->nflushes += bstats->nflushes;
			merged->nslabs += bstats->nslabs;
			merged->reslabs += bstats->reslabs;
			if (!destroyed) {
				merged->curslabs += bstats->curslabs;
				merged->nonfull_slabs += bstats->nonfull_slabs;
			} else {
				assert(bstats->curslabs == 0);
				assert(bstats->nonfull_slabs == 0);
			}
			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
			    &astats->bstats[i].mutex_data);
		}

		/* Merge stats for large allocations. */
		for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
			ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		}

		/* Merge extents stats. */
		for (i = 0; i < SC_NPSIZES; i++) {
			sdstats->estats[i].ndirty += astats->estats[i].ndirty;
			sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
			sdstats->estats[i].nretained
			    += astats->estats[i].nretained;
			sdstats->estats[i].dirty_bytes
			    += astats->estats[i].dirty_bytes;
			sdstats->estats[i].muzzy_bytes
			    += astats->estats[i].muzzy_bytes;
			sdstats->estats[i].retained_bytes
			    += astats->estats[i].retained_bytes;
		}

		/* Merge HPA stats. */
		hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
		sec_stats_accum(&sdstats->secstats, &astats->secstats);
	}
}

static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}

static unsigned
ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		arena_ind = ctl_arenas->narenas;
	}

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
		return UINT_MAX;
	}

	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}

	return arena_ind;
}

static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
	background_thread_stats_t *stats = &ctl_stats->background_thread;
	if (!have_background_thread ||
	    background_thread_stats_read(tsdn, stats)) {
		memset(stats, 0, sizeof(background_thread_stats_t));
		nstime_init_zero(&stats->run_interval);
	}
	malloc_mutex_prof_copy(
	    &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
	    &stats->max_counter_per_bg_thd);
}

static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear sum stats, since ctl_arena_refresh() will merge into
	 * them.
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    ctl_sarena->astats->astats.allocated_large;
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = ctl_sarena->astats->astats.base +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
		    ATOMIC_RELAXED);
		ctl_stats->resident = ctl_sarena->astats->astats.resident;
		ctl_stats->metadata_thp =
		    ctl_sarena->astats->astats.metadata_thp;
		ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
		ctl_stats->retained = ctl_sarena->astats->astats
		    .pa_shard_stats.pac_stats.retained;

		ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
    malloc_mutex_lock(tsdn, &mtx); \
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
    malloc_mutex_unlock(tsdn, &mtx);

		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof, bt2gctx_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_thds_data, tdatas_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_dump, prof_dump_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_recent_alloc,
			    prof_recent_alloc_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_recent_dump,
			    prof_recent_dump_mtx);
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_prof_stats, prof_stats_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We own ctl mutex already. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}

static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
    const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
    size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = starting_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			mibp[i] = (size_t)index;
		}

		/* Reached the end? */
		if (node->ctl != NULL || *dot == '\0') {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}
	if (ending_nodep != NULL) {
		*ending_nodep = node;
	}

	ret = 0;
label_return:
	return ret;
}
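
/*
 * A concrete trace through ctl_lookup(), using the tree defined above:
 * resolving "arenas.bin.0.size" from super_root_node matches "arenas" among
 * root_node's named children (table position 9), then "bin" within
 * arenas_node (position 8), then hands "0" to arenas_bin_i_index() since
 * arenas_bin_node is indexed, and finally matches the terminal "size" node.
 * On success *depthp == 4 and mibp holds {9, 8, 0, 0} -- exactly the MIB
 * form that ctl_bymib() consumes below.
 */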

int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	size_t depth;
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
	    &depth);
	if (ret != 0) {
		goto label_return;
	}

	if (node != NULL && node->ctl) {
		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
	} else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return ret;
}
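
/*
 * ctl_byname() is the engine behind the public mallctl() entry point.  A
 * minimal consumer-side sketch (illustrative only, not part of this file):
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	unsigned narenas;
 *	size_t sz = sizeof(narenas);
 *	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0) {
 *		... narenas now holds the value read via ctl_byname() ...
 *	}
 *
 * The oldp/oldlenp pair reads the current value; newp/newlen would write
 * one, mirroring the parameters threaded through this file.
 */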
1586
1587int
1588ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
1589 int ret;
1590
1591 if (!ctl_initialized && ctl_init(tsd)) {
1592 ret = EAGAIN;
1593 goto label_return;
1594 }
1595
1596 ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
1597 miblenp);
1598label_return:
	return ret;
1600}
1601
1602static int
1603ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
1604 const size_t *mib, size_t miblen) {
1605 int ret;
1606
1607 const ctl_named_node_t *node = super_root_node;
1608 for (size_t i = 0; i < miblen; i++) {
1609 assert(node);
1610 assert(node->nchildren > 0);
1611 if (ctl_named_node(node->children) != NULL) {
1612 /* Children are named. */
1613 if (node->nchildren <= mib[i]) {
1614 ret = ENOENT;
1615 goto label_return;
1616 }
1617 node = ctl_named_children(node, mib[i]);
1618 } else {
1619 const ctl_indexed_node_t *inode;
1620
1621 /* Indexed element. */
1622 inode = ctl_indexed_node(node->children);
1623 node = inode->index(tsdn, mib, miblen, mib[i]);
1624 if (node == NULL) {
1625 ret = ENOENT;
1626 goto label_return;
1627 }
1628 }
1629 }
1630 assert(ending_nodep != NULL);
1631 *ending_nodep = node;
1632 ret = 0;
1633
1634label_return:
	return ret;
1636}
1637
1638int
1639ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
1640 size_t *oldlenp, void *newp, size_t newlen) {
1641 int ret;
1642 const ctl_named_node_t *node;
1643
1644 if (!ctl_initialized && ctl_init(tsd)) {
1645 ret = EAGAIN;
1646 goto label_return;
1647 }
1648
1649 ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
1650 if (ret != 0) {
1651 goto label_return;
1652 }
1653
1654 /* Call the ctl function. */
1655 if (node && node->ctl) {
1656 ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
1657 } else {
1658 /* Partial MIB. */
1659 ret = ENOENT;
1660 }
1661
1662label_return:
	return ret;
1664}
1665
1666int
1667ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
1668 size_t *miblenp) {
1669 int ret;
1670 const ctl_named_node_t *node;
1671
1672 if (!ctl_initialized && ctl_init(tsd)) {
1673 ret = EAGAIN;
1674 goto label_return;
1675 }
1676
1677 ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
1678 if (ret != 0) {
1679 goto label_return;
1680 }
1681 if (node == NULL || node->ctl != NULL) {
1682 ret = ENOENT;
1683 goto label_return;
1684 }
1685
1686 assert(miblenp != NULL);
1687 assert(*miblenp >= miblen);
1688 *miblenp -= miblen;
1689 ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
1690 miblenp);
1691 *miblenp += miblen;
1692label_return:
	return ret;
1694}
1695
1696int
1697ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
1698 size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1699 int ret;
1700 const ctl_named_node_t *node;
1701
1702 if (!ctl_initialized && ctl_init(tsd)) {
1703 ret = EAGAIN;
1704 goto label_return;
1705 }
1706
1707 ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
1708 if (ret != 0) {
1709 goto label_return;
1710 }
1711 if (node == NULL || node->ctl != NULL) {
1712 ret = ENOENT;
1713 goto label_return;
1714 }
1715
1716 assert(miblenp != NULL);
1717 assert(*miblenp >= miblen);
1718 *miblenp -= miblen;
1719 /*
1720 * The same node supplies the starting node and stores the ending node.
1721 */
1722 ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
1723 miblenp);
1724 *miblenp += miblen;
1725 if (ret != 0) {
1726 goto label_return;
1727 }
1728
1729 if (node != NULL && node->ctl) {
1730 ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
1731 newlen);
1732 } else {
1733 /* The name refers to a partial path through the ctl tree. */
1734 ret = ENOENT;
1735 }
1736
1737label_return:
	return ret;
1739}
1740
1741bool
1742ctl_boot(void) {
1743 if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
1744 malloc_mutex_rank_exclusive)) {
1745 return true;
1746 }
1747
1748 ctl_initialized = false;
1749
1750 return false;
1751}
1752
1753void
1754ctl_prefork(tsdn_t *tsdn) {
1755 malloc_mutex_prefork(tsdn, &ctl_mtx);
1756}
1757
1758void
1759ctl_postfork_parent(tsdn_t *tsdn) {
1760 malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
1761}
1762
1763void
1764ctl_postfork_child(tsdn_t *tsdn) {
1765 malloc_mutex_postfork_child(tsdn, &ctl_mtx);
1766}
1767
1768void
1769ctl_mtx_assert_held(tsdn_t *tsdn) {
1770 malloc_mutex_assert_owner(tsdn, &ctl_mtx);
1771}
1772
1773/******************************************************************************/
1774/* *_ctl() functions. */
1775
1776#define READONLY() do { \
1777 if (newp != NULL || newlen != 0) { \
1778 ret = EPERM; \
1779 goto label_return; \
1780 } \
1781} while (0)
1782
1783#define WRITEONLY() do { \
1784 if (oldp != NULL || oldlenp != NULL) { \
1785 ret = EPERM; \
1786 goto label_return; \
1787 } \
1788} while (0)
1789
1790/* Can read or write, but not both. */
1791#define READ_XOR_WRITE() do { \
1792 if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
1793 newlen != 0)) { \
1794 ret = EPERM; \
1795 goto label_return; \
1796 } \
1797} while (0)
1798
1799/* Can neither read nor write. */
1800#define NEITHER_READ_NOR_WRITE() do { \
1801 if (oldp != NULL || oldlenp != NULL || newp != NULL || \
1802 newlen != 0) { \
1803 ret = EPERM; \
1804 goto label_return; \
1805 } \
1806} while (0)
1807
/* Verify that the read request provides space for exactly one t. */
#define VERIFY_READ(t)	do {						\
	if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) {	\
		if (oldlenp != NULL) {					\
			*oldlenp = 0;					\
		}							\
		ret = EINVAL;						\
		goto label_return;					\
	}								\
} while (0)
1816
1817#define READ(v, t) do { \
1818 if (oldp != NULL && oldlenp != NULL) { \
1819 if (*oldlenp != sizeof(t)) { \
1820 size_t copylen = (sizeof(t) <= *oldlenp) \
1821 ? sizeof(t) : *oldlenp; \
1822 memcpy(oldp, (void *)&(v), copylen); \
1823 *oldlenp = copylen; \
1824 ret = EINVAL; \
1825 goto label_return; \
1826 } \
1827 *(t *)oldp = (v); \
1828 } \
1829} while (0)
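
/*
 * Caller-visible contract of READ(): when *oldlenp mismatches sizeof(t),
 * min(sizeof(t), *oldlenp) bytes are copied, *oldlenp is set to the copied
 * length, and EINVAL is returned.  A sketch through the public wrapper
 * (illustrative name; "arenas.narenas" is an unsigned):
 *
 *	uint16_t narrow;
 *	size_t sz = sizeof(narrow);
 *	int err = mallctl("arenas.narenas", &narrow, &sz, NULL, 0);
 *	(err == EINVAL, sz == 2, and only the first two bytes were copied)
 */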
1830
1831#define WRITE(v, t) do { \
1832 if (newp != NULL) { \
1833 if (newlen != sizeof(t)) { \
1834 ret = EINVAL; \
1835 goto label_return; \
1836 } \
1837 (v) = *(t *)newp; \
1838 } \
1839} while (0)
1840
1841#define ASSURED_WRITE(v, t) do { \
1842 if (newp == NULL || newlen != sizeof(t)) { \
1843 ret = EINVAL; \
1844 goto label_return; \
1845 } \
1846 (v) = *(t *)newp; \
1847} while (0)
1848
1849#define MIB_UNSIGNED(v, i) do { \
1850 if (mib[i] > UINT_MAX) { \
1851 ret = EFAULT; \
1852 goto label_return; \
1853 } \
1854 v = (unsigned)mib[i]; \
1855} while (0)
1856
1857/*
1858 * There's a lot of code duplication in the following macros due to limitations
1859 * in how nested cpp macros are expanded.
1860 */
1861#define CTL_RO_CLGEN(c, l, n, v, t) \
1862static int \
1863n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1864 size_t *oldlenp, void *newp, size_t newlen) { \
1865 int ret; \
1866 t oldval; \
1867 \
1868 if (!(c)) { \
1869 return ENOENT; \
1870 } \
1871 if (l) { \
1872 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1873 } \
1874 READONLY(); \
1875 oldval = (v); \
1876 READ(oldval, t); \
1877 \
1878 ret = 0; \
1879label_return: \
1880 if (l) { \
1881 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1882 } \
1883 return ret; \
1884}
1885
1886#define CTL_RO_CGEN(c, n, v, t) \
1887static int \
1888n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1889 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1890 int ret; \
1891 t oldval; \
1892 \
1893 if (!(c)) { \
1894 return ENOENT; \
1895 } \
1896 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1897 READONLY(); \
1898 oldval = (v); \
1899 READ(oldval, t); \
1900 \
1901 ret = 0; \
1902label_return: \
1903 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1904 return ret; \
1905}
1906
1907#define CTL_RO_GEN(n, v, t) \
1908static int \
1909n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
1910 size_t *oldlenp, void *newp, size_t newlen) { \
1911 int ret; \
1912 t oldval; \
1913 \
1914 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
1915 READONLY(); \
1916 oldval = (v); \
1917 READ(oldval, t); \
1918 \
1919 ret = 0; \
1920label_return: \
1921 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
1922 return ret; \
1923}
1924
1925/*
1926 * ctl_mtx is not acquired, under the assumption that no pertinent data will
1927 * mutate during the call.
1928 */
1929#define CTL_RO_NL_CGEN(c, n, v, t) \
1930static int \
1931n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1932 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1933 int ret; \
1934 t oldval; \
1935 \
1936 if (!(c)) { \
1937 return ENOENT; \
1938 } \
1939 READONLY(); \
1940 oldval = (v); \
1941 READ(oldval, t); \
1942 \
1943 ret = 0; \
1944label_return: \
1945 return ret; \
1946}
1947
1948#define CTL_RO_NL_GEN(n, v, t) \
1949static int \
1950n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1951 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1952 int ret; \
1953 t oldval; \
1954 \
1955 READONLY(); \
1956 oldval = (v); \
1957 READ(oldval, t); \
1958 \
1959 ret = 0; \
1960label_return: \
1961 return ret; \
1962}
1963
1964#define CTL_RO_CONFIG_GEN(n, t) \
1965static int \
1966n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1967 void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
1968 int ret; \
1969 t oldval; \
1970 \
1971 READONLY(); \
1972 oldval = n; \
1973 READ(oldval, t); \
1974 \
1975 ret = 0; \
1976label_return: \
1977 return ret; \
1978}
1979
1980/******************************************************************************/
1981
1982CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
1983
1984static int
1985epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1986 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1987 int ret;
1988 UNUSED uint64_t newval;
1989
1990 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1991 WRITE(newval, uint64_t);
1992 if (newp != NULL) {
1993 ctl_refresh(tsd_tsdn(tsd));
1994 }
1995 READ(ctl_arenas->epoch, uint64_t);
1996
1997 ret = 0;
1998label_return:
1999 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2000 return ret;
2001}
2002
2003static int
2004background_thread_ctl(tsd_t *tsd, const size_t *mib,
2005 size_t miblen, void *oldp, size_t *oldlenp,
2006 void *newp, size_t newlen) {
2007 int ret;
2008 bool oldval;
2009
2010 if (!have_background_thread) {
2011 return ENOENT;
2012 }
2013 background_thread_ctl_init(tsd_tsdn(tsd));
2014
2015 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2016 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
2017 if (newp == NULL) {
2018 oldval = background_thread_enabled();
2019 READ(oldval, bool);
2020 } else {
2021 if (newlen != sizeof(bool)) {
2022 ret = EINVAL;
2023 goto label_return;
2024 }
2025 oldval = background_thread_enabled();
2026 READ(oldval, bool);
2027
2028 bool newval = *(bool *)newp;
2029 if (newval == oldval) {
2030 ret = 0;
2031 goto label_return;
2032 }
2033
2034 background_thread_enabled_set(tsd_tsdn(tsd), newval);
2035 if (newval) {
2036 if (background_threads_enable(tsd)) {
2037 ret = EFAULT;
2038 goto label_return;
2039 }
2040 } else {
2041 if (background_threads_disable(tsd)) {
2042 ret = EFAULT;
2043 goto label_return;
2044 }
2045 }
2046 }
2047 ret = 0;
2048label_return:
2049 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
2050 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2051
2052 return ret;
2053}
2054
2055static int
2056max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
2057 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2058 size_t newlen) {
2059 int ret;
2060 size_t oldval;
2061
2062 if (!have_background_thread) {
2063 return ENOENT;
2064 }
2065 background_thread_ctl_init(tsd_tsdn(tsd));
2066
2067 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2068 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
2069 if (newp == NULL) {
2070 oldval = max_background_threads;
2071 READ(oldval, size_t);
2072 } else {
2073 if (newlen != sizeof(size_t)) {
2074 ret = EINVAL;
2075 goto label_return;
2076 }
2077 oldval = max_background_threads;
2078 READ(oldval, size_t);
2079
2080 size_t newval = *(size_t *)newp;
2081 if (newval == oldval) {
2082 ret = 0;
2083 goto label_return;
2084 }
2085 if (newval > opt_max_background_threads) {
2086 ret = EINVAL;
2087 goto label_return;
2088 }
2089
2090 if (background_thread_enabled()) {
2091 background_thread_enabled_set(tsd_tsdn(tsd), false);
2092 if (background_threads_disable(tsd)) {
2093 ret = EFAULT;
2094 goto label_return;
2095 }
2096 max_background_threads = newval;
2097 background_thread_enabled_set(tsd_tsdn(tsd), true);
2098 if (background_threads_enable(tsd)) {
2099 ret = EFAULT;
2100 goto label_return;
2101 }
2102 } else {
2103 max_background_threads = newval;
2104 }
2105 }
2106 ret = 0;
2107label_return:
2108 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
2109 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2110
2111 return ret;
2112}
2113
2114/******************************************************************************/
2115
2116CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
2117CTL_RO_CONFIG_GEN(config_debug, bool)
2118CTL_RO_CONFIG_GEN(config_fill, bool)
2119CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
2120CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
2121CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool)
2122CTL_RO_CONFIG_GEN(config_prof, bool)
2123CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
2124CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
2125CTL_RO_CONFIG_GEN(config_stats, bool)
2126CTL_RO_CONFIG_GEN(config_utrace, bool)
2127CTL_RO_CONFIG_GEN(config_xmalloc, bool)
2128
2129/******************************************************************************/
2130
2131CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
2132CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
2133CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
2134CTL_RO_NL_GEN(opt_debug_double_free_max_scan,
2135 opt_debug_double_free_max_scan, unsigned)
2136CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
2137CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
2138
2139/* HPA options. */
2140CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
2141CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
2142 opt_hpa_opts.hugification_threshold, size_t)
2143CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
2144CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
2145 uint64_t)
2146
2147/*
2148 * This will have to change before we publicly document this option; fxp_t and
2149 * its representation are internal implementation details.
2150 */
2151CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
2152CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
2153
/* HPA SEC options. */
2155CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
2156CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
2157CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
2158CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
2159 size_t)
2160CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
2161 size_t)
2162
2163CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
2164 const char *)
2165CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
2166CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
2167CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
2168CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
2169 const char *)
2170CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
2171CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
2172CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
2173CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
2174CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
2175CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
2176CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
2177CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
2178CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
2179CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
2180CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
2181CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
2182CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
2183CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
2184CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
2185 opt_experimental_infallible_new, bool)
2186CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
2187CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
2188CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
2189 unsigned)
2190CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
2191 unsigned)
2192CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
2193CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
2194CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
2195CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
2196CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
2197 unsigned)
2198CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
2199 unsigned)
2200CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
2201CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
2202 size_t)
2203CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
2204CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
2205CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
2206CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
2207 opt_prof_thread_active_init, bool)
2208CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
2209CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
2210CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
2211CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
2212CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
2213CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
2214CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
2215CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
2216 opt_prof_recent_alloc_max, ssize_t)
2217CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
2218CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
2219 bool)
2220CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
2221 prof_time_res_mode_names[opt_prof_time_res], const char *)
2222CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
2223 opt_lg_san_uaf_align, ssize_t)
2224CTL_RO_NL_GEN(opt_zero_realloc,
2225 zero_realloc_mode_names[opt_zero_realloc_action], const char *)
2226
2227/******************************************************************************/
2228
2229static int
2230thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2231 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2232 int ret;
2233 arena_t *oldarena;
2234 unsigned newind, oldind;
2235
2236 oldarena = arena_choose(tsd, NULL);
2237 if (oldarena == NULL) {
2238 return EAGAIN;
2239 }
2240 newind = oldind = arena_ind_get(oldarena);
2241 WRITE(newind, unsigned);
2242 READ(oldind, unsigned);
2243
2244 if (newind != oldind) {
2245 arena_t *newarena;
2246
2247 if (newind >= narenas_total_get()) {
2248 /* New arena index is out of range. */
2249 ret = EFAULT;
2250 goto label_return;
2251 }
2252
2253 if (have_percpu_arena &&
2254 PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
2255 if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
2256 /*
2257 * If perCPU arena is enabled, thread_arena
2258 * control is not allowed for the auto arena
2259 * range.
2260 */
2261 ret = EPERM;
2262 goto label_return;
2263 }
2264 }
2265
2266 /* Initialize arena if necessary. */
2267 newarena = arena_get(tsd_tsdn(tsd), newind, true);
2268 if (newarena == NULL) {
2269 ret = EAGAIN;
2270 goto label_return;
2271 }
2272 /* Set new arena/tcache associations. */
2273 arena_migrate(tsd, oldarena, newarena);
2274 if (tcache_available(tsd)) {
2275 tcache_arena_reassociate(tsd_tsdn(tsd),
2276 tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
2277 newarena);
2278 }
2279 }
2280
2281 ret = 0;
2282label_return:
2283 return ret;
2284}
2285
2286CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
2287CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
2288CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
2289CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
2290
2291static int
2292thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
2293 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2294 size_t newlen) {
2295 int ret;
2296 bool oldval;
2297
2298 oldval = tcache_enabled_get(tsd);
2299 if (newp != NULL) {
2300 if (newlen != sizeof(bool)) {
2301 ret = EINVAL;
2302 goto label_return;
2303 }
2304 tcache_enabled_set(tsd, *(bool *)newp);
2305 }
2306 READ(oldval, bool);
2307
2308 ret = 0;
2309label_return:
2310 return ret;
2311}
2312
2313static int
2314thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
2315 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2316 size_t newlen) {
2317 int ret;
2318
2319 if (!tcache_available(tsd)) {
2320 ret = EFAULT;
2321 goto label_return;
2322 }
2323
2324 NEITHER_READ_NOR_WRITE();
2325
2326 tcache_flush(tsd);
2327
2328 ret = 0;
2329label_return:
2330 return ret;
2331}
2332
2333static int
2334thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
2335 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2336 size_t newlen) {
2337 int ret;
2338 if (!config_stats) {
2339 return ENOENT;
2340 }
2341 READONLY();
2342 peak_event_update(tsd);
2343 uint64_t result = peak_event_max(tsd);
2344 READ(result, uint64_t);
2345 ret = 0;
2346label_return:
2347 return ret;
2348}
2349
2350static int
2351thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
2352 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2353 size_t newlen) {
2354 int ret;
2355 if (!config_stats) {
2356 return ENOENT;
2357 }
2358 NEITHER_READ_NOR_WRITE();
2359 peak_event_zero(tsd);
2360 ret = 0;
2361label_return:
2362 return ret;
2363}
2364
2365static int
2366thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
2367 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2368 size_t newlen) {
2369 int ret;
2370
2371 if (!config_prof || !opt_prof) {
2372 return ENOENT;
2373 }
2374
2375 READ_XOR_WRITE();
2376
2377 if (newp != NULL) {
2378 if (newlen != sizeof(const char *)) {
2379 ret = EINVAL;
2380 goto label_return;
2381 }
2382
2383 if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
2384 0) {
2385 goto label_return;
2386 }
2387 } else {
2388 const char *oldname = prof_thread_name_get(tsd);
2389 READ(oldname, const char *);
2390 }
2391
2392 ret = 0;
2393label_return:
2394 return ret;
2395}
2396
2397static int
2398thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
2399 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2400 size_t newlen) {
2401 int ret;
2402 bool oldval;
2403
2404 if (!config_prof) {
2405 return ENOENT;
2406 }
2407
2408 oldval = opt_prof ? prof_thread_active_get(tsd) : false;
2409 if (newp != NULL) {
2410 if (!opt_prof) {
2411 ret = ENOENT;
2412 goto label_return;
2413 }
2414 if (newlen != sizeof(bool)) {
2415 ret = EINVAL;
2416 goto label_return;
2417 }
2418 if (prof_thread_active_set(tsd, *(bool *)newp)) {
2419 ret = EAGAIN;
2420 goto label_return;
2421 }
2422 }
2423 READ(oldval, bool);
2424
2425 ret = 0;
2426label_return:
2427 return ret;
2428}
2429
2430static int
2431thread_idle_ctl(tsd_t *tsd, const size_t *mib,
2432 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2433 size_t newlen) {
2434 int ret;
2435
2436 NEITHER_READ_NOR_WRITE();
2437
2438 if (tcache_available(tsd)) {
2439 tcache_flush(tsd);
2440 }
2441 /*
2442 * This heuristic is perhaps not the most well-considered. But it
2443 * matches the only idling policy we have experience with in the status
2444 * quo. Over time we should investigate more principled approaches.
2445 */
2446 if (opt_narenas > ncpus * 2) {
2447 arena_t *arena = arena_choose(tsd, NULL);
2448 if (arena != NULL) {
2449 arena_decay(tsd_tsdn(tsd), arena, false, true);
2450 }
2451 /*
2452 * The missing arena case is not actually an error; a thread
2453 * might be idle before it associates itself to one. This is
2454 * unusual, but not wrong.
2455 */
2456 }
2457
2458 ret = 0;
2459label_return:
2460 return ret;
2461}
2462
2463/******************************************************************************/
2464
2465static int
2466tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2467 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2468 int ret;
2469 unsigned tcache_ind;
2470
2471 READONLY();
2472 VERIFY_READ(unsigned);
2473 if (tcaches_create(tsd, b0get(), &tcache_ind)) {
2474 ret = EFAULT;
2475 goto label_return;
2476 }
2477 READ(tcache_ind, unsigned);
2478
2479 ret = 0;
2480label_return:
2481 return ret;
2482}
2483
2484static int
2485tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2486 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2487 int ret;
2488 unsigned tcache_ind;
2489
2490 WRITEONLY();
2491 ASSURED_WRITE(tcache_ind, unsigned);
2492 tcaches_flush(tsd, tcache_ind);
2493
2494 ret = 0;
2495label_return:
2496 return ret;
2497}
2498
2499static int
2500tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2501 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2502 int ret;
2503 unsigned tcache_ind;
2504
2505 WRITEONLY();
2506 ASSURED_WRITE(tcache_ind, unsigned);
2507 tcaches_destroy(tsd, tcache_ind);
2508
2509 ret = 0;
2510label_return:
2511 return ret;
2512}
2513
2514/******************************************************************************/
2515
2516static int
2517arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2518 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2519 int ret;
2520 tsdn_t *tsdn = tsd_tsdn(tsd);
2521 unsigned arena_ind;
2522 bool initialized;
2523
2524 READONLY();
2525 MIB_UNSIGNED(arena_ind, 1);
2526
2527 malloc_mutex_lock(tsdn, &ctl_mtx);
2528 initialized = arenas_i(arena_ind)->initialized;
2529 malloc_mutex_unlock(tsdn, &ctl_mtx);
2530
2531 READ(initialized, bool);
2532
2533 ret = 0;
2534label_return:
2535 return ret;
2536}
2537
2538static void
2539arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
2540 malloc_mutex_lock(tsdn, &ctl_mtx);
2541 {
2542 unsigned narenas = ctl_arenas->narenas;
2543
2544 /*
2545 * Access via index narenas is deprecated, and scheduled for
2546 * removal in 6.0.0.
2547 */
2548 if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
2549 unsigned i;
2550 VARIABLE_ARRAY(arena_t *, tarenas, narenas);
2551
2552 for (i = 0; i < narenas; i++) {
2553 tarenas[i] = arena_get(tsdn, i, false);
2554 }
2555
2556 /*
2557 * No further need to hold ctl_mtx, since narenas and
2558 * tarenas contain everything needed below.
2559 */
2560 malloc_mutex_unlock(tsdn, &ctl_mtx);
2561
2562 for (i = 0; i < narenas; i++) {
2563 if (tarenas[i] != NULL) {
2564 arena_decay(tsdn, tarenas[i], false,
2565 all);
2566 }
2567 }
2568 } else {
2569 arena_t *tarena;
2570
2571 assert(arena_ind < narenas);
2572
2573 tarena = arena_get(tsdn, arena_ind, false);
2574
2575 /* No further need to hold ctl_mtx. */
2576 malloc_mutex_unlock(tsdn, &ctl_mtx);
2577
2578 if (tarena != NULL) {
2579 arena_decay(tsdn, tarena, false, all);
2580 }
2581 }
2582 }
2583}
2584
2585static int
2586arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2587 size_t *oldlenp, void *newp, size_t newlen) {
2588 int ret;
2589 unsigned arena_ind;
2590
2591 NEITHER_READ_NOR_WRITE();
2592 MIB_UNSIGNED(arena_ind, 1);
2593 arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
2594
2595 ret = 0;
2596label_return:
2597 return ret;
2598}
2599
2600static int
2601arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2602 size_t *oldlenp, void *newp, size_t newlen) {
2603 int ret;
2604 unsigned arena_ind;
2605
2606 NEITHER_READ_NOR_WRITE();
2607 MIB_UNSIGNED(arena_ind, 1);
2608 arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
2609
2610 ret = 0;
2611label_return:
2612 return ret;
2613}
2614
2615static int
2616arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
2617 void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
2618 arena_t **arena) {
2619 int ret;
2620
2621 NEITHER_READ_NOR_WRITE();
2622 MIB_UNSIGNED(*arena_ind, 1);
2623
2624 *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
2625 if (*arena == NULL || arena_is_auto(*arena)) {
2626 ret = EFAULT;
2627 goto label_return;
2628 }
2629
2630 ret = 0;
2631label_return:
2632 return ret;
2633}
2634
2635static void
2636arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
2637 /* Temporarily disable the background thread during arena reset. */
2638 if (have_background_thread) {
2639 malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
2640 if (background_thread_enabled()) {
2641 background_thread_info_t *info =
2642 background_thread_info_get(arena_ind);
2643 assert(info->state == background_thread_started);
2644 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
2645 info->state = background_thread_paused;
2646 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
2647 }
2648 }
2649}
2650
2651static void
2652arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
2653 if (have_background_thread) {
2654 if (background_thread_enabled()) {
2655 background_thread_info_t *info =
2656 background_thread_info_get(arena_ind);
2657 assert(info->state == background_thread_paused);
2658 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
2659 info->state = background_thread_started;
2660 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
2661 }
2662 malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
2663 }
2664}
2665
2666static int
2667arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2668 size_t *oldlenp, void *newp, size_t newlen) {
2669 int ret;
2670 unsigned arena_ind;
2671 arena_t *arena;
2672
2673 ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
2674 newp, newlen, &arena_ind, &arena);
2675 if (ret != 0) {
2676 return ret;
2677 }
2678
2679 arena_reset_prepare_background_thread(tsd, arena_ind);
2680 arena_reset(tsd, arena);
2681 arena_reset_finish_background_thread(tsd, arena_ind);
2682
2683 return ret;
2684}
2685
2686static int
2687arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2688 size_t *oldlenp, void *newp, size_t newlen) {
2689 int ret;
2690 unsigned arena_ind;
2691 arena_t *arena;
2692 ctl_arena_t *ctl_darena, *ctl_arena;
2693
2694 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2695
2696 ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
2697 newp, newlen, &arena_ind, &arena);
2698 if (ret != 0) {
2699 goto label_return;
2700 }
2701
2702 if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
2703 true) != 0) {
2704 ret = EFAULT;
2705 goto label_return;
2706 }
2707
2708 arena_reset_prepare_background_thread(tsd, arena_ind);
2709 /* Merge stats after resetting and purging arena. */
2710 arena_reset(tsd, arena);
2711 arena_decay(tsd_tsdn(tsd), arena, false, true);
2712 ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
2713 ctl_darena->initialized = true;
2714 ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
2715 /* Destroy arena. */
2716 arena_destroy(tsd, arena);
2717 ctl_arena = arenas_i(arena_ind);
2718 ctl_arena->initialized = false;
2719 /* Record arena index for later recycling via arenas.create. */
2720 ql_elm_new(ctl_arena, destroyed_link);
2721 ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
2722 arena_reset_finish_background_thread(tsd, arena_ind);
2723
2724 assert(ret == 0);
2725label_return:
2726 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2727
2728 return ret;
2729}
2730
2731static int
2732arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2733 size_t *oldlenp, void *newp, size_t newlen) {
2734 int ret;
2735 const char *dss = NULL;
2736 unsigned arena_ind;
2737 dss_prec_t dss_prec_old = dss_prec_limit;
2738 dss_prec_t dss_prec = dss_prec_limit;
2739
2740 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2741 WRITE(dss, const char *);
2742 MIB_UNSIGNED(arena_ind, 1);
2743 if (dss != NULL) {
2744 int i;
2745 bool match = false;
2746
2747 for (i = 0; i < dss_prec_limit; i++) {
2748 if (strcmp(dss_prec_names[i], dss) == 0) {
2749 dss_prec = i;
2750 match = true;
2751 break;
2752 }
2753 }
2754
2755 if (!match) {
2756 ret = EINVAL;
2757 goto label_return;
2758 }
2759 }
2760
2761 /*
2762 * Access via index narenas is deprecated, and scheduled for removal in
2763 * 6.0.0.
2764 */
2765 if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
2766 ctl_arenas->narenas) {
2767 if (dss_prec != dss_prec_limit &&
2768 extent_dss_prec_set(dss_prec)) {
2769 ret = EFAULT;
2770 goto label_return;
2771 }
2772 dss_prec_old = extent_dss_prec_get();
2773 } else {
2774 arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2775 if (arena == NULL || (dss_prec != dss_prec_limit &&
2776 arena_dss_prec_set(arena, dss_prec))) {
2777 ret = EFAULT;
2778 goto label_return;
2779 }
2780 dss_prec_old = arena_dss_prec_get(arena);
2781 }
2782
2783 dss = dss_prec_names[dss_prec_old];
2784 READ(dss, const char *);
2785
2786 ret = 0;
2787label_return:
2788 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2789 return ret;
2790}
2791
2792static int
2793arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2794 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2795 int ret;
2796
2797 unsigned arena_ind;
2798 MIB_UNSIGNED(arena_ind, 1);
2799
2800 arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2801 if (arena == NULL) {
2802 ret = EFAULT;
2803 goto label_return;
2804 }
2805
2806 if (oldp != NULL && oldlenp != NULL) {
2807 size_t oldval = atomic_load_zu(
2808 &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
2809 READ(oldval, size_t);
2810 }
2811 if (newp != NULL) {
2812 if (newlen != sizeof(size_t)) {
2813 ret = EINVAL;
2814 goto label_return;
2815 }
2816 atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
2817 *(size_t *)newp, ATOMIC_RELAXED);
2818 }
2819 ret = 0;
2820label_return:
2821 return ret;
2822}
2823
2824static int
2825arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
2826 void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
2827 int ret;
2828 unsigned arena_ind;
2829 arena_t *arena;
2830
2831 MIB_UNSIGNED(arena_ind, 1);
2832 arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2833 if (arena == NULL) {
2834 ret = EFAULT;
2835 goto label_return;
2836 }
2837 extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
2838
2839 if (oldp != NULL && oldlenp != NULL) {
		ssize_t oldval = arena_decay_ms_get(arena, state);
		READ(oldval, ssize_t);
2842 }
2843 if (newp != NULL) {
2844 if (newlen != sizeof(ssize_t)) {
2845 ret = EINVAL;
2846 goto label_return;
2847 }
2848 if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
2849 /*
2850 * By default the huge arena purges eagerly. If it is
2851 * set to non-zero decay time afterwards, background
2852 * thread might be needed.
2853 */
2854 if (background_thread_create(tsd, arena_ind)) {
2855 ret = EFAULT;
2856 goto label_return;
2857 }
2858 }
2859
2860 if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
2861 *(ssize_t *)newp)) {
2862 ret = EFAULT;
2863 goto label_return;
2864 }
2865 }
2866
2867 ret = 0;
2868label_return:
2869 return ret;
2870}
2871
2872static int
2873arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2874 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2875 return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2876 newlen, true);
2877}
2878
2879static int
2880arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2881 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2882 return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2883 newlen, false);
2884}
2885
2886static int
2887arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2888 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2889 int ret;
2890 unsigned arena_ind;
2891 arena_t *arena;
2892
2893 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2894 MIB_UNSIGNED(arena_ind, 1);
2895 if (arena_ind < narenas_total_get()) {
2896 extent_hooks_t *old_extent_hooks;
2897 arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2898 if (arena == NULL) {
2899 if (arena_ind >= narenas_auto) {
2900 ret = EFAULT;
2901 goto label_return;
2902 }
2903 old_extent_hooks =
2904 (extent_hooks_t *)&ehooks_default_extent_hooks;
2905 READ(old_extent_hooks, extent_hooks_t *);
2906 if (newp != NULL) {
2907 /* Initialize a new arena as a side effect. */
2908 extent_hooks_t *new_extent_hooks
2909 JEMALLOC_CC_SILENCE_INIT(NULL);
2910 WRITE(new_extent_hooks, extent_hooks_t *);
2911 arena_config_t config = arena_config_default;
2912 config.extent_hooks = new_extent_hooks;
2913
2914 arena = arena_init(tsd_tsdn(tsd), arena_ind,
2915 &config);
2916 if (arena == NULL) {
2917 ret = EFAULT;
2918 goto label_return;
2919 }
2920 }
2921 } else {
2922 if (newp != NULL) {
2923 extent_hooks_t *new_extent_hooks
2924 JEMALLOC_CC_SILENCE_INIT(NULL);
2925 WRITE(new_extent_hooks, extent_hooks_t *);
2926 old_extent_hooks = arena_set_extent_hooks(tsd,
2927 arena, new_extent_hooks);
2928 READ(old_extent_hooks, extent_hooks_t *);
2929 } else {
2930 old_extent_hooks =
2931 ehooks_get_extent_hooks_ptr(
2932 arena_get_ehooks(arena));
2933 READ(old_extent_hooks, extent_hooks_t *);
2934 }
2935 }
2936 } else {
2937 ret = EFAULT;
2938 goto label_return;
2939 }
2940 ret = 0;
2941label_return:
2942 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2943 return ret;
2944}
2945
2946static int
2947arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
2948 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2949 size_t newlen) {
2950 int ret;
2951 unsigned arena_ind;
2952 arena_t *arena;
2953
2954 if (!opt_retain) {
2955 /* Only relevant when retain is enabled. */
2956 return ENOENT;
2957 }
2958
2959 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2960 MIB_UNSIGNED(arena_ind, 1);
2961 if (arena_ind < narenas_total_get() && (arena =
2962 arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
2963 size_t old_limit, new_limit;
2964 if (newp != NULL) {
2965 WRITE(new_limit, size_t);
2966 }
2967 bool err = arena_retain_grow_limit_get_set(tsd, arena,
2968 &old_limit, newp != NULL ? &new_limit : NULL);
2969 if (!err) {
2970 READ(old_limit, size_t);
2971 ret = 0;
2972 } else {
2973 ret = EFAULT;
2974 }
2975 } else {
2976 ret = EFAULT;
2977 }
2978label_return:
2979 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2980 return ret;
2981}
2982
2983static const ctl_named_node_t *
2984arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2985 size_t i) {
2986 const ctl_named_node_t *ret;
2987
2988 malloc_mutex_lock(tsdn, &ctl_mtx);
2989 switch (i) {
2990 case MALLCTL_ARENAS_ALL:
2991 case MALLCTL_ARENAS_DESTROYED:
2992 break;
2993 default:
2994 if (i > ctl_arenas->narenas) {
2995 ret = NULL;
2996 goto label_return;
2997 }
2998 break;
2999 }
3000
3001 ret = super_arena_i_node;
3002label_return:
3003 malloc_mutex_unlock(tsdn, &ctl_mtx);
3004 return ret;
3005}
3006
3007/******************************************************************************/
3008
3009static int
3010arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3011 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3012 int ret;
3013 unsigned narenas;
3014
3015 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
3016 READONLY();
3017 narenas = ctl_arenas->narenas;
3018 READ(narenas, unsigned);
3019
3020 ret = 0;
3021label_return:
3022 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
3023 return ret;
3024}
3025
3026static int
3027arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
3028 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
3029 size_t newlen, bool dirty) {
3030 int ret;
3031
3032 if (oldp != NULL && oldlenp != NULL) {
		ssize_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
		    arena_muzzy_decay_ms_default_get());
		READ(oldval, ssize_t);
3036 }
3037 if (newp != NULL) {
3038 if (newlen != sizeof(ssize_t)) {
3039 ret = EINVAL;
3040 goto label_return;
3041 }
3042 if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
3043 : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
3044 ret = EFAULT;
3045 goto label_return;
3046 }
3047 }
3048
3049 ret = 0;
3050label_return:
3051 return ret;
3052}
3053
3054static int
3055arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3056 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3057 return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
3058 newlen, true);
3059}
3060
3061static int
3062arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3063 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3064 return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
3065 newlen, false);
3066}
3067
3068CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
3069CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
3070CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
3071CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
3072CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
3073CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
3074CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
3075CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
3076CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
3077static const ctl_named_node_t *
3078arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
3079 size_t miblen, size_t i) {
	if (i >= SC_NBINS) {
3081 return NULL;
3082 }
3083 return super_arenas_bin_i_node;
3084}
3085
3086CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
3087CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
3088 size_t)
3089static const ctl_named_node_t *
3090arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
3091 size_t miblen, size_t i) {
	if (i >= SC_NSIZES - SC_NBINS) {
3093 return NULL;
3094 }
3095 return super_arenas_lextent_i_node;
3096}
3097
3098static int
3099arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3100 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3101 int ret;
3102 unsigned arena_ind;
3103
3104 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
3105
3106 VERIFY_READ(unsigned);
3107 arena_config_t config = arena_config_default;
3108 WRITE(config.extent_hooks, extent_hooks_t *);
3109 if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
3110 ret = EAGAIN;
3111 goto label_return;
3112 }
3113 READ(arena_ind, unsigned);
3114
3115 ret = 0;
3116label_return:
3117 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
3118 return ret;
3119}
3120
3121static int
3122experimental_arenas_create_ext_ctl(tsd_t *tsd,
3123 const size_t *mib, size_t miblen,
3124 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3125 int ret;
3126 unsigned arena_ind;
3127
3128 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
3129
3130 arena_config_t config = arena_config_default;
3131 VERIFY_READ(unsigned);
3132 WRITE(config, arena_config_t);
3133
3134 if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
3135 ret = EAGAIN;
3136 goto label_return;
3137 }
3138 READ(arena_ind, unsigned);
3139 ret = 0;
3140label_return:
3141 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
3142 return ret;
3143}
3144
3145static int
3146arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
3147 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
3148 size_t newlen) {
3149 int ret;
3150 unsigned arena_ind;
3151 void *ptr;
3152 edata_t *edata;
3153 arena_t *arena;
3154
3155 ptr = NULL;
3156 ret = EINVAL;
3157 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
3158 WRITE(ptr, void *);
3159 edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
3160 if (edata == NULL) {
3161 goto label_return;
3162 }
3163
3164 arena = arena_get_from_edata(edata);
3165 if (arena == NULL) {
3166 goto label_return;
3167 }
3168
3169 arena_ind = arena_ind_get(arena);
3170 READ(arena_ind, unsigned);
3171
3172 ret = 0;
3173label_return:
3174 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
3175 return ret;
3176}
3177
3178/******************************************************************************/
3179
3180static int
3181prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
3182 size_t miblen, void *oldp, size_t *oldlenp, void *newp,
3183 size_t newlen) {
3184 int ret;
3185 bool oldval;
3186
3187 if (!config_prof) {
3188 return ENOENT;
3189 }
3190
3191 if (newp != NULL) {
3192 if (!opt_prof) {
3193 ret = ENOENT;
3194 goto label_return;
3195 }
3196 if (newlen != sizeof(bool)) {
3197 ret = EINVAL;
3198 goto label_return;
3199 }
3200 oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
3201 *(bool *)newp);
3202 } else {
3203 oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
3204 false;
3205 }
3206 READ(oldval, bool);
3207
3208 ret = 0;
3209label_return:
3210 return ret;
3211}
3212
3213static int
3214prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3215 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3216 int ret;
3217 bool oldval;
3218
3219 if (!config_prof) {
3220 ret = ENOENT;
3221 goto label_return;
3222 }
3223
3224 if (newp != NULL) {
3225 if (newlen != sizeof(bool)) {
3226 ret = EINVAL;
3227 goto label_return;
3228 }
3229 bool val = *(bool *)newp;
3230 if (!opt_prof) {
3231 if (val) {
3232 ret = ENOENT;
3233 goto label_return;
3234 } else {
3235 /* No change needed (already off). */
3236 oldval = false;
3237 }
3238 } else {
3239 oldval = prof_active_set(tsd_tsdn(tsd), val);
3240 }
3241 } else {
3242 oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
3243 }
3244 READ(oldval, bool);
3245
3246 ret = 0;
3247label_return:
3248 return ret;
3249}
3250
3251static int
3252prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3253 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3254 int ret;
3255 const char *filename = NULL;
3256
3257 if (!config_prof || !opt_prof) {
3258 return ENOENT;
3259 }
3260
3261 WRITEONLY();
3262 WRITE(filename, const char *);
3263
3264 if (prof_mdump(tsd, filename)) {
3265 ret = EFAULT;
3266 goto label_return;
3267 }
3268
3269 ret = 0;
3270label_return:
3271 return ret;
3272}
3273
3274static int
3275prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3276 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3277 int ret;
3278 bool oldval;
3279
3280 if (!config_prof) {
3281 return ENOENT;
3282 }
3283
3284 if (newp != NULL) {
3285 if (!opt_prof) {
3286 ret = ENOENT;
3287 goto label_return;
3288 }
3289 if (newlen != sizeof(bool)) {
3290 ret = EINVAL;
3291 goto label_return;
3292 }
3293 oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
3294 } else {
3295 oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
3296 }
3297 READ(oldval, bool);
3298
3299 ret = 0;
3300label_return:
3301 return ret;
3302}
3303
3304static int
3305prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3306 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3307 int ret;
3308 const char *prefix = NULL;
3309
3310 if (!config_prof || !opt_prof) {
3311 return ENOENT;
3312 }
3313
3314 malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
3315 WRITEONLY();
3316 WRITE(prefix, const char *);
3317
3318 ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
3319label_return:
3320 malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
3321 return ret;
3322}
3323
3324static int
3325prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3326 void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3327 int ret;
3328 size_t lg_sample = lg_prof_sample;
3329
3330 if (!config_prof || !opt_prof) {
3331 return ENOENT;
3332 }
3333
3334 WRITEONLY();
3335 WRITE(lg_sample, size_t);
3336 if (lg_sample >= (sizeof(uint64_t) << 3)) {
3337 lg_sample = (sizeof(uint64_t) << 3) - 1;
3338 }
3339
3340 prof_reset(tsd, lg_sample);
3341
3342 ret = 0;
3343label_return:
3344 return ret;
3345}
3346
3347CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
3348CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
3349
3350static int
3351prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
3352 size_t *oldlenp, void *newp, size_t newlen) {
3353 int ret;
3354
3355 const char *filename = NULL;
3356
3357 if (!config_prof || !opt_prof) {
3358 return ENOENT;
3359 }
3360
3361 WRITEONLY();
3362 WRITE(filename, const char *);
3363
3364 if (prof_log_start(tsd_tsdn(tsd), filename)) {
3365 ret = EFAULT;
3366 goto label_return;
3367 }
3368
3369 ret = 0;
3370label_return:
3371 return ret;
3372}
3373
3374static int
3375prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
3376 size_t *oldlenp, void *newp, size_t newlen) {
3377 if (!config_prof || !opt_prof) {
3378 return ENOENT;
3379 }
3380
3381 if (prof_log_stop(tsd_tsdn(tsd))) {
3382 return EFAULT;
3383 }
3384
3385 return 0;
3386}
3387
3388static int
3389experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
3390 size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3391 int ret;
3392
3393 if (oldp == NULL && newp == NULL) {
3394 ret = EINVAL;
3395 goto label_return;
3396 }
3397 if (oldp != NULL) {
3398 prof_backtrace_hook_t old_hook =
3399 prof_backtrace_hook_get();
3400 READ(old_hook, prof_backtrace_hook_t);
3401 }
3402 if (newp != NULL) {
3403 if (!opt_prof) {
3404 ret = ENOENT;
3405 goto label_return;
3406 }
3407 prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
3408 WRITE(new_hook, prof_backtrace_hook_t);
3409 if (new_hook == NULL) {
3410 ret = EINVAL;
3411 goto label_return;
3412 }
3413 prof_backtrace_hook_set(new_hook);
3414 }
3415 ret = 0;
3416label_return:
3417 return ret;
3418}
3419
3420static int
3421experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
3422 size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3423 int ret;
3424
3425 if (oldp == NULL && newp == NULL) {
3426 ret = EINVAL;
3427 goto label_return;
3428 }
3429 if (oldp != NULL) {
3430 prof_dump_hook_t old_hook =
3431 prof_dump_hook_get();
3432 READ(old_hook, prof_dump_hook_t);
3433 }
3434 if (newp != NULL) {
3435 if (!opt_prof) {
3436 ret = ENOENT;
3437 goto label_return;
3438 }
3439 prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
3440 WRITE(new_hook, prof_dump_hook_t);
3441 prof_dump_hook_set(new_hook);
3442 }
3443 ret = 0;
3444label_return:
3445 return ret;
3446}
3447
/* For integration test purposes only; no plan to move out of experimental. */
3449static int
3450experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
3451 size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3452 int ret;
3453
3454 WRITEONLY();
3455 if (newp != NULL) {
3456 if (newlen != sizeof(safety_check_abort_hook_t)) {
3457 ret = EINVAL;
3458 goto label_return;
3459 }
3460 safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
3461 WRITE(hook, safety_check_abort_hook_t);
3462 safety_check_set_abort(hook);
3463 }
3464 ret = 0;
3465label_return:
3466 return ret;
3467}
3468
3469/******************************************************************************/
3470
3471CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
3472CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
3473CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
3474CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
3475CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
3476CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
3477CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
3478
3479CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
3480 ctl_stats->background_thread.num_threads, size_t)
3481CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
3482 ctl_stats->background_thread.num_runs, uint64_t)
3483CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
3484 nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
3485
3486CTL_RO_CGEN(config_stats, stats_zero_reallocs,
3487 atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)
3488
3489CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
3490CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
3491 ssize_t)
3492CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
3493 ssize_t)
3494CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
3495CTL_RO_GEN(stats_arenas_i_uptime,
3496 nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
3497CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
3498CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    arenas_i(mib[2])->astats->astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
    arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
    arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
    uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
    uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
    uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_base,
    arenas_i(mib[2])->astats->astats.base,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
    atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
    arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
    arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
    arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
    arenas_i(mib[2])->astats->astats.resident,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
    atomic_load_zu(
    &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
    ATOMIC_RELAXED), size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
    arenas_i(mib[2])->astats->secstats.bytes, size_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
    arenas_i(mib[2])->astats->nfills_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
    arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
/*
 * Note: "nmalloc_large" here instead of "nfills" in the read. This is
 * intentional (large has no batch fill).
 */
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
    arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
    arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)

/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
    l.n_lock_ops, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
    l.n_wait_times, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
    l.n_spin_acquired, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
    l.n_owner_switches, uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
    nstime_ns(&l.tot_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
    nstime_ns(&l.max_wait_time), uint64_t) \
CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
    l.max_n_thds, uint32_t)

/* Global mutexes. */
#define OP(mtx) \
	RO_MUTEX_CTL_GEN(mutexes_##mtx, \
	    ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
    arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* Per bin mutexes (stats.arenas.<i>.bins.<j>.mutex). */
RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
    arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
#undef RO_MUTEX_CTL_GEN
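
/*
 * Each RO_MUTEX_CTL_GEN expansion above yields a family of read-only mallctl
 * nodes. A minimal read sketch (a hedged example, assuming stats are enabled
 * and following the stats.mutexes.<mutex>.<counter> naming; error handling
 * omitted):
 *
 *	uint64_t nops;
 *	size_t sz = sizeof(nops);
 *	mallctl("stats.mutexes.ctl.num_ops", &nops, &sz, NULL, 0);
 */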

/* Resets all mutex stats, including global, arena and bin mutexes. */
static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}

	tsdn_t *tsdn = tsd_tsdn(tsd);

#define MUTEX_PROF_RESET(mtx) \
	malloc_mutex_lock(tsdn, &mtx); \
	malloc_mutex_prof_data_reset(tsdn, &mtx); \
	malloc_mutex_unlock(tsdn, &mtx);

	/* Global mutexes: ctl and prof. */
	MUTEX_PROF_RESET(ctl_mtx);
	if (have_background_thread) {
		MUTEX_PROF_RESET(background_thread_lock);
	}
	if (config_prof && opt_prof) {
		MUTEX_PROF_RESET(bt2gctx_mtx);
		MUTEX_PROF_RESET(tdatas_mtx);
		MUTEX_PROF_RESET(prof_dump_mtx);
		MUTEX_PROF_RESET(prof_recent_alloc_mtx);
		MUTEX_PROF_RESET(prof_recent_dump_mtx);
		MUTEX_PROF_RESET(prof_stats_mtx);
	}

	/* Per arena mutexes. */
	unsigned n = narenas_total_get();

	for (unsigned i = 0; i < n; i++) {
		arena_t *arena = arena_get(tsdn, i, false);
		if (!arena) {
			continue;
		}
		MUTEX_PROF_RESET(arena->large_mtx);
		MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
		MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
		MUTEX_PROF_RESET(arena->base->mtx);

		for (szind_t j = 0; j < SC_NBINS; j++) {
			for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
				bin_t *bin = arena_get_bin(arena, j, k);
				MUTEX_PROF_RESET(bin->lock);
			}
		}
	}
#undef MUTEX_PROF_RESET
	return 0;
}
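
/*
 * A minimal reset sketch (hedged: assumes stats are enabled; the node takes
 * no input or output, so all buffer arguments may be NULL):
 *
 *	mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
 */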

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
    arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)

static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	/* Valid bin indices are 0..SC_NBINS-1; bstats has SC_NBINS entries. */
	if (j >= SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_bins_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
    locked_read_u64_unsynchronized(
    &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
    arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)

static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	/* lstats has (SC_NSIZES - SC_NBINS) entries. */
	if (j >= SC_NSIZES - SC_NBINS) {
		return NULL;
	}
	return super_stats_arenas_i_lextents_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
    arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
    arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
    arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
    arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
    arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
    arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t)

static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	if (j >= SC_NPSIZES) {
		return NULL;
	}
	return super_stats_arenas_i_extents_j_node;
}

CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
    arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t)

/* Full, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t)

/* Full, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t)

/* Empty, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t)

/* Empty, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t)

/* Nonfull, nonhuge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
    size_t)

/* Nonfull, huge */
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
    size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
    arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
    size_t)

static const ctl_named_node_t *
stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t j) {
	if (j >= PSSET_NPSIZES) {
		return NULL;
	}
	return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
}

static bool
ctl_arenas_i_verify(size_t i) {
	size_t a = arenas_i2a_impl(i, true, true);
	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
		return true;
	}

	return false;
}

static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t i) {
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (ctl_arenas_i_verify(i)) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	if (oldp == NULL || oldlenp == NULL || newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	/*
	 * Note: this is a *private* struct. This is an experimental interface;
	 * forcing the user to know the jemalloc internals well enough to
	 * extract the ABI hopefully ensures nobody gets too comfortable with
	 * this API, which can change at a moment's notice.
	 */
	hooks_t hooks;
	WRITE(hooks, hooks_t);
	void *handle = hook_install(tsd_tsdn(tsd), &hooks);
	if (handle == NULL) {
		ret = EAGAIN;
		goto label_return;
	}
	READ(handle, void *);

	ret = 0;
label_return:
	return ret;
}

static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	WRITEONLY();
	void *handle = NULL;
	WRITE(handle, void *);
	if (handle == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	hook_remove(tsd_tsdn(tsd), handle);
	ret = 0;
label_return:
	return ret;
}
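
/*
 * A minimal install/remove sketch (hedged: hooks_t is the private type
 * described above, so callers must take its layout from the internal headers;
 * this sketch assumes a zero-initialized struct is an acceptable set of no-op
 * hooks, and omits error handling):
 *
 *	hooks_t hooks = {0};
 *	void *handle;
 *	size_t sz = sizeof(handle);
 *	mallctl("experimental.hooks.install", &handle, &sz,
 *	    &hooks, sizeof(hooks));
 *	...
 *	mallctl("experimental.hooks.remove", NULL, NULL,
 *	    &handle, sizeof(handle));
 */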

static int
experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!config_stats) {
		return ENOENT;
	}

	activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
	READ(t_old, activity_callback_thunk_t);

	if (newp != NULL) {
		/*
		 * This initialization is unnecessary. If it's omitted, though,
		 * clang gets confused and warns on the subsequent use of t_new.
		 */
		activity_callback_thunk_t t_new = {NULL, NULL};
		WRITE(t_new, activity_callback_thunk_t);
		tsd_activity_callback_thunk_set(tsd, t_new);
	}
	ret = 0;
label_return:
	return ret;
}
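
/*
 * A minimal write sketch (hedged: activity_callback_thunk_t is internal,
 * assumed here to pair a callback with an opaque context pointer as its
 * {NULL, NULL} initializer above suggests; my_cb and my_ctx are hypothetical;
 * error handling omitted):
 *
 *	activity_callback_thunk_t thunk = {my_cb, &my_ctx};
 *	mallctl("experimental.thread.activity_callback", NULL, NULL,
 *	    &thunk, sizeof(thunk));
 */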

/*
 * Output six memory utilization entries for an input pointer, the first one of
 * type (void *) and the remaining five of type size_t, describing the following
 * (in the same order):
 *
 * (a) memory address of the extent a potential reallocation would go into,
 * == the five fields below describe the extent the pointer resides in ==
 * (b) number of free regions in the extent,
 * (c) number of regions in the extent,
 * (d) size of the extent in terms of bytes,
 * (e) total number of free regions in the bin the extent belongs to, and
 * (f) total number of regions in the bin the extent belongs to.
 *
 * Note that "(e)" and "(f)" are only available when stats are enabled;
 * otherwise their values are undefined.
 *
 * This API is mainly intended for small class allocations, where extents are
 * used as slabs. Note that if the bin the extent belongs to is completely
 * full, "(a)" will be NULL.
 *
 * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
 * will be zero (if stats are enabled; otherwise undefined). The other three
 * fields will be properly set, though the values are trivial: "(b)" will be 0,
 * "(c)" will be 1, and "(d)" will be the usable size.
 *
 * The input pointer and size are respectively passed in by newp and newlen,
 * and the output fields and size are respectively oldp and *oldlenp.
 *
 * It can be beneficial to define the following macros to make it easier to
 * access the output:
 *
 * #define SLABCUR_READ(out) (*(void **)out)
 * #define COUNTS(out) ((size_t *)((void **)out + 1))
 * #define NFREE_READ(out) COUNTS(out)[0]
 * #define NREGS_READ(out) COUNTS(out)[1]
 * #define SIZE_READ(out) COUNTS(out)[2]
 * #define BIN_NFREE_READ(out) COUNTS(out)[3]
 * #define BIN_NREGS_READ(out) COUNTS(out)[4]
 *
 * and then write e.g. NFREE_READ(oldp) to fetch the output. See the unit test
 * test_query in test/unit/extent_util.c for an example.
 *
 * For a typical defragmentation workflow that uses this API to assess the
 * fragmentation level, please refer to the comment for
 * experimental_utilization_batch_query_ctl.
 *
 * It's up to the application to determine the significance of fragmentation
 * based on the outputs returned. Possible choices are:
 *
 * (a) if the extent utilization ratio is below a certain threshold,
 * (b) if the extent memory consumption is above a certain threshold,
 * (c) if the extent utilization ratio is significantly below the bin
 *     utilization ratio,
 * (d) if the input pointer deviates a lot from the potential reallocation
 *     address, or
 * (e) some selection/combination of the above.
 *
 * The caller needs to make sure that the input/output arguments are valid,
 * in particular, that the size of the output is correct, i.e.:
 *
 *     *oldlenp = sizeof(void *) + sizeof(size_t) * 5
 *
 * Otherwise, the function immediately returns EINVAL without touching
 * anything.
 *
 * In the rare case where there's no associated extent found for the input
 * pointer, the function zeros out all output fields and returns. Please refer
 * to the comment for experimental_utilization_batch_query_ctl to understand
 * the motivation from C++.
 */
static int
experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	assert(sizeof(inspect_extent_util_stats_verbose_t)
	    == sizeof(void *) + sizeof(size_t) * 5);

	if (oldp == NULL || oldlenp == NULL
	    || *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
	    || newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}

	void *ptr = NULL;
	WRITE(ptr, void *);
	inspect_extent_util_stats_verbose_t *util_stats
	    = (inspect_extent_util_stats_verbose_t *)oldp;
	inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
	    &util_stats->nfree, &util_stats->nregs, &util_stats->size,
	    &util_stats->bin_nfree, &util_stats->bin_nregs,
	    &util_stats->slabcur_addr);
	ret = 0;

label_return:
	return ret;
}
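
/*
 * A minimal query sketch using the macros suggested in the comment above
 * (hedged: `p` is a hypothetical pointer previously returned by malloc, and
 * the output buffer assumes sizeof(void *) == sizeof(size_t); error handling
 * omitted):
 *
 *	size_t out[1 + 5];
 *	size_t out_sz = sizeof(out);
 *	void *p_arg = p;
 *	if (mallctl("experimental.utilization.query", out, &out_sz,
 *	    &p_arg, sizeof(p_arg)) == 0) {
 *		size_t nfree = NFREE_READ(out);
 *		size_t nregs = NREGS_READ(out);
 *		// Utilization of p's extent: (nregs - nfree) / nregs.
 *	}
 */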

/*
 * Given an input array of pointers, output three memory utilization entries of
 * type size_t for each input pointer about the extent it resides in:
 *
 * (a) number of free regions in the extent,
 * (b) number of regions in the extent, and
 * (c) size of the extent in terms of bytes.
 *
 * This API is mainly intended for small class allocations, where extents are
 * used as slabs. In case of large class allocations, the outputs are trivial:
 * "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable size.
 *
 * Note that multiple input pointers may reside in the same extent, so the
 * output fields may contain duplicates.
 *
 * The format of the input/output looks like:
 *
 * input[0]: 1st_pointer_to_query | output[0]: 1st_extent_n_free_regions
 *                                | output[1]: 1st_extent_n_regions
 *                                | output[2]: 1st_extent_size
 * input[1]: 2nd_pointer_to_query | output[3]: 2nd_extent_n_free_regions
 *                                | output[4]: 2nd_extent_n_regions
 *                                | output[5]: 2nd_extent_size
 * ...                            | ...
 *
 * The input array and size are respectively passed in by newp and newlen, and
 * the output array and size are respectively oldp and *oldlenp.
 *
 * It can be beneficial to define the following macros to make it easier to
 * access the output:
 *
 * #define NFREE_READ(out, i) out[(i) * 3]
 * #define NREGS_READ(out, i) out[(i) * 3 + 1]
 * #define SIZE_READ(out, i) out[(i) * 3 + 2]
 *
 * and then write e.g. NFREE_READ(oldp, i) to fetch the output. See the unit
 * test test_batch in test/unit/extent_util.c for a concrete example.
 *
 * A typical workflow would be composed of the following steps:
 *
 * (1) flush tcache: mallctl("thread.tcache.flush", ...)
 * (2) initialize input array of pointers to query fragmentation
 * (3) allocate output array to hold utilization statistics
 * (4) query utilization: mallctl("experimental.utilization.batch_query", ...)
 * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here
 * (6) disable tcache: mallctl("thread.tcache.enabled", ...)
 * (7) defragment allocations with significant fragmentation, e.g.:
 *     for each allocation {
 *         if it's fragmented {
 *             malloc(...);
 *             memcpy(...);
 *             free(...);
 *         }
 *     }
 * (8) enable tcache: mallctl("thread.tcache.enabled", ...)
 *
 * Applications can determine the significance of fragmentation themselves
 * based on the statistics returned, both at the overall level, i.e. step
 * "(5)", and at the individual allocation level, i.e. within step "(7)".
 * Possible choices are:
 *
 * (a) whether the memory utilization ratio is below a certain threshold,
 * (b) whether the memory consumption is above a certain threshold, or
 * (c) some combination of the two.
 *
 * The caller needs to make sure that the input/output arrays are valid and
 * that their sizes are correct and mutually consistent, meaning:
 *
 * (a) newlen = n_pointers * sizeof(const void *)
 * (b) *oldlenp = n_pointers * sizeof(size_t) * 3
 * (c) n_pointers > 0
 *
 * Otherwise, the function immediately returns EINVAL without touching
 * anything.
 *
 * In the rare case where there's no associated extent found for some pointers,
 * rather than immediately terminating the computation and raising an error,
 * the function simply zeros out the corresponding output fields and continues
 * the computation until all input pointers are handled. The motivations for
 * such a design are as follows:
 *
 * (a) The function always either processes nothing or processes everything,
 * and never leaves the output half touched and half untouched.
 *
 * (b) It facilitates usage needs especially common in C++. A vast variety of
 * C++ objects are instantiated with multiple dynamic memory allocations. For
 * example, std::string and std::vector typically use at least two allocations,
 * one for the metadata and one for the actual content. Other types may use
 * even more allocations. When inquiring about utilization statistics, the
 * caller often wants to examine all such allocations, especially the internal
 * one(s), rather than just the topmost one. The issue comes when some
 * implementations perform optimizations to reduce/aggregate some internal
 * allocations, e.g. putting short strings directly into the metadata, and such
 * decisions are not known to the caller. Therefore, we permit pointers to
 * memory that may not have been returned by previous malloc calls, and we
 * provide the caller a convenient way to identify such cases.
 */
static int
experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);

	const size_t len = newlen / sizeof(const void *);
	if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
	    || newlen != len * sizeof(const void *)
	    || *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
		ret = EINVAL;
		goto label_return;
	}

	void **ptrs = (void **)newp;
	inspect_extent_util_stats_t *util_stats =
	    (inspect_extent_util_stats_t *)oldp;
	size_t i;
	for (i = 0; i < len; ++i) {
		inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
		    &util_stats[i].nfree, &util_stats[i].nregs,
		    &util_stats[i].size);
	}
	ret = 0;

label_return:
	return ret;
}
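
/*
 * A minimal batch query sketch using the macros suggested in the comment
 * above (hedged: `a` and `b` are hypothetical pointers previously returned
 * by malloc; error handling omitted):
 *
 *	void *ptrs[2] = {a, b};
 *	size_t out[2 * 3];
 *	size_t out_sz = sizeof(out);
 *	if (mallctl("experimental.utilization.batch_query", out, &out_sz,
 *	    ptrs, sizeof(ptrs)) == 0) {
 *		size_t nfree_a = NFREE_READ(out, 0);
 *		size_t nregs_a = NREGS_READ(out, 0);
 *	}
 */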

static const ctl_named_node_t *
experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t i) {
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (ctl_arenas_i_verify(i)) {
		ret = NULL;
		goto label_return;
	}
	ret = super_experimental_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}
	if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) {
		return EINVAL;
	}

	unsigned arena_ind;
	arena_t *arena;
	int ret;
	size_t *pactivep;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	MIB_UNSIGNED(arena_ind, 2);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
    defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
		/* Expose the underlying counter for fast read. */
		pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
		READ(pactivep, size_t *);
		ret = 0;
#else
		ret = EFAULT;
#endif
	} else {
		ret = EFAULT;
	}
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
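
/*
 * A minimal read sketch (hedged: arena index 0 is illustrative; the returned
 * pointer allows later unsynchronized reads of the arena's active page
 * counter; error handling omitted):
 *
 *	size_t *pactivep;
 *	size_t sz = sizeof(pactivep);
 *	mallctl("experimental.arenas.0.pactivep", &pactivep, &sz, NULL, 0);
 *	size_t active_pages = *pactivep;
 */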

static int
experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!(config_prof && opt_prof)) {
		ret = ENOENT;
		goto label_return;
	}

	ssize_t old_max;
	if (newp != NULL) {
		if (newlen != sizeof(ssize_t)) {
			ret = EINVAL;
			goto label_return;
		}
		ssize_t max = *(ssize_t *)newp;
		if (max < -1) {
			ret = EINVAL;
			goto label_return;
		}
		old_max = prof_recent_alloc_max_ctl_write(tsd, max);
	} else {
		old_max = prof_recent_alloc_max_ctl_read();
	}
	READ(old_max, ssize_t);

	ret = 0;

label_return:
	return ret;
}
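
/*
 * A minimal read-modify sketch (hedged: assumes profiling is enabled and the
 * experimental.prof_recent.alloc_max node name; -1 is the sentinel for
 * "unlimited" per the bounds check above; error handling omitted):
 *
 *	ssize_t old_max, new_max = 512;
 *	size_t sz = sizeof(old_max);
 *	mallctl("experimental.prof_recent.alloc_max", &old_max, &sz,
 *	    &new_max, sizeof(new_max));
 */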

typedef struct write_cb_packet_s write_cb_packet_t;
struct write_cb_packet_s {
	write_cb_t *write_cb;
	void *cbopaque;
};

static int
experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	if (!(config_prof && opt_prof)) {
		ret = ENOENT;
		goto label_return;
	}

	assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);

	WRITEONLY();
	write_cb_packet_t write_cb_packet;
	ASSURED_WRITE(write_cb_packet, write_cb_packet_t);

	prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
	    write_cb_packet.cbopaque);

	ret = 0;

label_return:
	return ret;
}
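
/*
 * A minimal dump sketch (hedged: my_write_cb is a hypothetical callback
 * matching the internal write_cb_t typedef, receiving the opaque pointer and
 * a chunk of output text; assumes the experimental.prof_recent.alloc_dump
 * node name; error handling omitted):
 *
 *	write_cb_packet_t packet = {my_write_cb, my_opaque};
 *	mallctl("experimental.prof_recent.alloc_dump", NULL, NULL,
 *	    &packet, sizeof(packet));
 */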

typedef struct batch_alloc_packet_s batch_alloc_packet_t;
struct batch_alloc_packet_s {
	void **ptrs;
	size_t num;
	size_t size;
	int flags;
};

static int
experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	VERIFY_READ(size_t);

	batch_alloc_packet_t batch_alloc_packet;
	ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
	size_t filled = batch_alloc(batch_alloc_packet.ptrs,
	    batch_alloc_packet.num, batch_alloc_packet.size,
	    batch_alloc_packet.flags);
	READ(filled, size_t);

	ret = 0;

label_return:
	return ret;
}
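
/*
 * A minimal batch allocation sketch (hedged: requests eight 64-byte
 * allocations with default flags; `filled` reports how many slots were
 * actually populated; error handling omitted):
 *
 *	void *ptrs[8];
 *	batch_alloc_packet_t packet = {ptrs, 8, 64, 0};
 *	size_t filled;
 *	size_t sz = sizeof(filled);
 *	mallctl("experimental.batch_alloc", &filled, &sz,
 *	    &packet, sizeof(packet));
 */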

static int
prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned binind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(binind, 3);
	if (binind >= SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_live(tsd, (szind_t)binind, &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}

static int
prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned binind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(binind, 3);
	if (binind >= SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_accum(tsd, (szind_t)binind, &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}
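
/*
 * A minimal read sketch (hedged: bin index 0 is illustrative, and the same
 * pattern applies to the lextents nodes below; prof_stats_t is a private
 * type from the internal headers, and the sketch assumes the
 * prof.stats.bins.<i>.{live,accum} naming with prof stats enabled; error
 * handling omitted):
 *
 *	prof_stats_t stats;
 *	size_t sz = sizeof(stats);
 *	mallctl("prof.stats.bins.0.live", &stats, &sz, NULL, 0);
 */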

static const ctl_named_node_t *
prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
	if (!(config_prof && opt_prof && opt_prof_stats)) {
		return NULL;
	}
	if (i >= SC_NBINS) {
		return NULL;
	}
	return super_prof_stats_bins_i_node;
}

static int
prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned lextent_ind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(lextent_ind, 3);
	if (lextent_ind >= SC_NSIZES - SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}

static int
prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	unsigned lextent_ind;
	prof_stats_t stats;

	if (!(config_prof && opt_prof && opt_prof_stats)) {
		ret = ENOENT;
		goto label_return;
	}

	READONLY();
	MIB_UNSIGNED(lextent_ind, 3);
	if (lextent_ind >= SC_NSIZES - SC_NBINS) {
		ret = EINVAL;
		goto label_return;
	}
	prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
	READ(stats, prof_stats_t);

	ret = 0;
label_return:
	return ret;
}

static const ctl_named_node_t *
prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t i) {
	if (!(config_prof && opt_prof && opt_prof_stats)) {
		return NULL;
	}
	if (i >= SC_NSIZES - SC_NBINS) {
		return NULL;
	}
	return super_prof_stats_lextents_i_node;
}
