#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"

/*
 * When the number of pages to be purged exceeds this threshold, deferred
 * purging should happen.
 */
#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
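
/*
 * Illustrative sketch only (not the canonical check): deferred-work logic can
 * compare the pending purge backlog against the threshold above, e.g.
 *
 *   if (npages_pending_purge > ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD) {
 *           arena_handle_deferred_work(tsdn, arena);
 *   }
 *
 * where npages_pending_purge stands for a hypothetical count of pages
 * awaiting purge.
 */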
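/*
 * Default decay times (in milliseconds) for dirty and muzzy pages, backing
 * the opt.dirty_decay_ms and opt.muzzy_decay_ms options: -1 disables
 * time-based purging, 0 purges as soon as possible, and positive values set
 * the approximate decay window.
 */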
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;

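/*
 * Per-CPU arena association mode (the opt.percpu_arena option), along with
 * the printable names of the supported modes.
 */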
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];

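/*
 * Precomputed magic-number division state (see div.h), one entry per small
 * size class; used to turn a byte offset within a slab into a region index
 * without a hardware divide.
 */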
extern div_info_t arena_binind_div_info[SC_NBINS];

extern malloc_mutex_t arenas_lock;
extern emap_t arena_emap_global;

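/*
 * Allocations of at least oversize_threshold bytes are preferentially served
 * from a dedicated huge arena (see arena_choose_huge());
 * opt_oversize_threshold holds the value configured via the
 * opt.oversize_threshold option.
 */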
extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;

/*
 * arena_bin_offsets[binind] is the offset of the first bin shard for size class
 * binind.
 */
extern uint32_t arena_bin_offsets[SC_NBINS];
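/*
 * A minimal sketch (assuming the offset is taken from the arena_t base; not
 * the canonical accessor): a particular shard can then be located as
 *
 *   bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
 *   bin_t *bin = &shard0[binshard];
 */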

void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_data_t *bstats, arena_stats_large_t *lstats,
    pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata, size_t oldsize);
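
/*
 * Decay control: the state argument selects the dirty or muzzy decay policy,
 * and decay_ms follows the same convention as the options above (-1 disables
 * purging, 0 purges eagerly).
 */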
bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    ssize_t decay_ms);
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all);
uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
    cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
    const unsigned nfill);

void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path);
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);

void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
    edata_t *slab, bin_t *bin);
void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
    edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
    extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
    size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    unsigned *binshard);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    void **ptrs, size_t nfill, bool zero);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
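
/*
 * Fork handling: the numbered prefork functions acquire the arena's mutexes
 * in a fixed order before fork(2); arena_postfork_parent() and
 * arena_postfork_child() then release or reinitialize them in the parent and
 * child, respectively.
 */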
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);

#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */