#ifndef JEMALLOC_INTERNAL_HPA_H
#define JEMALLOC_INTERNAL_HPA_H

#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/hpa_hooks.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/psset.h"

typedef struct hpa_central_s hpa_central_t;
struct hpa_central_s {
	/*
	 * The mutex guarding most of the operations on the central data
	 * structure.
	 */
	malloc_mutex_t mtx;
	/*
	 * Guards expansion of eden. We separate this from the regular mutex so
	 * that cheaper operations can still continue while we're doing the OS
	 * call.
	 */
	malloc_mutex_t grow_mtx;
	/*
	 * Either NULL (if empty), or a hugepage-aligned region spanning an
	 * integral number of hugepages. We carve hugepages off one at a time
	 * to satisfy new pageslab requests.
	 *
	 * Guarded by grow_mtx.
	 */
	void *eden;
	size_t eden_len;
	/* Source for metadata. */
	base_t *base;
	/* Number of grow operations done on this hpa_central_t. */
	uint64_t age_counter;

	/* The HPA hooks. */
	hpa_hooks_t hooks;
};
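
/*
 * A minimal sketch (illustrative only; the real logic lives in hpa.c) of how
 * a single hugepage might be carved off eden under grow_mtx, per the comments
 * above.  HUGEPAGE is jemalloc's hugepage size; "central" and "tsdn" are
 * assumed to be in scope:
 *
 *	malloc_mutex_lock(tsdn, &central->grow_mtx);
 *	assert(central->eden_len >= HUGEPAGE);
 *	void *slab = central->eden;
 *	central->eden = (void *)((uintptr_t)central->eden + HUGEPAGE);
 *	central->eden_len -= HUGEPAGE;
 *	malloc_mutex_unlock(tsdn, &central->grow_mtx);
 */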

typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
struct hpa_shard_nonderived_stats_s {
	/*
	 * The number of times we've purged within a hugepage.
	 *
	 * Guarded by mtx.
	 */
	uint64_t npurge_passes;
	/*
	 * The number of individual purge calls we perform (which should always
	 * be at least as large as npurge_passes, since each pass purges at
	 * least one extent within a hugepage).
	 *
	 * Guarded by mtx.
	 */
	uint64_t npurges;

	/*
	 * The number of times we've hugified a pageslab.
	 *
	 * Guarded by mtx.
	 */
	uint64_t nhugifies;
	/*
	 * The number of times we've dehugified a pageslab.
	 *
	 * Guarded by mtx.
	 */
	uint64_t ndehugifies;
};
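
/*
 * A hedged sketch of the invariant implied by the comments above; a check
 * like this is an illustration, not something this header mandates:
 *
 *	assert(stats->npurges >= stats->npurge_passes);
 */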

/* Completely derived; only used by CTL. */
typedef struct hpa_shard_stats_s hpa_shard_stats_t;
struct hpa_shard_stats_s {
	psset_stats_t psset_stats;
	hpa_shard_nonderived_stats_t nonderived_stats;
};

typedef struct hpa_shard_s hpa_shard_t;
struct hpa_shard_s {
	/*
	 * pai must be the first member; we cast from a pointer to it to a
	 * pointer to the enclosing hpa_shard_t (see the sketch after this
	 * struct).
	 */
	pai_t pai;

	/* The central allocator we get our hugepages from. */
	hpa_central_t *central;
	/* Protects most of this shard's state. */
	malloc_mutex_t mtx;
	/*
	 * Guards the shard's access to the central allocator (preventing
	 * multiple threads operating on this shard from accessing the central
	 * allocator).
	 */
	malloc_mutex_t grow_mtx;
	/* The base metadata allocator. */
	base_t *base;

	/*
	 * This edata cache is the one we use when allocating a small extent
	 * from a pageslab. The pageslab itself comes from the centralized
	 * allocator, and so will use its edata_cache.
	 */
	edata_cache_fast_t ecf;

	psset_t psset;

	/*
	 * How many grow operations have occurred.
	 *
	 * Guarded by grow_mtx.
	 */
	uint64_t age_counter;

	/* The arena ind we're associated with. */
	unsigned ind;

	/*
	 * Our emap. This is just a cache of the emap pointer in the associated
	 * hpa_central.
	 */
	emap_t *emap;

	/* The configuration choices for this hpa shard. */
	hpa_shard_opts_t opts;

	/*
	 * The number of pages we've started but not yet finished purging in
	 * this hpa shard.
	 */
	size_t npending_purge;

	/*
	 * Those stats which are copied directly into the CTL-centric hpa shard
	 * stats.
	 */
	hpa_shard_nonderived_stats_t stats;

	/*
	 * The last time we performed a purge on this shard.
	 */
	nstime_t last_purge;
};
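
/*
 * A minimal sketch, under the first-member layout noted above, of recovering
 * the shard from its embedded pai_t.  The helper name is illustrative, not
 * part of this header:
 *
 *	static inline hpa_shard_t *
 *	hpa_shard_from_pai(pai_t *self) {
 *		return (hpa_shard_t *)self;
 *	}
 */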

/*
 * Whether or not the HPA can be used given the current configuration. This is
 * not necessarily a guarantee that it backs its allocations by hugepages, just
 * that it can function properly given the system it's running on.
 */
bool hpa_supported(void);
bool hpa_central_init(hpa_central_t *central, base_t *base,
    const hpa_hooks_t *hooks);
bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
    base_t *base, edata_cache_t *edata_cache, unsigned ind,
    const hpa_shard_opts_t *opts);
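
/*
 * A hedged sketch of the initialization order these declarations suggest: a
 * central allocator is set up once, and each shard is then initialized
 * against it.  The variable names and surrounding setup are assumptions:
 *
 *	hpa_central_t central;
 *	hpa_shard_t shard;
 *	if (hpa_central_init(&central, base, &hpa_hooks_default)) {
 *		// error
 *	}
 *	if (hpa_shard_init(&shard, &central, emap, base, edata_cache,
 *	    arena_ind, &opts)) {
 *		// error
 *	}
 */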

void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
    hpa_shard_stats_t *dst);

/*
 * Notify the shard that we won't use it for allocations much longer. Due to
 * the possibility of races, we don't actually prevent allocations; we just
 * flush and disable the embedded edata_cache_fast.
 */
void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);

void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
    bool deferral_allowed);
void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
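
/*
 * A hedged usage sketch: when deferral is allowed, work such as purging can
 * be batched up and later driven explicitly.  The calling context here is an
 * assumption, not a prescribed pattern:
 *
 *	hpa_shard_set_deferral_allowed(tsdn, shard, true);
 *	// ... allocations and deallocations proceed ...
 *	hpa_shard_do_deferred_work(tsdn, shard);
 */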

/*
 * We share the fork ordering with the PA and arena prefork handling; that's
 * why these are 3 and 4 rather than 0 and 1.
 */
void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
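
/*
 * Illustrative ordering around fork(), assuming the conventions above; the
 * surrounding fork-handler machinery is hypothetical:
 *
 *	// In the parent, before fork():
 *	hpa_shard_prefork3(tsdn, shard);
 *	hpa_shard_prefork4(tsdn, shard);
 *	// After fork():
 *	//   in the parent: hpa_shard_postfork_parent(tsdn, shard);
 *	//   in the child:  hpa_shard_postfork_child(tsdn, shard);
 */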

#endif /* JEMALLOC_INTERNAL_HPA_H */