#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t interval;
	/*
	 * Time at which the current decay interval logically started. We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs. In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t epoch;
	/* Deadline randomness generator. */
	uint64_t jitter_state;
	/*
	 * Deadline for the current epoch. This is the sum of interval and a
	 * per-epoch jitter term, which is a uniform random variable in
	 * [0..interval). Epochs always advance by precise multiples of
	 * interval, but we randomize the deadline to reduce the likelihood of
	 * arenas purging in lockstep. (An illustrative sketch of the deadline
	 * computation follows this struct.)
	 */
	nstime_t deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch. During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch. Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t backlog[SMOOTHSTEP_NSTEPS];

	/*
	 * Pointer to associated stats. These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	arena_stats_decay_t *stats;
	/* Peak number of pages in associated extents. Used for debug only. */
	uint64_t ceil_npages;
};
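
/*
 * Illustrative sketch (an approximation, not the authoritative logic, which
 * lives outside this header): given `decay`, a hypothetical pointer to the
 * struct above, a new epoch's deadline is derived from epoch, interval, and
 * jitter_state roughly as follows, assuming the nstime_* and prng_range_u64
 * helpers behave as their names suggest:
 *
 *	nstime_t jitter;
 *
 *	nstime_copy(&decay->deadline, &decay->epoch);
 *	nstime_add(&decay->deadline, &decay->interval);
 *	nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
 *	    nstime_ns(&decay->interval)));
 *	nstime_add(&decay->deadline, &jitter);
 *
 * i.e. deadline = epoch + interval + uniform_random[0..interval).
 */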

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena. Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation. Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t nthreads[2];

	/* Next bin shard for binding new threads. Synchronization: atomic. */
	atomic_u_t binshard_next;
	/*
	 * When percpu_arena is enabled, track the most recent thread accessing
	 * this arena, and only read / update the current CPU id when there is
	 * a mismatch; this amortizes the cost of the CPU id lookup.
	 */
	tsdn_t *last_thd;

	/* Synchronization: internal. */
	arena_stats_t stats;

	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena. Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t) tcache_ql;
	ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
	malloc_mutex_t tcache_ql_mtx;

	/* Synchronization: internal. */
	prof_accum_t prof_accum;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t offset_state;

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t extent_sn_next;

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t dss_prec;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t large_mtx;

	/*
	 * Collections of extents that were previously allocated. These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t extents_dirty;
	extents_t extents_muzzy;
	extents_t extents_retained;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t decay_dirty; /* dirty --> muzzy */
	arena_decay_t decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain). This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit is the maximum allowed size index to expand to
	 * (unless the required size is greater). The default is no limit, and
	 * it can be adjusted only through mallctl.
	 *
	 * Synchronization: extent_grow_mtx.
	 */
	pszind_t extent_grow_next;
	pszind_t retain_grow_limit;
	malloc_mutex_t extent_grow_mtx;

	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t extent_avail;
	atomic_zu_t extent_avail_cnt;
	malloc_mutex_t extent_avail_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	bins_t bins[SC_NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t *base;
	/* Used to determine uptime. Read-only after initialization. */
	nstime_t create_time;
};
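
/*
 * Illustrative sketch (an assumption about the surrounding code, not a
 * definition in this header): the two nthreads slots are maintained with
 * relaxed atomics, so bumping the application (0) or internal-metadata (1)
 * count would look roughly like:
 *
 *	atomic_fetch_add_u(&arena->nthreads[internal ? 1 : 0], 1,
 *	    ATOMIC_RELAXED);
 *
 * where `arena` is a hypothetical pointer to the struct above and `internal`
 * is a hypothetical bool distinguishing internal metadata allocation from
 * application allocation.
 */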

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t decay_ticker;
};
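
/*
 * Illustrative sketch (assumed usage, not defined here): decay_ticker is
 * ticked on allocation/deallocation paths so that decay-based purging is
 * attempted only once every nticks events, e.g.:
 *
 *	if (ticker_ticks(&tdata->decay_ticker, nticks)) {
 *		arena_decay(tsdn, arena, false, false);
 *	}
 *
 * where `tdata`, `nticks`, `tsdn`, and `arena` come from the calling context.
 */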

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t szind;
	bool slab;
};
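
/*
 * Illustrative sketch (assumed usage, not defined here): an alloc_ctx_t is
 * typically filled from an rtree lookup on a pointer and then threaded down
 * the deallocation path, e.g.:
 *
 *	alloc_ctx_t alloc_ctx;
 *	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
 *	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
 *	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
 *
 * where `tsd` and `ptr` come from the calling context.
 */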

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */