#ifndef JEMALLOC_INTERNAL_SC_H
#define JEMALLOC_INTERNAL_SC_H

#include "jemalloc/internal/jemalloc_internal_types.h"

/*
 * Size class computations:
 *
 * These are a little tricky; we'll first start by describing how things
 * generally work, and then describe some of the details.
 *
 * Ignore the first few size classes for a moment. We can then split all the
 * remaining size classes into groups. The size classes in a group are spaced
 * such that they cover allocation request sizes in a power-of-2 range. The
 * power of two is called the base of the group, and the size classes in it
 * satisfy allocations in the half-open range (base, base * 2]. There are
 * SC_NGROUP size classes in each group, equally spaced in the range, so that
 * each one covers allocations for base / SC_NGROUP possible allocation sizes.
 * We call that value (base / SC_NGROUP) the delta of the group. Each size class
 * is delta larger than the one before it (including the initial size class in a
 * group, which is delta larger than base, the largest size class in the
 * previous group).
 * To make the math all work out nicely, we require that SC_NGROUP is a power of
 * two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
 * lg_base and lg_delta. For each of these groups then, we have that
 * lg_delta == lg_base - SC_LG_NGROUP.
 * The size classes in a group with a given lg_base and lg_delta (which, recall,
 * can be computed from lg_base for these groups) are therefore:
 *   base + 1 * delta
 *     which covers allocations in (base, base + 1 * delta].
 *   base + 2 * delta
 *     which covers allocations in (base + 1 * delta, base + 2 * delta].
 *   base + 3 * delta
 *     which covers allocations in (base + 2 * delta, base + 3 * delta].
 *   ...
 *   base + SC_NGROUP * delta ( == 2 * base)
 *     which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
 * (Note that currently SC_NGROUP is always 4, so the "..." is empty in
 * practice.)
 * Note that the last size class in the group is the next power of two (after
 * base), so that we've set up the induction correctly for the next group's
 * selection of delta.
 *
 * Now, let's start considering the first few size classes. Two extra constants
 * come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
 * correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
 * are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
 * never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
 * highest required alignment of a platform. For allocation sizes smaller than
 * (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
 * platforms with types with alignment larger than their size). To allow such
 * allocations (without wasting space unnecessarily), we introduce tiny size
 * classes; one per power of two, up until we hit the quantum size. There are
 * therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
 *
 * Next, we have a size class of size (1 << LG_QUANTUM). This can't be the
 * start of a group in the sense we described above (covering a power of two
 * range) since, if we divided it by SC_NGROUP to pick a value of delta, we'd
 * get a delta smaller than (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM),
 * which is against the alignment rules above.
 *
 * The first base we can divide by SC_NGROUP while keeping the resulting delta
 * at least (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there
 * by having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
 * classes are:
 *   1 * (1 << LG_QUANTUM)
 *   2 * (1 << LG_QUANTUM)
 *   3 * (1 << LG_QUANTUM)
 *   ... (although, as above, this "..." is empty in practice)
 *   SC_NGROUP * (1 << LG_QUANTUM).
 *
 * There are SC_NGROUP of these size classes, so we can regard it as a sort of
 * pseudo-group, even though it spans multiple powers of 2, is divided
 * differently, and both starts and ends on a power of 2 (as opposed to just
 * ending). SC_NGROUP is itself a power of two, so the first group after the
 * pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
 * lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
 * sizes without violating our LG_QUANTUM requirements, so we can safely set
 * lg_delta == lg_base - SC_LG_NGROUP (== LG_QUANTUM).
 *
 * So, in order, the size classes are:
 *
 * Tiny size classes:
 * - Count: LG_QUANTUM - SC_LG_TINY_MIN.
 * - Sizes:
 *     1 << SC_LG_TINY_MIN
 *     1 << (SC_LG_TINY_MIN + 1)
 *     1 << (SC_LG_TINY_MIN + 2)
 *     ...
 *     1 << (LG_QUANTUM - 1)
 *
 * Initial pseudo-group:
 * - Count: SC_NGROUP
 * - Sizes:
 *     1 * (1 << LG_QUANTUM)
 *     2 * (1 << LG_QUANTUM)
 *     3 * (1 << LG_QUANTUM)
 *     ...
 *     SC_NGROUP * (1 << LG_QUANTUM)
 *
 * Regular group 0:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 * Regular group 1:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 * ...
 *
 * Regular group N:
 * - Count: SC_NGROUP
 * - Sizes:
 *   (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
 *   lg_base - SC_LG_NGROUP)
 *     (1 << lg_base) + 1 * (1 << lg_delta)
 *     (1 << lg_base) + 2 * (1 << lg_delta)
 *     (1 << lg_base) + 3 * (1 << lg_delta)
 *     ...
 *     (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
 *
 * Representation of metadata:
 * To make the math easy, we'll mostly work in lg quantities. We record lg_base,
 * lg_delta, and ndelta (i.e. number of deltas above the base) on a
 * per-size-class basis, and maintain the invariant that, across all size
 * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
 *
 * For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
 * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
 *
 * For the initial tiny size classes (if any), lg_base is lg(size class size).
 * lg_delta is lg_base for the first size class, and lg_base - 1 for all
 * subsequent ones. ndelta is always 0.
 *
 * For the pseudo-group, if there are no tiny size classes, then we set
 * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
 * to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
 * is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
 * indeed get a power of two that way). If there *are* tiny size classes, then
 * the first size class needs to have lg_delta relative to the largest tiny size
 * class. We therefore set lg_base == LG_QUANTUM - 1,
 * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
 * pseudo-group the same.
 *
 *
 * Other terminology:
 * "Small" size classes mean those that are allocated out of bins, which is the
 * same as those that are slab allocated.
 * "Large" size classes are those that are not small. The cutoff for counting as
 * large is page size * group size.
 */

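/*
 * Worked example (illustrative only; assumes LG_QUANTUM == 4, i.e. a 16-byte
 * quantum, together with the SC_LG_NGROUP == 2 and SC_LG_TINY_MIN == 3
 * settings below). The layout described above then starts:
 *   Tiny:            8
 *   Pseudo-group:    16, 32, 48, 64      (delta == 16)
 *   Regular group 0: 80, 96, 112, 128    (lg_base == 6, lg_delta == 4)
 *   Regular group 1: 160, 192, 224, 256  (lg_base == 7, lg_delta == 5)
 *   ...
 * Each regular group ends on the next power of two, which becomes the base of
 * the group that follows.
 */
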
/*
 * Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
 */
#define SC_LG_NGROUP 2
#define SC_LG_TINY_MIN 3

#if SC_LG_TINY_MIN == 0
/* The div module doesn't support division by 1, which this would require. */
#  error "Unsupported LG_TINY_MIN"
#endif

/*
 * The definitions below are all determined by the above settings and system
 * characteristics.
 */
#define SC_NGROUP (1ULL << SC_LG_NGROUP)
#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
#define SC_NPSEUDO SC_NGROUP
#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
/*
 * We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
 * we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
 * size class shorter than the others.)
 * We could probably save some space in arenas by capping this at LG_VADDR size.
 */
#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
#define SC_NREGULAR (SC_NGROUP * \
    (SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)

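/*
 * Illustrative arithmetic (not definitions; assumes 64-bit pointers and
 * LG_QUANTUM == 4, so SC_LG_FIRST_REGULAR_BASE == 6 and SC_LG_BASE_MAX == 62):
 *   SC_NTINY    == 4 - 3                 == 1
 *   SC_NPSEUDO  == 4
 *   SC_NREGULAR == 4 * (62 - 6 + 1) - 1  == 227
 *   SC_NSIZES   == 1 + 4 + 227           == 232
 */
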
/*
 * The number of size classes that are a multiple of the page size.
 *
 * Here are the first few bases that have page-multiple SCs.
 *
 * lg(base)    | base     | highest SC | page-multiple SCs
 * ------------|----------|------------|------------------
 * LG_PAGE - 1 | PAGE / 2 | PAGE       | 1
 * LG_PAGE     | PAGE     | 2 * PAGE   | 1
 * LG_PAGE + 1 | 2 * PAGE | 4 * PAGE   | 2
 * LG_PAGE + 2 | 4 * PAGE | 8 * PAGE   | 4
 *
 * The number of page-multiple SCs continues to grow in powers of two, up until
 * lg_delta == lg_page, which corresponds to setting lg_base to lg_page +
 * SC_LG_NGROUP. So the number of size classes that are multiples of the page
 * size and whose delta is less than the page size is
 * 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup.
 *
 * For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are
 * NGROUP page-multiple size classes, and when lg_base == lg_base_max, there are
 * NGROUP - 1.
 *
 * This gives us the quantity we seek.
 */
#define SC_NPSIZES ( \
    SC_NGROUP \
    + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \
    + SC_NGROUP - 1)

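/*
 * Illustrative arithmetic (assumes LG_PAGE == 12 and 64-bit pointers, so
 * SC_LG_BASE_MAX == 62):
 *   SC_NPSIZES == 4 + (62 - (12 + 2)) * 4 + 4 - 1 == 4 + 192 + 3 == 199
 */
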
/*
 * We declare a size class binnable if size < page size * group size. Or, in
 * other words, lg(size) < lg(page size) + lg(group size).
 */
#define SC_NBINS ( \
    /* Sub-regular size classes. */ \
    SC_NTINY + SC_NPSEUDO \
    /* Groups with SC_LG_FIRST_REGULAR_BASE <= lg_base < LG_PAGE + SC_LG_NGROUP. */ \
    + SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
    /* Last SC of the last group hits the bound exactly; exclude it. */ \
    - 1)

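/*
 * Illustrative arithmetic (assumes LG_PAGE == 12 and LG_QUANTUM == 4):
 *   SC_NBINS == 1 + 4 + 4 * (12 + 2 - 6) - 1 == 1 + 4 + 32 - 1 == 36
 */
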
/*
 * The size2index_tab lookup table uses uint8_t to encode each bin index, so we
 * cannot support more than 256 small size classes.
 */
#if (SC_NBINS > 256)
#  error "Too many small size classes"
#endif

/* The largest size class in the lookup table, and its binary log. */
#define SC_LG_MAX_LOOKUP 12
#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP)

/* Internal; only used for the definition of SC_SMALL_MAXCLASS. */
#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))

/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
    + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)

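/*
 * Illustrative arithmetic (assumes LG_PAGE == 12, so PAGE == 4096):
 *   SC_LOOKUP_MAXCLASS == 1 << 12                == 4096
 *   SC_SMALL_MAXCLASS  == 8192 + (4 - 1) * 2048  == 14336
 * Since 14336 >= 4096, the fastpath check below holds for this configuration.
 */
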
/* The fastpath assumes all lookup-able sizes are small. */
#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
#  error "Lookup table sizes must be small"
#endif

/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)

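/*
 * Illustrative value (assumes LG_PAGE == 12): SC_LARGE_MINCLASS == 1 << 14 ==
 * 16384, i.e. the first size class past SC_SMALL_MAXCLASS above.
 */
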
/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))

/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)

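/*
 * Illustrative value (assumes 64-bit pointers): SC_LARGE_MAXCLASS ==
 * 2**62 + 3 * 2**60 == 7 * 2**60, which stays below the 2**63 cap on
 * allocations mentioned above.
 */
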
/* Maximum number of regions in one slab. */
#ifndef CONFIG_LG_SLAB_MAXREGS
#  define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#else
#  if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
#    error "Unsupported SC_LG_SLAB_MAXREGS"
#  else
#    define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
#  endif
#endif

#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS)

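/*
 * Illustrative value (assumes LG_PAGE == 12 and no CONFIG_LG_SLAB_MAXREGS
 * override): SC_LG_SLAB_MAXREGS == 12 - 3 == 9, so SC_SLAB_MAXREGS == 512,
 * i.e. one page's worth of the smallest (8-byte) regions.
 */
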
typedef struct sc_s sc_t;
struct sc_s {
	/* Size class index, or -1 if not a valid size class. */
	int index;
	/* Lg group base size (no deltas added). */
	int lg_base;
	/* Lg delta to previous size class. */
	int lg_delta;
	/* Delta multiplier. size == (1 << lg_base) + (ndelta << lg_delta). */
	int ndelta;
	/*
	 * True if the size class is a multiple of the page size, false
	 * otherwise.
	 */
	bool psz;
	/*
	 * True if the size class is a small (bin) size class, false otherwise.
	 */
	bool bin;
	/* The slab page count if a small bin size class, 0 otherwise. */
	int pgs;
	/* Same as lg_delta if a lookup table size class, 0 otherwise. */
	int lg_delta_lookup;
};

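/*
 * Illustrative sketch (for exposition only; sc_entry_size_example is not part
 * of jemalloc's API): recovering the byte size encoded by an sc_t, per the
 * invariant size == (1 << lg_base) + ndelta * (1 << lg_delta).
 */
static inline size_t
sc_entry_size_example(const sc_t *sc) {
	/* ndelta * (1 << lg_delta) is equivalently ndelta << lg_delta. */
	return ((size_t)1 << sc->lg_base)
	    + ((size_t)sc->ndelta << sc->lg_delta);
}
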
typedef struct sc_data_s sc_data_t;
struct sc_data_s {
	/* Number of tiny size classes. */
	unsigned ntiny;
	/* Number of bins supported by the lookup table. */
	int nlbins;
	/* Number of small size class bins. */
	int nbins;
	/* Number of size classes. */
	int nsizes;
	/* Number of bits required to store NSIZES. */
	int lg_ceil_nsizes;
	/* Number of size classes that are a multiple of (1U << LG_PAGE). */
	unsigned npsizes;
	/* Lg of maximum tiny size class (or -1, if none). */
	int lg_tiny_maxclass;
	/* Maximum size class included in lookup table. */
	size_t lookup_maxclass;
	/* Maximum small size class. */
	size_t small_maxclass;
	/* Lg of minimum large size class. */
	int lg_large_minclass;
	/* The minimum large size class. */
	size_t large_minclass;
	/* Maximum (large) size class. */
	size_t large_maxclass;
	/* True if the sc_data_t has been initialized (for debugging only). */
	bool initialized;

	sc_t sc[SC_NSIZES];
};

size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
void sc_data_init(sc_data_t *data);
/*
 * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
 * Otherwise, does its best to accommodate the request.
 */
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
    int pgs);
void sc_boot(sc_data_t *data);

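/*
 * Usage sketch (for exposition only; sc_sizes_example_total is hypothetical
 * and not part of jemalloc): build the size class table with sc_data_init and
 * recompute each class's byte size from its recorded (lg_base, lg_delta,
 * ndelta) triple via reg_size_compute.
 */
static inline size_t
sc_sizes_example_total(void) {
	sc_data_t data;
	sc_data_init(&data);
	size_t total = 0;
	for (int i = 0; i < data.nsizes; i++) {
		sc_t *sc = &data.sc[i];
		total += reg_size_compute(sc->lg_base, sc->lg_delta,
		    sc->ndelta);
	}
	return total;
}
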
#endif /* JEMALLOC_INTERNAL_SC_H */