#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/hpdata.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/typed_list.h"

/*
 * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
 * to free up the low bits in the rtree leaf.
 */
#define EDATA_ALIGNMENT 128
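/*
 * A minimal sketch (illustrative only, not part of the API): because every
 * edata_t is 128-byte aligned, the low lg(128) = 7 bits of an edata_t
 * pointer are always zero, which is what lets an rtree leaf pack extra
 * metadata bits into the same word as the pointer.  Here low_metadata_bits
 * is a hypothetical stand-in for whatever the emap packs:
 *
 *	edata_t *edata = ...;
 *	assert(((uintptr_t)edata & (uintptr_t)(EDATA_ALIGNMENT - 1)) == 0);
 *	uintptr_t leaf_word = (uintptr_t)edata | low_metadata_bits;
 */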

enum extent_state_e {
	extent_state_active = 0,
	extent_state_dirty = 1,
	extent_state_muzzy = 2,
	extent_state_retained = 3,
	extent_state_transition = 4, /* This and subsequent states are intermediate. */
	extent_state_merging = 5,
	extent_state_max = 5 /* Sanity checking only. */
};
typedef enum extent_state_e extent_state_t;
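/*
 * Rough (non-transitional) lifecycle, for orientation: an extent is active
 * while in use; on deallocation it becomes dirty; decay purges it from dirty
 * to muzzy and eventually to retained, where only the virtual address range
 * is kept.
 */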

enum extent_head_state_e {
	EXTENT_NOT_HEAD,
	EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
};
typedef enum extent_head_state_e extent_head_state_t;

/*
 * Which implementation of the page allocator interface (PAI, defined in
 * pai.h) owns the given extent?
 */
enum extent_pai_e {
	EXTENT_PAI_PAC = 0,
	EXTENT_PAI_HPA = 1
};
typedef enum extent_pai_e extent_pai_t;

struct e_prof_info_s {
	/* Time when this was allocated. */
	nstime_t e_prof_alloc_time;
	/* Allocation request size. */
	size_t e_prof_alloc_size;
	/* Points to a prof_tctx_t. */
	atomic_p_t e_prof_tctx;
	/*
	 * Points to a prof_recent_t for the allocation; NULL
	 * means the recent allocation record no longer exists.
	 * Protected by prof_recent_alloc_mtx.
	 */
	atomic_p_t e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;

/*
 * The information about a particular edata that lives in an emap. Space is
 * more precious there (the information, plus the edata pointer, has to live
 * in a 64-bit word if we want to enable a packed representation).
 *
 * There are two things that are special about the information here:
 * - It's quicker to access. You have one fewer pointer hop, since finding the
 *   edata_t associated with an item always requires accessing the rtree leaf
 *   in which this data is stored.
 * - It can be read unsynchronized, and without worrying about lifetime issues.
 */
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
	bool slab;
	szind_t szind;
};

typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
	uint64_t sn;
	uintptr_t addr;
};

/* Extent (span of pages). Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
ph_structs(edata_avail, edata_t);
ph_structs(edata_heap, edata_t);
struct edata_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * p: pai
	 * z: zeroed
	 * g: guarded
	 * t: state
	 * i: szind
	 * f: nfree
	 * s: bin_shard
	 * h: is_head
	 *
	 * 00000000 ... 000hssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 * unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 * of small regions. This helps differentiate small size classes,
	 * and it indicates whether interior pointers can be looked up via
	 * iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 * committed to the extent, whether explicitly or implicitly
	 * as on a system that overcommits and satisfies physical
	 * memory needs on demand via soft page faults.
	 *
	 * pai: The pai flag is an extent_pai_t.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 * whether memory is zero-filled.
	 *
	 * guarded: The guarded flag is used by the sanitizer to track whether
	 * the extent has page guards around it.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates usable size class index for
	 * allocations residing in this extent, regardless of whether the
	 * extent is a slab. Extent size and usable size often differ
	 * even for non-slabs, either due to sz_large_pad or promotion of
	 * sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * bin_shard: The shard of the bin from which this extent came.
	 */
	uint64_t e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
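/*
 * For example (as a sanity check), MASK(3, 17) expands to
 * ((((uint64_t)0x1U << 3) - 1) << 17) == (uint64_t)0x7 << 17, i.e. a mask
 * covering bits 17..19.
 */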

#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT 0
#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)

#define EDATA_BITS_SLAB_WIDTH 1
#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)

#define EDATA_BITS_COMMITTED_WIDTH 1
#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)

#define EDATA_BITS_PAI_WIDTH 1
#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)

#define EDATA_BITS_ZEROED_WIDTH 1
#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)

#define EDATA_BITS_GUARDED_WIDTH 1
#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)

#define EDATA_BITS_STATE_WIDTH 3
#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)

#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)

#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)

#define EDATA_BITS_BINSHARD_WIDTH 6
#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)

#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
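	/*
	 * All the accessors below follow the same shape; for a hypothetical
	 * field FOO, the pattern is (sketch only):
	 *
	 *	get: (e_bits & EDATA_BITS_FOO_MASK) >> EDATA_BITS_FOO_SHIFT
	 *	set: e_bits = (e_bits & ~EDATA_BITS_FOO_MASK)
	 *	    | ((uint64_t)val << EDATA_BITS_FOO_SHIFT)
	 */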

	/* Pointer to the extent that this structure is responsible for. */
	void *e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different from the serial number for the extent at
		 * e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t e_size_esn;
#define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
#define EDATA_ESN_MASK ((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t e_bsize;
	};
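	/*
	 * A worked example of the e_size_esn packing (assuming 4KiB pages, so
	 * lg(PAGE) = 12): sizes are always multiples of PAGE, which frees the
	 * low 12 bits for the esn. E.g. with e_size_esn == 0x25007:
	 *
	 *	size == 0x25007 & EDATA_SIZE_MASK == 0x25000
	 *	esn  == 0x25007 & EDATA_ESN_MASK  == 0x7
	 */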

	/*
	 * If this edata is a user allocation from an HPA, it comes out of some
	 * pageslab (we don't yet support hugepage allocations that don't fit
	 * into pageslabs). This tracks it.
	 */
	hpdata_t *e_ps;

	/*
	 * Serial number. These are not necessarily unique; splitting an extent
	 * results in two extents with the same serial number.
	 */
	uint64_t e_sn;

	union {
		/*
		 * List linkage used when the edata_t is active; either in the
		 * arena's large allocations or in a bin_t's slabs_full.
		 */
		ql_elm(edata_t) ql_link_active;
		/*
		 * Pairing heap linkage. Used whenever the extent is inactive
		 * (in the page allocators), or when it is active and in
		 * slabs_nonfull, or when the edata_t is unassociated with an
		 * extent and sitting in an edata_cache.
		 */
		union {
			edata_heap_link_t heap_link;
			edata_avail_link_t avail_link;
		};
	};

	union {
		/*
		 * List linkage used when the extent is inactive:
		 * - Stashed dirty extents
		 * - Ecache LRU functionality.
		 */
		ql_elm(edata_t) ql_link_inactive;
		/* Small region slab metadata. */
		slab_data_t e_slab_data;

		/* Profiling data, used for large objects. */
		e_prof_info_t e_prof_info;
	};
};

TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
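/*
 * A hedged usage sketch, assuming the generated names follow the usual
 * list-op naming of TYPED_LIST() in typed_list.h:
 *
 *	edata_list_active_t list;
 *	edata_list_active_init(&list);
 *	edata_list_active_append(&list, edata);
 *	edata_list_active_remove(&list, edata);
 */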

static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
	unsigned arena_ind = (unsigned)((edata->e_bits &
	    EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
	assert(arena_ind < MALLOCX_ARENA_LIMIT);

	return arena_ind;
}

static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
	szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
	    EDATA_BITS_SZIND_SHIFT);
	assert(szind <= SC_NSIZES);
	return szind;
}

static inline szind_t
edata_szind_get(const edata_t *edata) {
	szind_t szind = edata_szind_get_maybe_invalid(edata);
	assert(szind < SC_NSIZES); /* Never call when "invalid". */
	return szind;
}

static inline size_t
edata_usize_get(const edata_t *edata) {
	return sz_index2size(edata_szind_get(edata));
}

static inline unsigned
edata_binshard_get(const edata_t *edata) {
	unsigned binshard = (unsigned)((edata->e_bits &
	    EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	return binshard;
}

static inline uint64_t
edata_sn_get(const edata_t *edata) {
	return edata->e_sn;
}

static inline extent_state_t
edata_state_get(const edata_t *edata) {
	return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
	    EDATA_BITS_STATE_SHIFT);
}

static inline bool
edata_guarded_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
	    EDATA_BITS_GUARDED_SHIFT);
}

static inline bool
edata_zeroed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
	    EDATA_BITS_ZEROED_SHIFT);
}

static inline bool
edata_committed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
	    EDATA_BITS_COMMITTED_SHIFT);
}

static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
	return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
	    EDATA_BITS_PAI_SHIFT);
}

static inline bool
edata_slab_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
	    EDATA_BITS_SLAB_SHIFT);
}

static inline unsigned
edata_nfree_get(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
	    EDATA_BITS_NFREE_SHIFT);
}

static inline void *
edata_base_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return PAGE_ADDR2BASE(edata->e_addr);
}

static inline void *
edata_addr_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return edata->e_addr;
}

static inline size_t
edata_size_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_SIZE_MASK);
}

static inline size_t
edata_esn_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_ESN_MASK);
}

static inline size_t
edata_bsize_get(const edata_t *edata) {
	return edata->e_bsize;
}

static inline hpdata_t *
edata_ps_get(const edata_t *edata) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	return edata->e_ps;
}

static inline void *
edata_before_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}

static inline void *
edata_last_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata) - PAGE);
}

static inline void *
edata_past_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata));
}

static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
	return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
	    ATOMIC_ACQUIRE);
}

static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
	return &edata->e_prof_info.e_prof_alloc_time;
}

static inline size_t
edata_prof_alloc_size_get(const edata_t *edata) {
	return edata->e_prof_info.e_prof_alloc_size;
}

static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
	return (prof_recent_t *)atomic_load_p(
	    &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}

static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
	    ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}

static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}

static inline void
edata_addr_set(edata_t *edata, void *addr) {
	edata->e_addr = addr;
}

static inline void
edata_size_set(edata_t *edata, size_t size) {
	assert((size & ~EDATA_SIZE_MASK) == 0);
	edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}

static inline void
edata_esn_set(edata_t *edata, size_t esn) {
	edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
	    EDATA_ESN_MASK);
}

static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
	edata->e_bsize = bsize;
}

static inline void
edata_ps_set(edata_t *edata, hpdata_t *ps) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	edata->e_ps = ps;
}

static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
	assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
	    ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}

static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
	assert(edata_slab_get(edata));
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits &
	    (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_inc(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_dec(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
	assert(edata_slab_get(edata));
	edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_sn_set(edata_t *edata, uint64_t sn) {
	edata->e_sn = sn;
}

static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
	    ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}

static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
	    ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}

static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
	    ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}

static inline void
edata_committed_set(edata_t *edata, bool committed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
	    ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}

static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
	    ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}

static inline void
edata_slab_set(edata_t *edata, bool slab) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
	    ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}

static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
	atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
	nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}

static inline void
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
	edata->e_prof_info.e_prof_alloc_size = size;
}

static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
    prof_recent_t *recent_alloc) {
	atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
	    ATOMIC_RELAXED);
}

static inline bool
edata_is_head_get(edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
	    EDATA_BITS_IS_HEAD_SHIFT);
}

static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
	    ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}

static inline bool
edata_state_in_transition(extent_state_t state) {
	return state >= extent_state_transition;
}

/*
 * Because this function is implemented as a sequence of bitfield modifications,
 * even though each individual bit is properly initialized, we technically read
 * uninitialized data within it. This is mostly fine, since most callers get
 * their edatas from zeroing sources, but callers who make stack edata_ts need
 * to manually zero them.
 */
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
    bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	edata_arena_ind_set(edata, arena_ind);
	edata_addr_set(edata, addr);
	edata_size_set(edata, size);
	edata_slab_set(edata, slab);
	edata_szind_set(edata, szind);
	edata_sn_set(edata, sn);
	edata_state_set(edata, state);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, zeroed);
	edata_committed_set(edata, committed);
	edata_pai_set(edata, pai);
	edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
	if (config_prof) {
		edata_prof_tctx_set(edata, NULL);
	}
}
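/*
 * A minimal sketch of the stack-allocation caveat above (illustrative only):
 * since each setter reads e_bits before writing it back, an on-stack edata_t
 * must be zeroed before edata_init() is called:
 *
 *	edata_t tmp;
 *	memset(&tmp, 0, sizeof(tmp));
 *	edata_init(&tmp, arena_ind, addr, size, false, szind, sn,
 *	    extent_state_active, false, true, EXTENT_PAI_PAC,
 *	    EXTENT_NOT_HEAD);
 */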

static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
	edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
	edata_addr_set(edata, addr);
	edata_bsize_set(edata, bsize);
	edata_slab_set(edata, false);
	edata_szind_set(edata, SC_NSIZES);
	edata_sn_set(edata, sn);
	edata_state_set(edata, extent_state_active);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, true);
	edata_committed_set(edata, true);
	/*
	 * This isn't strictly true, but base-allocated extents never get
	 * deallocated and can't be looked up in the emap, so there's no sense
	 * in wasting a state bit to encode this fact.
	 */
	edata_pai_set(edata, EXTENT_PAI_PAC);
}

static inline int
edata_esn_comp(const edata_t *a, const edata_t *b) {
	size_t a_esn = edata_esn_get(a);
	size_t b_esn = edata_esn_get(b);

	return (a_esn > b_esn) - (a_esn < b_esn);
}
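/*
 * Note on the comparators here and below: the (a > b) - (a < b) idiom yields
 * -1, 0, or 1, and (unlike returning a - b) cannot overflow.
 */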

static inline int
edata_ead_comp(const edata_t *a, const edata_t *b) {
	uintptr_t a_eaddr = (uintptr_t)a;
	uintptr_t b_eaddr = (uintptr_t)b;

	return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}

static inline edata_cmp_summary_t
edata_cmp_summary_get(const edata_t *edata) {
	edata_cmp_summary_t result;
	result.sn = edata_sn_get(edata);
	result.addr = (uintptr_t)edata_addr_get(edata);
	return result;
}

static inline int
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
	int ret;
	ret = (a.sn > b.sn) - (a.sn < b.sn);
	if (ret != 0) {
		return ret;
	}
	ret = (a.addr > b.addr) - (a.addr < b.addr);
	return ret;
}

static inline int
edata_snad_comp(const edata_t *a, const edata_t *b) {
	edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
	edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);

	return edata_cmp_summary_comp(a_cmp, b_cmp);
}

static inline int
edata_esnead_comp(const edata_t *a, const edata_t *b) {
	int ret;

	ret = edata_esn_comp(a, b);
	if (ret != 0) {
		return ret;
	}

	ret = edata_ead_comp(a, b);
	return ret;
}

ph_proto(, edata_avail, edata_t)
ph_proto(, edata_heap, edata_t)

#endif /* JEMALLOC_INTERNAL_EDATA_H */