#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);

static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);

const extent_hooks_t extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
	,
	extent_split_default,
	extent_merge_default
};

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/

#define ATTR_NONE /* does nothing */

ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

#undef ATTR_NONE

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result, bool inactive_only) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	/* Slab implies active extents and should be skipped. */
	if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
	    &extents_rtree, elm, true))) {
		return lock_result_no_extent;
	}

	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
    bool inactive_only) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
		    inactive_only);
	} while (lock_result == lock_result_failure);
	return ret;
}
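/*
 * Illustrative usage sketch (not part of the build): a caller that wants to
 * examine the extent mapped at addr, if any, might do:
 *
 *	extent_t *extent = extent_lock_from_addr(tsdn, rtree_ctx, addr,
 *	    false);
 *	if (extent != NULL) {
 *		... inspect extent while its pool mutex is held ...
 *		extent_unlock(tsdn, extent);
 *	}
 *
 * The retry loop above treats lock_result_failure as transient: it only means
 * the leaf->extent mapping changed between the unlocked read and the lock
 * acquisition, not that no extent exists at addr.
 */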

extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}

extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}
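/*
 * Worked example for the two quantization functions above, assuming 4 KiB
 * pages and sz_large_pad == 0: the page-size classes near 32 KiB are
 * {..., 28 KiB, 32 KiB, 40 KiB, ...}, so extent_size_quantize_floor(36 KiB)
 * == 32 KiB and extent_size_quantize_ceil(36 KiB) == 40 KiB.  A size that is
 * itself a size class maps to itself under both functions.
 */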

/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

size_t
extents_nextents_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
}

size_t
extents_nbytes_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
}

static void
extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}

static void
extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}

static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_add(extents, pind, size);
	}

	extent_list_append(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_sub(extents, pind, size);
	}

	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}
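/*
 * Note on the bitmap convention used by the insert / remove functions above:
 * a bit in extents->bitmap is kept unset while the corresponding heap is
 * nonempty, so bitmap_ffu() (first-fit search for an unset bit) lands
 * directly on the smallest size class that has an available extent.  For
 * example, if only heaps[3] and heaps[7] are nonempty, a search starting at
 * index 0 yields 3, and a search starting at index 4 yields 7.
 */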

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
 */
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
    size_t alignment) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(i < SC_NPSIZES);
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		uintptr_t base = (uintptr_t)extent_base_get(extent);
		size_t candidate_size = extent_size_get(extent);
		assert(candidate_size >= min_size);

		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
		    PAGE_CEILING(alignment));
		if (base > next_align || base + candidate_size <= next_align) {
			/* Overflow or not crossing the next alignment. */
			continue;
		}

		size_t leadsize = next_align - base;
		if (candidate_size - leadsize >= min_size) {
			return extent;
		}
	}

	return NULL;
}
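/*
 * Worked example for the alignment check above (illustrative numbers): with
 * base == 0x7000, candidate_size == 0x4000, and alignment == 0x8000,
 * next_align == 0x8000 and leadsize == 0x1000, so the candidate is usable
 * only if 0x4000 - 0x1000 >= min_size.  The base > next_align case can only
 * arise when ALIGNMENT_CEILING overflows near the top of the address space.
 */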

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));

	if (!maps_coalesce && !opt_retain) {
		/*
		 * No split / merge allowed (Windows w/o retain).  Try exact fit
		 * only.
		 */
		return extent_heap_empty(&extents->heaps[pind]) ? NULL :
		    extent_heap_first(&extents->heaps[pind]);
	}

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind);
	    i < SC_NPSIZES + 1;
	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		/*
		 * In order to reduce fragmentation, avoid reusing and splitting
		 * large extents for much smaller sizes.
		 *
		 * Only do this check for dirty extents (delay_coalesce).
		 */
		if (extents->delay_coalesce &&
		    (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
			break;
		}
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == SC_NPSIZES) {
			break;
		}
		assert(i < SC_NPSIZES);
	}

	return ret;
}

/*
 * Do first-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t esize, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}
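	/*
	 * Worked example, assuming 4 KiB pages: for esize == 16 KiB and
	 * alignment == 64 KiB, max_size == 16 KiB + 64 KiB - 4 KiB == 76 KiB.
	 * Any extent of at least max_size necessarily contains an aligned
	 * 16 KiB range, so the first-fit search below cannot yield a false
	 * positive.
	 */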

	extent_t *extent =
	    extents_first_fit_locked(tsdn, arena, extents, max_size);

	if (alignment > PAGE && extent == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic.  Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		extent = extents_fit_alignment(extents, esize, max_size,
		    alignment);
	}

	return extent;
}

static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent);
	return false;
}

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}

extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}

/*
 * This can only happen when we fail to allocate a new extent struct (which
 * indicates OOM), e.g. when trying to split an existing extent.
 */
static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained) {
	size_t sz = extent_size_get(extent);
	if (config_stats) {
		arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
	}
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, sz, growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent);
	extent_state_set(extent, extent_state_active);
}

static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_b == NULL) {
		return true;
	}
	assert(*r_elm_b != NULL);

	return false;
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
		    slab);
	}
}

static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}
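/*
 * The loop above intentionally covers only pages [1, npages-2]: the first and
 * last pages of the extent are written separately, via the two leaf elements
 * looked up in extent_register_impl(), so interior registration never touches
 * them.
 */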

static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}

static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	/*
	 * We need to hold the lock to protect against a concurrent coalesce
	 * operation that sees us in a partial state.
	 */
	extent_lock(tsdn, extent);

	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		extent_unlock(tsdn, extent);
		return true;
	}

	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

/*
 * Removes all pointers to the given extent from the global rtree indices for
 * its interior.  This is relevant for slab extents, for which we need to do
 * metadata lookups at places other than the head of the extent.  We deregister
 * on the interior, then, when an extent moves from being an active slab to an
 * inactive state.
 */
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
	    &elm_a, &elm_b);

	extent_lock(tsdn, extent);

	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump) {
		extent_gdump_sub(tsdn, extent);
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, false);
}

/*
 * Tries to find and remove an extent from extents that can be used for the
 * given allocation request.
 */
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 * 1) Recycle a known-extant extent, e.g. during purging.
		 * 2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	extent_t *extent;
	if (new_addr != NULL) {
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
		    false);
		if (extent != NULL) {
			/*
			 * We might null-out extent to report an error, but we
			 * still need to unlock the associated mutex after.
			 */
			extent_t *unlock_extent = extent;
			assert(extent_base_get(extent) == new_addr);
			if (extent_arena_get(extent) != arena ||
			    extent_size_get(extent) < esize ||
			    extent_state_get(extent) !=
			    extents_state_get(extents)) {
				extent = NULL;
			}
			extent_unlock(tsdn, unlock_extent);
		}
	} else {
		extent = extents_fit_locked(tsdn, arena, extents, esize,
		    alignment);
	}
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &extents->mtx);
		return NULL;
	}

	extent_activate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);

	return extent;
}

/*
 * Given an allocation request and an extent guaranteed to be able to satisfy
 * it, this splits off lead and trail extents, leaving extent pointing to an
 * extent satisfying the allocation.
 * This function doesn't put lead or trail into any extents_t; it's the caller's
 * job to ensure that they can be reused.
 */
typedef enum {
	/*
	 * Split successfully.  lead, extent, and trail, are modified to extents
	 * describing the ranges before, in, and after the given allocation.
	 */
	extent_split_interior_ok,
	/*
	 * The extent can't satisfy the given allocation request.  None of the
	 * input extent_t *s are touched.
	 */
	extent_split_interior_cant_alloc,
	/*
	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
	 * None of lead, extent, or trail are valid.
	 */
	extent_split_interior_error
} extent_split_interior_result_t;

static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
    /* The result of splitting, in case of success. */
    extent_t **extent, extent_t **lead, extent_t **trail,
    /* The mess to clean up, in case of error. */
    extent_t **to_leak, extent_t **to_salvage,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, bool growing_retained) {
	size_t esize = size + pad;
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
	assert(new_addr == NULL || leadsize == 0);
	if (extent_size_get(*extent) < leadsize + esize) {
		return extent_split_interior_cant_alloc;
	}
	size_t trailsize = extent_size_get(*extent) - leadsize - esize;

	*lead = NULL;
	*trail = NULL;
	*to_leak = NULL;
	*to_salvage = NULL;

	/* Split the lead. */
	if (leadsize != 0) {
		*lead = *extent;
		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
		    *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
		    slab, growing_retained);
		if (*extent == NULL) {
			*to_leak = *lead;
			*lead = NULL;
			return extent_split_interior_error;
		}
	}

	/* Split the trail. */
	if (trailsize != 0) {
		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
		    esize, szind, slab, trailsize, SC_NSIZES, false,
		    growing_retained);
		if (*trail == NULL) {
			*to_leak = *extent;
			*to_salvage = *lead;
			*lead = NULL;
			*extent = NULL;
			return extent_split_interior_error;
		}
	}

	if (leadsize == 0 && trailsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_szind_set(*extent, szind);
		if (szind != SC_NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(*extent), szind, slab);
			if (slab && extent_size_get(*extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(*extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	return extent_split_interior_ok;
}
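/*
 * Worked example (illustrative numbers): recycling a 64 KiB extent at
 * 0x14000 for a request with size == 16 KiB, pad == 0, and alignment ==
 * 32 KiB gives leadsize == 16 KiB and trailsize == 32 KiB, so on success
 * *lead covers [0x14000, 0x18000), *extent covers [0x18000, 0x1c000), and
 * *trail covers [0x1c000, 0x24000).
 */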

/*
 * This fulfills the indicated allocation request out of the given extent (which
 * the caller should have ensured was big enough).  If there's any unused space
 * before or after the resulting allocation, that space is given its own extent
 * and put back into extents.
 */
static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent, bool growing_retained) {
	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;

	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
	    growing_retained);

	if (!maps_coalesce && result != extent_split_interior_ok
	    && !opt_retain) {
		/*
		 * Split isn't supported (implies Windows w/o retain).  Avoid
		 * leaking the extents.
		 */
		assert(to_leak != NULL && lead == NULL && trail == NULL);
		extent_deactivate(tsdn, arena, extents, to_leak);
		return NULL;
	}

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_deactivate(tsdn, arena, extents, lead);
		}
		if (trail != NULL) {
			extent_deactivate(tsdn, arena, extents, trail);
		}
		return extent;
	} else {
		/*
		 * We should have picked an extent that was large enough to
		 * fulfill our allocation request.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			extent_deregister(tsdn, to_salvage);
		}
		if (to_leak != NULL) {
			void *leak = extent_base_get(to_leak);
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
			    to_leak, growing_retained);
			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
			    false) == NULL);
		}
		return NULL;
	}
	unreachable();
}

static bool
extent_need_manual_zero(arena_t *arena) {
	/*
	 * Need to manually zero the extent on repopulating if either: 1)
	 * non-default extent hooks are installed (in which case the purge
	 * semantics may change); or 2) transparent huge pages are enabled.
	 */
	return (!arena_has_default_hooks(arena) ||
	    (opt_thp == thp_mode_always));
}

/*
 * Tries to satisfy the given allocation request by reusing one of the extents
 * in the given extents_t.
 */
static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, new_addr, size, pad, alignment, slab, szind, extent,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent), growing_retained)) {
			extent_record(tsdn, arena, r_extent_hooks, extents,
			    extent, growing_retained);
			return NULL;
		}
		if (!extent_need_manual_zero(arena)) {
			extent_zeroed_set(extent, true);
		}
	}

	if (extent_committed_get(extent)) {
		*commit = true;
	}
	if (extent_zeroed_get(extent)) {
		*zero = true;
	}

	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	assert(extent_state_get(extent) == extent_state_active);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	if (*zero) {
		void *addr = extent_base_get(extent);
		if (!extent_zeroed_get(extent)) {
			size_t size = extent_size_get(extent);
			if (extent_need_manual_zero(arena) ||
			    pages_purge_forced(addr, size)) {
				memset(addr, 0, size);
			}
		} else if (config_debug) {
			size_t *p = (size_t *)(uintptr_t)addr;
			/* Check the first page only. */
			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
				assert(p[i] == 0);
			}
		}
	}
	return extent;
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}
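/*
 * For example, with opt_dss set to "primary" (dss_prec_primary), the order of
 * attempts above is sbrk then mmap; with the default "secondary" setting it
 * is mmap then sbrk.  When DSS is unsupported (!have_dss), only the mmap path
 * is ever taken.
 */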

static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
	    zero, commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
}

static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
		/*
		 * The only legitimate case of customized extent hooks for a0 is
		 * hooks with no allocation activities.  One such example is to
		 * place metadata on pre-allocated resources such as huge pages.
		 * In that case, rely on reentrancy_level checks to catch
		 * infinite recursions.
		 */
		pre_reentrancy(tsd, NULL);
	} else {
		pre_reentrancy(tsd, arena);
	}
}

static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	post_reentrancy(tsd);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	size_t esize = size + pad;
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		goto label_err;
	}
	/*
	 * Find the next extent size in the series that would be large enough to
	 * satisfy this request.
	 */
	pszind_t egn_skip = 0;
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	while (alloc_size < alloc_size_min) {
		egn_skip++;
		if (arena->extent_grow_next + egn_skip >=
		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
			/* Outside legal range. */
			goto label_err;
		}
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	}
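	/*
	 * Worked example: with four page-size classes per size doubling, if
	 * extent_grow_next currently names the 2 MiB class and alloc_size_min
	 * is 3 MiB, the loop above skips the 2 MiB and 2.5 MiB classes and
	 * settles on alloc_size == 3 MiB (egn_skip == 2).
	 */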

	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		goto label_err;
	}
	bool zeroed = false;
	bool committed = false;

	void *ptr;
	if (*r_extent_hooks == &extent_hooks_default) {
		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
		    alloc_size, PAGE, &zeroed, &committed);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
		    alloc_size, PAGE, &zeroed, &committed,
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed, true, EXTENT_IS_HEAD);
	if (ptr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_register_no_gdump_add(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;
	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
	    true);

	if (result == extent_split_interior_ok) {
		if (lead != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead, true);
		}
		if (trail != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, trail, true);
		}
	} else {
		/*
		 * We should have allocated a sufficiently large extent; the
		 * cant_alloc case should not occur.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			if (config_prof) {
				extent_gdump_add(tsdn, to_salvage);
			}
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_salvage, true);
		}
		if (to_leak != NULL) {
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_abandon_vm(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_leak, true);
		}
		goto label_err;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
		    extent_size_get(extent), true)) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		if (!extent_need_manual_zero(arena)) {
			extent_zeroed_set(extent, true);
		}
	}

	/*
	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
	 * range.
	 */
	if (arena->extent_grow_next + egn_skip + 1 <=
	    arena->retain_grow_limit) {
		arena->extent_grow_next += egn_skip + 1;
	} else {
		arena->extent_grow_next = arena->retain_grow_limit;
	}
	/* All opportunities for failure are past. */
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

	if (config_prof) {
		/* Adjust gdump stats now that extent is final size. */
		extent_gdump_add(tsdn, extent);
	}
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}
	if (*zero && !extent_zeroed_get(extent)) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		if (extent_need_manual_zero(arena) ||
		    pages_purge_forced(addr, size)) {
			memset(addr, 0, size);
		}
	}

	return extent;
label_err:
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	return NULL;
}

static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size != 0);
	assert(alignment != 0);

	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
	    szind, zero, commit, true);
	if (extent != NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
		if (config_prof) {
			extent_gdump_add(tsdn, extent);
		}
	} else if (opt_retain && new_addr == NULL) {
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
		    pad, alignment, slab, szind, zero, commit);
		/* extent_grow_retained() always releases extent_grow_mtx. */
	} else {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	}
	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);

	return extent;
}

static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	size_t esize = size + pad;
	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		return NULL;
	}
	void *addr;
	size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
		    palignment, zero, commit);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
		    esize, palignment, zero, commit, arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}
	if (addr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}
	extent_init(extent, arena, addr, esize, slab, szind,
	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
	    true, EXTENT_NOT_HEAD);
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (extent_register(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}

	return extent;
}

extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
	    new_addr, size, pad, alignment, slab, szind, zero, commit);
	if (extent == NULL) {
		if (opt_retain && new_addr != NULL) {
			/*
			 * When retain is enabled and new_addr is set, we do not
			 * attempt extent_alloc_wrapper_hard; its mmap is very
			 * unlikely to succeed at new_addr (unless the request
			 * happens to be at the end of an existing mapping).
			 */
			return NULL;
		}
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
	}

	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}
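/*
 * Illustrative call (a sketch of how arena code typically drives this path,
 * with placeholder arguments): allocating a zeroed, committed, page-aligned
 * 32 KiB non-slab extent might look like
 *
 *	extent_hooks_t *hooks = EXTENT_HOOKS_INITIALIZER;
 *	bool zero = true;
 *	bool commit = true;
 *	extent_t *extent = extent_alloc_wrapper(tsdn, arena, &hooks, NULL,
 *	    32 * 1024, 0, PAGE, false, SC_NSIZES, &zero, &commit);
 *
 * On return, zero and commit reflect the actual state of the memory, which
 * may be stronger than what was requested.
 */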

static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
    const extent_t *outer) {
	assert(extent_arena_get(inner) == arena);
	if (extent_arena_get(outer) != arena) {
		return false;
	}

	assert(extent_state_get(inner) == extent_state_active);
	if (extent_state_get(outer) != extents->state) {
		return false;
	}

	if (extent_committed_get(inner) != extent_committed_get(outer)) {
		return false;
	}

	return true;
}

static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
    bool growing_retained) {
	assert(extent_can_coalesce(arena, extents, inner, outer));

	extent_activate_locked(tsdn, arena, extents, outer);

	malloc_mutex_unlock(tsdn, &extents->mtx);
	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
	    forward ? inner : outer, forward ? outer : inner, growing_retained);
	malloc_mutex_lock(tsdn, &extents->mtx);

	if (err) {
		extent_deactivate_locked(tsdn, arena, extents, outer);
	}

	return err;
}

static extent_t *
extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained,
    bool inactive_only) {
	/*
	 * When inactive_only is true, neighbor lookups skip extents whose
	 * rtree leaf has the slab bit set, i.e. active slabs: large extents
	 * are eagerly coalesced on every deallocation, so repeatedly locking
	 * active neighbors (which can never be coalesced with) would cause
	 * needless lock contention.
	 */
	/*
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
	bool again;
	do {
		again = false;

		/* Try to coalesce forward. */
		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_past_get(extent), inactive_only);
		if (next != NULL) {
			/*
			 * extents->mtx only protects against races for
			 * like-state extents, so call extent_can_coalesce()
			 * before releasing next's pool lock.
			 */
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, next);

			extent_unlock(tsdn, next);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, next, true,
			    growing_retained)) {
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}

		/* Try to coalesce backward. */
		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_before_get(extent), inactive_only);
		if (prev != NULL) {
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, prev);
			extent_unlock(tsdn, prev);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, prev, false,
			    growing_retained)) {
				extent = prev;
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}
	} while (again);

	if (extents->delay_coalesce) {
		*coalesced = false;
	}
	return extent;
}

static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, coalesced, growing_retained, false);
}

static extent_t *
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, coalesced, growing_retained, true);
}
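/*
 * Of the two wrappers above, extent_try_coalesce_large() passes
 * inactive_only == true so that neighbor lookups skip active slab extents
 * entirely; it is used for the eager coalescing of large extents in
 * extent_record() below.
 */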
1686
1687/*
1688 * Does the metadata management portions of putting an unused extent into the
1689 * given extents_t (coalesces, deregisters slab interiors, the heap operations).
1690 */
1691static void
1692extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1693 extents_t *extents, extent_t *extent, bool growing_retained) {
1694 rtree_ctx_t rtree_ctx_fallback;
1695 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1696
1697 assert((extents_state_get(extents) != extent_state_dirty &&
1698 extents_state_get(extents) != extent_state_muzzy) ||
1699 !extent_zeroed_get(extent));
1700
1701 malloc_mutex_lock(tsdn, &extents->mtx);
1702 extent_hooks_assure_initialized(arena, r_extent_hooks);
1703
1704 extent_szind_set(extent, SC_NSIZES);
1705 if (extent_slab_get(extent)) {
1706 extent_interior_deregister(tsdn, rtree_ctx, extent);
1707 extent_slab_set(extent, false);
1708 }
1709
1710 assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1711 (uintptr_t)extent_base_get(extent), true) == extent);
1712
1713 if (!extents->delay_coalesce) {
1714 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1715 rtree_ctx, extents, extent, NULL, growing_retained);
1716 } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
1717 assert(extents == &arena->extents_dirty);
1718 /* Always coalesce large extents eagerly. */
1719 bool coalesced;
1720 do {
1721 assert(extent_state_get(extent) == extent_state_active);
1722 extent = extent_try_coalesce_large(tsdn, arena,
1723 r_extent_hooks, rtree_ctx, extents, extent,
1724 &coalesced, growing_retained);
1725 } while (coalesced);
1726 if (extent_size_get(extent) >= oversize_threshold) {
1727 /* Shortcut to purge the oversize extent eagerly. */
1728 malloc_mutex_unlock(tsdn, &extents->mtx);
1729 arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
1730 return;
1731 }
1732 }
1733 extent_deactivate_locked(tsdn, arena, extents, extent);
1734
1735 malloc_mutex_unlock(tsdn, &extents->mtx);
1736}
1737
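/*
 * Puts an unregistered gap extent (e.g. alignment padding left over while
 * carving out a new mapping) onto the dalloc path.  The extent must be
 * registered before it can flow through extent_dalloc_wrapper(); if
 * registration fails, only the extent_t metadata is returned to the arena.
 */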
void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (extent_register(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		return;
	}
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

static bool
extent_may_dalloc(void) {
	/* With retain enabled, the default dalloc always fails. */
	return !opt_retain;
}

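/*
 * Follows the extent_hooks dalloc contract: returns false on success, true if
 * the memory could not be deallocated.  DSS memory is never unmapped.
 */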
static bool
extent_dalloc_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return extent_dalloc_default_impl(addr, size);
}

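/*
 * Returns false if the extent's pages were returned to the system and its
 * metadata freed; returns true if the dalloc hook declined (or was NULL), in
 * which case the caller must retain the extent.
 */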
static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to deallocate. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
		extent_hook_post_reentrancy(tsdn);
	}

	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}

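/*
 * Fallback chain: attempt full deallocation first; failing that, try to
 * decommit, then forced purge, then lazy purge, and finally record the extent
 * in extents_retained with its zeroed flag reflecting how far we got.
 */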
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Avoid calling the default extent_dalloc unless we have to. */
	if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
		/*
		 * Deregister first to avoid a race with other allocating
		 * threads, and reregister if deallocation fails.
		 */
		extent_deregister(tsdn, extent);
		if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
		    extent)) {
			return;
		}
		extent_reregister(tsdn, extent);
	}

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		zeroed = false;
	} else {
		zeroed = false;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}

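/*
 * Unlike dalloc, destroy cannot fail: non-DSS memory is unconditionally
 * unmapped, while DSS memory is intentionally never unmapped.
 */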
static void
extent_destroy_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	extent_destroy_default_impl(addr, size);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Deregister first to avoid a race with other allocating threads. */
	extent_deregister(tsdn, extent);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to destroy; silently fail otherwise. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		extent_destroy_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else if ((*r_extent_hooks)->destroy != NULL) {
		extent_hook_pre_reentrancy(tsdn, arena);
		(*r_extent_hooks)->destroy(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	extent_dalloc(tsdn, arena, extent);
}

static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

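/*
 * Returns true on error.  A NULL commit hook counts as failure; on success the
 * extent's committed flag is raised (it is left untouched on failure).
 */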
static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
	    length, false);
}

static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

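/*
 * Mirror image of commit: returns true on error, and the committed flag is
 * cleared only when the decommit hook succeeds.
 */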
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}

#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif

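/*
 * Lazy purge hints that the pages are unneeded but leaves them committed;
 * their contents become undefined.  Returns true if there is no purge_lazy
 * hook or the hook fails.
 */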
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_lazy == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}

	return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_forced == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	if (!maps_coalesce) {
		/*
		 * Without retain, only whole regions can be purged (required by
		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
		 * comments in extent_head_no_merge().
		 */
		return !opt_retain;
	}

	return false;
}

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters
 * go with the trail (the higher addressed portion).  This makes 'extent' the
 * lead and returns the trail, or NULL on error.
 */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_dumpable_get(extent),
	    EXTENT_NOT_HEAD);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_dumpable_get(extent),
		    EXTENT_NOT_HEAD);

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

	extent_lock2(tsdn, extent, trail);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->split(*r_extent_hooks,
	    extent_base_get(extent), size_a + size_b, size_a, size_b,
	    extent_committed_get(extent), arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	if (err) {
		goto label_error_c;
	}

	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
}

static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce && !opt_retain) {
		return true;
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

/*
 * Returns true if the given extents can't be merged because of their head bit
 * settings.  Assumes the second extent has the higher address.
 */
static bool
extent_head_no_merge(extent_t *a, extent_t *b) {
	assert(extent_base_get(a) < extent_base_get(b));
	/*
	 * When coalesce is not always allowed (Windows), only merge extents
	 * from the same VirtualAlloc region under opt.retain (in which case
	 * MEM_DECOMMIT is utilized for purging).
	 */
	if (maps_coalesce) {
		return false;
	}
	if (!opt_retain) {
		return true;
	}
	/* If b is a head extent, disallow the cross-region merge. */
	if (extent_is_head_get(b)) {
		/*
		 * Additionally, sn should not overflow with retain; sanity
		 * check that different regions have unique sn.
		 */
		assert(extent_sn_comp(a, b) != 0);
		return true;
	}
	assert(extent_sn_comp(a, b) == 0);

	return false;
}

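/*
 * The hook signature cannot carry tsdn, so on !maps_coalesce platforms the
 * default merge hook re-derives the extent_t pointers via tsdn_fetch() and
 * iealloc() in order to apply the head-bit check above.
 */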
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	if (!maps_coalesce) {
		tsdn_t *tsdn = tsdn_fetch();
		extent_t *a = iealloc(tsdn, addr_a);
		extent_t *b = iealloc(tsdn, addr_b);
		if (extent_head_no_merge(a, b)) {
			return true;
		}
	}
	return extent_merge_default_impl(addr_a, addr_b);
}

static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(extent_base_get(a) < extent_base_get(b));

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	extent_lock2(tsdn, a, b);

	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    SC_NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    SC_NSIZES, false);
	} else {
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, SC_NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
	    false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}

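/*
 * On success 'a' absorbs 'b': size(a) becomes size(a) + size(b), sn(a)
 * becomes min(sn(a), sn(b)), zeroed(a) becomes zeroed(a) && zeroed(b), the
 * size class is reset to SC_NSIZES, and b's extent_t is returned to its
 * arena.  E.g., merging a zeroed one-page extent with sn 7 and a non-zeroed
 * one-page neighbor with sn 3 yields a two-page, non-zeroed extent with sn 3.
 */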
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}

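/*
 * Reports utilization for the extent containing ptr: *nfree and *nregs are
 * the free and total region counts (0 and 1 respectively for non-slab
 * extents), and *size is the extent size in bytes.
 */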
void
extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		*nfree = *nregs = *size = 0;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		*nfree = 0;
		*nregs = 1;
	} else {
		*nfree = extent_nfree_get(extent);
		*nregs = bin_infos[extent_szind_get(extent)].nregs;
		assert(*nfree <= *nregs);
		assert(*nfree * extent_usize_get(extent) <= *size);
	}
}

void
extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size,
    size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
		*slabcur_addr = NULL;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		*nfree = *bin_nfree = *bin_nregs = 0;
		*nregs = 1;
		*slabcur_addr = NULL;
		return;
	}

	*nfree = extent_nfree_get(extent);
	const szind_t szind = extent_szind_get(extent);
	*nregs = bin_infos[szind].nregs;
	assert(*nfree <= *nregs);
	assert(*nfree * extent_usize_get(extent) <= *size);

	const arena_t *arena = extent_arena_get(extent);
	assert(arena != NULL);
	const unsigned binshard = extent_binshard_get(extent);
	bin_t *bin = &arena->bins[szind].bin_shards[binshard];

	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		*bin_nregs = *nregs * bin->stats.curslabs;
		assert(*bin_nregs >= bin->stats.curregs);
		*bin_nfree = *bin_nregs - bin->stats.curregs;
	} else {
		*bin_nfree = *bin_nregs = 0;
	}
	*slabcur_addr = extent_addr_get(bin->slabcur);
	assert(*slabcur_addr != NULL);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

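/*
 * Illustrative arithmetic on the verbose outputs (hypothetical values): for a
 * slab with *nregs == 512 and *nfree == 128, extent-level utilization is
 * (*nregs - *nfree) / *nregs == 384 / 512 == 75%; the bin-level analogue is
 * (*bin_nregs - *bin_nfree) / *bin_nregs whenever config_stats caused
 * *bin_nregs to be filled in.
 */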