1#include "jemalloc/internal/jemalloc_preamble.h"
2#include "jemalloc/internal/jemalloc_internal_includes.h"
3
4#include "jemalloc/internal/assert.h"
5#include "jemalloc/internal/emap.h"
6#include "jemalloc/internal/extent_dss.h"
7#include "jemalloc/internal/extent_mmap.h"
8#include "jemalloc/internal/ph.h"
9#include "jemalloc/internal/mutex.h"
10
11/******************************************************************************/
12/* Data. */
13
14size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
15
16static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
17 size_t offset, size_t length, bool growing_retained);
18static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
19 edata_t *edata, size_t offset, size_t length, bool growing_retained);
20static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
21 edata_t *edata, size_t offset, size_t length, bool growing_retained);
22static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
23 edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
24static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
25 edata_t *a, edata_t *b, bool holding_core_locks);
26
27/* Used exclusively for gdump triggering. */
28static atomic_zu_t curpages;
29static atomic_zu_t highpages;
30
31/******************************************************************************/
32/*
33 * Function prototypes for static functions that are referenced prior to
34 * definition.
35 */
36
37static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
38static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
39 ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
40 bool zero, bool *commit, bool growing_retained, bool guarded);
41static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
42 ecache_t *ecache, edata_t *edata, bool *coalesced);
43static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
44 ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
45 bool zero, bool *commit, bool guarded);
46
47/******************************************************************************/
48
49size_t
50extent_sn_next(pac_t *pac) {
51 return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED);
52}
53
54static inline bool
55extent_may_force_decay(pac_t *pac) {
56 return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
57 || pac_decay_ms_get(pac, extent_state_muzzy) == -1);
58}
59
60static bool
61extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
62 ecache_t *ecache, edata_t *edata) {
63 emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
64
65 bool coalesced;
66 edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
67 edata, &coalesced);
68 emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
69
70 if (!coalesced) {
71 return true;
72 }
73 eset_insert(&ecache->eset, edata);
74 return false;
75}
76
77edata_t *
78ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
79 edata_t *expand_edata, size_t size, size_t alignment, bool zero,
80 bool guarded) {
81 assert(size != 0);
82 assert(alignment != 0);
83 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
84 WITNESS_RANK_CORE, 0);
85
86 bool commit = true;
87 edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
88 size, alignment, zero, &commit, false, guarded);
89 assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
90 assert(edata == NULL || edata_guarded_get(edata) == guarded);
91 return edata;
92}
93
94edata_t *
95ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
96 edata_t *expand_edata, size_t size, size_t alignment, bool zero,
97 bool guarded) {
98 assert(size != 0);
99 assert(alignment != 0);
100 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
101 WITNESS_RANK_CORE, 0);
102
103 bool commit = true;
104 edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
105 size, alignment, zero, &commit, guarded);
106 if (edata == NULL) {
107 if (opt_retain && expand_edata != NULL) {
            /*
             * When retain is enabled and we are trying to expand, do
             * not attempt extent_alloc_wrapper: the mmap it performs
             * is very unlikely to succeed at the requested address
             * (unless the extent happens to be at the end of an
             * existing mapping).
             */
            return NULL;
        }
        if (guarded) {
            /*
             * This means no cached guarded extents are available
             * (and no grow_retained was attempted). The pac_alloc
             * flow will allocate regular extents to make new guarded
             * ones.
             */
            return NULL;
        }
        void *new_addr = (expand_edata == NULL) ? NULL :
            edata_past_get(expand_edata);
        edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
            size, alignment, zero, &commit,
            /* growing_retained */ false);
    }

    assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
    return edata;
}

void
ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    edata_t *edata) {
    assert(edata_base_get(edata) != NULL);
    assert(edata_size_get(edata) != 0);
    assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

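    /*
     * Reset addr to the extent base (dropping any alignment offset) and
     * conservatively mark the contents as not zeroed before recording.
     */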
    edata_addr_set(edata, edata_base_get(edata));
    edata_zeroed_set(edata, false);

    extent_record(tsdn, pac, ehooks, ecache, edata);
}

edata_t *
ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, size_t npages_min) {
    malloc_mutex_lock(tsdn, &ecache->mtx);

    /*
     * Get the LRU coalesced extent, if any. If coalescing was delayed,
     * the loop will iterate until the LRU extent is fully coalesced.
     */
    edata_t *edata;
    while (true) {
        /* Get the LRU extent, if any. */
        eset_t *eset = &ecache->eset;
        edata = edata_list_inactive_first(&eset->lru);
        if (edata == NULL) {
            /*
             * Next check if there are guarded extents. They are
             * more expensive to purge (since they are not
             * mergeable), so we favor caching them longer.
             */
            eset = &ecache->guarded_eset;
            edata = edata_list_inactive_first(&eset->lru);
            if (edata == NULL) {
                goto label_return;
            }
        }
        /* Check the eviction limit. */
        size_t extents_npages = ecache_npages_get(ecache);
        if (extents_npages <= npages_min) {
            edata = NULL;
            goto label_return;
        }
        eset_remove(eset, edata);
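        /*
         * With eager coalescing, or for guarded extents (which are
         * never merged), the extent is already in its final shape and
         * can be evicted directly.
         */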
        if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
            break;
        }
        /* Try to coalesce. */
        if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
            edata)) {
            break;
        }
        /*
         * The LRU extent was just coalesced and the result placed in
         * the LRU at its neighbor's position. Start over.
         */
    }

    /*
     * Either mark the extent active or deregister it to protect against
     * concurrent operations.
     */
    switch (ecache->state) {
    case extent_state_active:
        not_reached();
    case extent_state_dirty:
    case extent_state_muzzy:
        emap_update_edata_state(tsdn, pac->emap, edata,
            extent_state_active);
        break;
    case extent_state_retained:
        extent_deregister(tsdn, pac, edata);
        break;
    default:
        not_reached();
    }

label_return:
    malloc_mutex_unlock(tsdn, &ecache->mtx);
    return edata;
}

/*
 * This can only happen when we fail to allocate a new extent struct (which
 * indicates OOM), e.g. when trying to split an existing extent.
 */
static void
extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    edata_t *edata, bool growing_retained) {
    size_t sz = edata_size_get(edata);
    if (config_stats) {
        atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
            ATOMIC_RELAXED);
    }
    /*
     * Leak extent after making sure its pages have already been purged, so
     * that this is only a virtual memory leak.
     */
    if (ecache->state == extent_state_dirty) {
        if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
            growing_retained)) {
            extent_purge_forced_impl(tsdn, ehooks, edata, 0,
                edata_size_get(edata), growing_retained);
        }
    }
    edata_cache_put(tsdn, pac->edata_cache, edata);
}

static void
extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
    edata_t *edata) {
    malloc_mutex_assert_owner(tsdn, &ecache->mtx);
    assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));

    emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
    eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
        &ecache->eset;
    eset_insert(eset, edata);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
    edata_t *edata) {
    assert(edata_state_get(edata) == extent_state_active);
    extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}

static void
extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
    edata_t *edata, extent_state_t expected_state) {
    assert(edata_state_get(edata) == expected_state);
    extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}

static void
extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
    edata_t *edata) {
    assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
    assert(edata_state_get(edata) == ecache->state ||
        edata_state_get(edata) == extent_state_merging);

    eset_remove(eset, edata);
    emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
}

void
extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
    cassert(config_prof);
    /* prof_gdump() requirement. */
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (opt_prof && edata_state_get(edata) == extent_state_active) {
        size_t nadd = edata_size_get(edata) >> LG_PAGE;
        size_t cur = atomic_fetch_add_zu(&curpages, nadd,
            ATOMIC_RELAXED) + nadd;
        size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
        while (cur > high && !atomic_compare_exchange_weak_zu(
            &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highpages update race.
             * Note that high is updated in case of CAS failure.
             */
        }
        if (cur > high && prof_gdump_get_unlocked()) {
            prof_gdump(tsdn);
        }
    }
}

static void
extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
    cassert(config_prof);

    if (opt_prof && edata_state_get(edata) == extent_state_active) {
        size_t nsub = edata_size_get(edata) >> LG_PAGE;
        assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
        atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
    }
}

static bool
extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
    assert(edata_state_get(edata) == extent_state_active);
    /*
     * No locking needed, as the edata must be in active state, which
     * prevents other threads from accessing the edata.
     */
    if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
        /* slab */ false)) {
        return true;
    }

    if (config_prof && gdump_add) {
        extent_gdump_add(tsdn, edata);
    }

    return false;
}

static bool
extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
    return extent_register_impl(tsdn, pac, edata, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
    return extent_register_impl(tsdn, pac, edata, false);
}

static void
extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
    bool err = extent_register(tsdn, pac, edata);
    assert(!err);
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
    bool gdump) {
    emap_deregister_boundary(tsdn, pac->emap, edata);

    if (config_prof && gdump) {
        extent_gdump_sub(tsdn, edata);
    }
}

static void
extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
    extent_deregister_impl(tsdn, pac, edata, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
    edata_t *edata) {
    extent_deregister_impl(tsdn, pac, edata, false);
}

/*
 * Tries to find and remove an extent from ecache that can be used for the
 * given allocation request.
 */
static edata_t *
extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
    bool guarded) {
    malloc_mutex_assert_owner(tsdn, &ecache->mtx);
    assert(alignment > 0);
    if (config_debug && expand_edata != NULL) {
        /*
         * Non-NULL expand_edata indicates in-place expanding realloc.
         * new_addr must either refer to a non-existing extent, or to
         * the base of an extant extent, since only active slabs support
         * interior lookups (which of course cannot be recycled).
         */
        void *new_addr = edata_past_get(expand_edata);
        assert(PAGE_ADDR2BASE(new_addr) == new_addr);
        assert(alignment <= PAGE);
    }

    edata_t *edata;
    eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
    if (expand_edata != NULL) {
        edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
            expand_edata, EXTENT_PAI_PAC, ecache->state);
        if (edata != NULL) {
            extent_assert_can_expand(expand_edata, edata);
            if (edata_size_get(edata) < size) {
                emap_release_edata(tsdn, pac->emap, edata,
                    ecache->state);
                edata = NULL;
            }
        }
    } else {
        /*
         * A large extent might be broken up from its original size to
         * some small size to satisfy a small request. When that small
         * request is freed, though, it won't merge back with the
         * larger extent if delayed coalescing is on. The large extent
         * can then no longer satisfy a request for its original size.
         * To limit this effect, when delayed coalescing is enabled, we
         * put a cap on how big an extent we can split for a request.
         */
        unsigned lg_max_fit = ecache->delay_coalesce
            ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;

        /*
         * If split and merge are not allowed (Windows w/o retain), try
         * exact fit only.
         *
         * For simplicity, splitting guarded extents is not supported;
         * hence we do exact fit only for guarded allocations.
         */
        bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
        edata = eset_fit(eset, size, alignment, exact_only,
            lg_max_fit);
    }
    if (edata == NULL) {
        return NULL;
    }
    assert(!guarded || edata_guarded_get(edata));
    extent_activate_locked(tsdn, pac, ecache, eset, edata);

    return edata;
}

/*
 * Given an allocation request and an extent guaranteed to be able to satisfy
 * it, this splits off lead and trail extents, leaving edata pointing to an
 * extent satisfying the allocation.
 * This function doesn't put lead or trail into any ecache; it's the caller's
 * job to ensure that they can be reused.
 */
typedef enum {
    /*
     * Split successfully. lead, edata, and trail are modified to extents
     * describing the ranges before, in, and after the given allocation.
     */
    extent_split_interior_ok,
    /*
     * The extent can't satisfy the given allocation request. None of the
     * input edata_t *s are touched.
     */
    extent_split_interior_cant_alloc,
    /*
     * In a potentially invalid state. Must leak (if *to_leak is non-NULL),
     * and salvage what's still salvageable (if *to_salvage is non-NULL).
     * None of lead, edata, or trail are valid.
     */
    extent_split_interior_error
} extent_split_interior_result_t;

static extent_split_interior_result_t
extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    /* The result of splitting, in case of success. */
    edata_t **edata, edata_t **lead, edata_t **trail,
    /* The mess to clean up, in case of error. */
    edata_t **to_leak, edata_t **to_salvage,
    edata_t *expand_edata, size_t size, size_t alignment) {
    size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
        PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
    assert(expand_edata == NULL || leadsize == 0);
    if (edata_size_get(*edata) < leadsize + size) {
        return extent_split_interior_cant_alloc;
    }
    size_t trailsize = edata_size_get(*edata) - leadsize - size;

    *lead = NULL;
    *trail = NULL;
    *to_leak = NULL;
    *to_salvage = NULL;

    /* Split the lead. */
    if (leadsize != 0) {
        assert(!edata_guarded_get(*edata));
        *lead = *edata;
        *edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
            size + trailsize, /* holding_core_locks */ true);
        if (*edata == NULL) {
            *to_leak = *lead;
            *lead = NULL;
            return extent_split_interior_error;
        }
    }

    /* Split the trail. */
    if (trailsize != 0) {
        assert(!edata_guarded_get(*edata));
        *trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
            trailsize, /* holding_core_locks */ true);
        if (*trail == NULL) {
            *to_leak = *edata;
            *to_salvage = *lead;
            *lead = NULL;
            *edata = NULL;
            return extent_split_interior_error;
        }
    }

    return extent_split_interior_ok;
}

/*
 * This fulfills the indicated allocation request out of the given extent (which
 * the caller should have ensured was big enough). If there's any unused space
 * before or after the resulting allocation, that space is given its own extent
 * and put back into ecache.
 */
static edata_t *
extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
    edata_t *edata, bool growing_retained) {
    assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
    malloc_mutex_assert_owner(tsdn, &ecache->mtx);

    edata_t *lead;
    edata_t *trail;
    edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
    edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);

    extent_split_interior_result_t result = extent_split_interior(
        tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
        expand_edata, size, alignment);

    if (!maps_coalesce && result != extent_split_interior_ok
        && !opt_retain) {
        /*
         * Split isn't supported (implies Windows w/o retain). Avoid
         * leaking the extent.
         */
        assert(to_leak != NULL && lead == NULL && trail == NULL);
        extent_deactivate_locked(tsdn, pac, ecache, to_leak);
        return NULL;
    }

    if (result == extent_split_interior_ok) {
        if (lead != NULL) {
            extent_deactivate_locked(tsdn, pac, ecache, lead);
        }
        if (trail != NULL) {
            extent_deactivate_locked(tsdn, pac, ecache, trail);
        }
        return edata;
    } else {
        /*
         * We should have picked an extent that was large enough to
         * fulfill our allocation request.
         */
        assert(result == extent_split_interior_error);
        if (to_salvage != NULL) {
            extent_deregister(tsdn, pac, to_salvage);
        }
        if (to_leak != NULL) {
            extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
            /*
             * May go down the purge path (which assumes no ecache
             * locks are held). This only happens with OOM-caused
             * split failures.
             */
            malloc_mutex_unlock(tsdn, &ecache->mtx);
            extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
                growing_retained);
            malloc_mutex_lock(tsdn, &ecache->mtx);
        }
        return NULL;
    }
    unreachable();
}

/*
 * Tries to satisfy the given allocation request by reusing one of the extents
 * in the given ecache_t.
 */
static edata_t *
extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
    bool *commit, bool growing_retained, bool guarded) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    assert(!guarded || expand_edata == NULL);
    assert(!guarded || alignment <= PAGE);

    malloc_mutex_lock(tsdn, &ecache->mtx);

    edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
        expand_edata, size, alignment, guarded);
    if (edata == NULL) {
        malloc_mutex_unlock(tsdn, &ecache->mtx);
        return NULL;
    }

    edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
        size, alignment, edata, growing_retained);
    malloc_mutex_unlock(tsdn, &ecache->mtx);
    if (edata == NULL) {
        return NULL;
    }

    assert(edata_state_get(edata) == extent_state_active);
    if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
        growing_retained)) {
        extent_record(tsdn, pac, ehooks, ecache, edata);
        return NULL;
    }
    if (edata_committed_get(edata)) {
        /*
         * This reverses the purpose of this variable: previously it
         * was treated as an input parameter; now it turns into an
         * output parameter, reporting whether the edata has actually
         * been committed.
         */
        *commit = true;
    }
    return edata;
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each shard.
 */
static edata_t *
extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    size_t size, size_t alignment, bool zero, bool *commit) {
    malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);

    size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size_min < size) {
        goto label_err;
    }
    /*
     * Find the next extent size in the series that would be large enough to
     * satisfy this request.
     */
    size_t alloc_size;
    pszind_t exp_grow_skip;
    bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
        &alloc_size, &exp_grow_skip);
    if (err) {
        goto label_err;
    }

    edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
    if (edata == NULL) {
        goto label_err;
    }
    bool zeroed = false;
    bool committed = false;

    void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
        &committed);

    if (ptr == NULL) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
        goto label_err;
    }

    edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
        alloc_size, false, SC_NSIZES, extent_sn_next(pac),
        extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
        EXTENT_IS_HEAD);

    if (extent_register_no_gdump_add(tsdn, pac, edata)) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
        goto label_err;
    }

    if (edata_committed_get(edata)) {
        *commit = true;
    }

    edata_t *lead;
    edata_t *trail;
    edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
    edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);

    extent_split_interior_result_t result = extent_split_interior(tsdn,
        pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
        size, alignment);

    if (result == extent_split_interior_ok) {
        if (lead != NULL) {
            extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
                lead);
        }
        if (trail != NULL) {
            extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
                trail);
        }
    } else {
        /*
         * We should have allocated a sufficiently large extent; the
         * cant_alloc case should not occur.
         */
        assert(result == extent_split_interior_error);
        if (to_salvage != NULL) {
            if (config_prof) {
                extent_gdump_add(tsdn, to_salvage);
            }
            extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
                to_salvage);
        }
        if (to_leak != NULL) {
            extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
            extents_abandon_vm(tsdn, pac, ehooks,
                &pac->ecache_retained, to_leak, true);
        }
        goto label_err;
    }

    if (*commit && !edata_committed_get(edata)) {
        if (extent_commit_impl(tsdn, ehooks, edata, 0,
            edata_size_get(edata), true)) {
            extent_record(tsdn, pac, ehooks,
                &pac->ecache_retained, edata);
            goto label_err;
        }
        /* A successful commit should return zeroed memory. */
        if (config_debug) {
            void *addr = edata_addr_get(edata);
            size_t *p = (size_t *)(uintptr_t)addr;
            /* Check the first page only. */
            for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
                assert(p[i] == 0);
            }
        }
    }

    /*
     * Increment extent_grow_next if doing so wouldn't exceed the allowed
     * range.
     */
    /* All opportunities for failure are past. */
    exp_grow_size_commit(&pac->exp_grow, exp_grow_skip);
    malloc_mutex_unlock(tsdn, &pac->grow_mtx);

    if (config_prof) {
        /* Adjust gdump stats now that extent is final size. */
        extent_gdump_add(tsdn, edata);
    }
    if (zero && !edata_zeroed_get(edata)) {
        ehooks_zero(tsdn, ehooks, edata_base_get(edata),
            edata_size_get(edata));
    }
    return edata;
label_err:
    malloc_mutex_unlock(tsdn, &pac->grow_mtx);
    return NULL;
}

static edata_t *
extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *expand_edata, size_t size, size_t alignment, bool zero,
    bool *commit, bool guarded) {
    assert(size != 0);
    assert(alignment != 0);

    malloc_mutex_lock(tsdn, &pac->grow_mtx);

    edata_t *edata = extent_recycle(tsdn, pac, ehooks,
        &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
        /* growing_retained */ true, guarded);
    if (edata != NULL) {
        malloc_mutex_unlock(tsdn, &pac->grow_mtx);
        if (config_prof) {
            extent_gdump_add(tsdn, edata);
        }
    } else if (opt_retain && expand_edata == NULL && !guarded) {
        edata = extent_grow_retained(tsdn, pac, ehooks, size,
            alignment, zero, commit);
        /* extent_grow_retained() always releases pac->grow_mtx. */
    } else {
        malloc_mutex_unlock(tsdn, &pac->grow_mtx);
    }
    malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);

    return edata;
}

static bool
extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    edata_t *inner, edata_t *outer, bool forward) {
    extent_assert_can_coalesce(inner, outer);
    eset_remove(&ecache->eset, outer);

    bool err = extent_merge_impl(tsdn, pac, ehooks,
        forward ? inner : outer, forward ? outer : inner,
        /* holding_core_locks */ true);
    if (err) {
        extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
            extent_state_merging);
    }

    return err;
}

static edata_t *
extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *edata, bool *coalesced) {
    assert(!edata_guarded_get(edata));
    /*
     * We avoid checking / locking inactive neighbors for large size
     * classes, since they are eagerly coalesced on deallocation which can
     * cause lock contention.
     */
    /*
     * Continue attempting to coalesce until failure, to protect against
     * races with other threads that are thwarted by this one.
     */
    bool again;
    do {
        again = false;

        /* Try to coalesce forward. */
        edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
            edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
        if (next != NULL) {
            if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
                next, true)) {
                if (ecache->delay_coalesce) {
                    /* Do minimal coalescing. */
                    *coalesced = true;
                    return edata;
                }
                again = true;
            }
        }

        /* Try to coalesce backward. */
        edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
            edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
        if (prev != NULL) {
            if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
                prev, false)) {
                edata = prev;
                if (ecache->delay_coalesce) {
                    /* Do minimal coalescing. */
                    *coalesced = true;
                    return edata;
                }
                again = true;
            }
        }
    } while (again);

    if (ecache->delay_coalesce) {
        *coalesced = false;
    }
    return edata;
}

static edata_t *
extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *edata, bool *coalesced) {
    return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
        coalesced);
}

static edata_t *
extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, edata_t *edata, bool *coalesced) {
    return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
        coalesced);
}

/* Purge a single extent to retained / unmapped directly. */
static void
extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    size_t extent_size = edata_size_get(edata);
    extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
    if (config_stats) {
        /* Update stats accordingly. */
        LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
        locked_inc_u64(tsdn,
            LOCKEDINT_MTX(*pac->stats_mtx),
            &pac->stats->decay_dirty.nmadvise, 1);
        locked_inc_u64(tsdn,
            LOCKEDINT_MTX(*pac->stats_mtx),
            &pac->stats->decay_dirty.purged,
            extent_size >> LG_PAGE);
        LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
        atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
            ATOMIC_RELAXED);
    }
}

/*
 * Does the metadata management portions of putting an unused extent into the
 * given ecache_t (coalesces and inserts into the eset).
 */
void
extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
    edata_t *edata) {
    assert((ecache->state != extent_state_dirty &&
        ecache->state != extent_state_muzzy) ||
        !edata_zeroed_get(edata));

    malloc_mutex_lock(tsdn, &ecache->mtx);

    emap_assert_mapped(tsdn, pac->emap, edata);

    if (edata_guarded_get(edata)) {
        goto label_skip_coalesce;
    }
    if (!ecache->delay_coalesce) {
        edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
            NULL);
    } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
        assert(ecache == &pac->ecache_dirty);
        /* Always coalesce large extents eagerly. */
        bool coalesced;
        do {
            assert(edata_state_get(edata) == extent_state_active);
            edata = extent_try_coalesce_large(tsdn, pac, ehooks,
                ecache, edata, &coalesced);
        } while (coalesced);
        if (edata_size_get(edata) >=
            atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
            && extent_may_force_decay(pac)) {
            /* Shortcut to purge the oversize extent eagerly. */
            malloc_mutex_unlock(tsdn, &ecache->mtx);
            extent_maximally_purge(tsdn, pac, ehooks, edata);
            return;
        }
    }
label_skip_coalesce:
    extent_deactivate_locked(tsdn, pac, ecache, edata);

    malloc_mutex_unlock(tsdn, &ecache->mtx);
}

void
extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (extent_register(tsdn, pac, edata)) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
        return;
    }
    extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    bool err;

    assert(edata_base_get(edata) != NULL);
    assert(edata_size_get(edata) != 0);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    edata_addr_set(edata, edata_base_get(edata));

    /* Try to deallocate. */
    err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), edata_committed_get(edata));

    if (!err) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
    }

    return err;
}

edata_t *
extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
    bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
    if (edata == NULL) {
        return NULL;
    }
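    /* Page-align the requested alignment before calling the raw alloc hook. */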
    size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
    void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
        &zero, commit);
    if (addr == NULL) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
        return NULL;
    }
    edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
        size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
        extent_state_active, zero, *commit, EXTENT_PAI_PAC,
        opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
    /*
     * Retained memory is not counted towards gdump. gdump should be
     * updated only if an extent is allocated as a separate mapping,
     * i.e. when growing_retained is false.
     */
    bool gdump_add = !growing_retained;
    if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
        edata_cache_put(tsdn, pac->edata_cache, edata);
        return NULL;
    }

    return edata;
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    /* Avoid calling the default extent_dalloc unless we have to. */
    if (!ehooks_dalloc_will_fail(ehooks)) {
        /* Remove guard pages for dalloc / unmap. */
        if (edata_guarded_get(edata)) {
            assert(ehooks_are_default(ehooks));
            san_unguard_pages_two_sided(tsdn, ehooks, edata,
                pac->emap);
        }
        /*
         * Deregister first to avoid a race with other allocating
         * threads, and reregister if deallocation fails.
         */
        extent_deregister(tsdn, pac, edata);
        if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) {
            return;
        }
        extent_reregister(tsdn, pac, edata);
    }

    /* Try to decommit; purge if that fails. */
    bool zeroed;
    if (!edata_committed_get(edata)) {
        zeroed = true;
    } else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
        edata_size_get(edata))) {
        zeroed = true;
    } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), 0, edata_size_get(edata))) {
        zeroed = true;
    } else if (edata_state_get(edata) == extent_state_muzzy ||
        !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), 0, edata_size_get(edata))) {
        zeroed = false;
    } else {
        zeroed = false;
    }
    edata_zeroed_set(edata, zeroed);

    if (config_prof) {
        extent_gdump_sub(tsdn, edata);
    }

    extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
}

void
extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata) {
    assert(edata_base_get(edata) != NULL);
    assert(edata_size_get(edata) != 0);
    extent_state_t state = edata_state_get(edata);
    assert(state == extent_state_retained || state == extent_state_active);
    assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);

    if (edata_guarded_get(edata)) {
        assert(opt_retain);
        san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
    }
    edata_addr_set(edata, edata_base_get(edata));

    /* Try to destroy; silently fail otherwise. */
    ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), edata_committed_get(edata));

    edata_cache_put(tsdn, pac->edata_cache, edata);
}

static bool
extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
    edata_committed_set(edata, edata_committed_get(edata) || !err);
    return err;
}

bool
extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    return extent_commit_impl(tsdn, ehooks, edata, offset, length,
        /* growing_retained */ false);
}

bool
extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, 0);
    bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
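    /* The extent stays marked committed only if the decommit hook failed. */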
    edata_committed_set(edata, edata_committed_get(edata) && err);
    return err;
}

static bool
extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
    return err;
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
        length, false);
}

static bool
extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);
    bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
        edata_size_get(edata), offset, length);
    return err;
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    size_t offset, size_t length) {
    return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
        false);
}

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split. The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters go
 * with the trail (the higher addressed portion). This makes 'extent' the lead,
 * and returns the trail (except in case of error).
 */
static edata_t *
extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
    assert(edata_size_get(edata) == size_a + size_b);
    /* Only the shrink path may split w/o holding core locks. */
    if (holding_core_locks) {
        witness_assert_positive_depth_to_rank(
            tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
    } else {
        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
            WITNESS_RANK_CORE, 0);
    }

    if (ehooks_split_will_fail(ehooks)) {
        return NULL;
    }

    edata_t *trail = edata_cache_get(tsdn, pac->edata_cache);
    if (trail == NULL) {
        goto label_error_a;
    }

    edata_init(trail, edata_arena_ind_get(edata),
        (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
        /* slab */ false, SC_NSIZES, edata_sn_get(edata),
        edata_state_get(edata), edata_zeroed_get(edata),
        edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
    emap_prepare_t prepare;
    bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
        size_a, trail, size_b);
    if (err) {
        goto label_error_b;
    }

    /*
     * No need to acquire trail or edata, because: 1) trail was new (just
     * allocated); and 2) edata is either an active allocation (the shrink
     * path), or in an acquired state (extracted from the ecache on the
     * extent_recycle_split path).
     */
    assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
    assert(emap_edata_is_acquired(tsdn, pac->emap, trail));

    err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
        size_a, size_b, edata_committed_get(edata));

    if (err) {
        goto label_error_b;
    }

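    /*
     * The raw split succeeded; shrink the lead in place and publish both
     * halves in the emap.
     */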
    edata_size_set(edata, size_a);
    emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
        size_b);

    return trail;
label_error_b:
    edata_cache_put(tsdn, pac->edata_cache, trail);
label_error_a:
    return NULL;
}

edata_t *
extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
    size_t size_a, size_t size_b, bool holding_core_locks) {
    return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
        holding_core_locks);
}

static bool
extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
    edata_t *b, bool holding_core_locks) {
    /* Only the expanding path may merge w/o holding ecache locks. */
    if (holding_core_locks) {
        witness_assert_positive_depth_to_rank(
            tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
    } else {
        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
            WITNESS_RANK_CORE, 0);
    }

    assert(edata_base_get(a) < edata_base_get(b));
    assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
    assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
    emap_assert_mapped(tsdn, pac->emap, a);
    emap_assert_mapped(tsdn, pac->emap, b);

    bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
        edata_size_get(a), edata_base_get(b), edata_size_get(b),
        edata_committed_get(a));

    if (err) {
        return true;
    }

    /*
     * The rtree writes must happen while all the relevant elements are
     * owned, so the following code uses decomposed helper functions rather
     * than extent_{,de}register() to do things in the right order.
     */
    emap_prepare_t prepare;
    emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);

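    /*
     * The lower-addressed extent may be an acquired neighbor still in the
     * merging state; restore it to active before folding b's range into it.
     */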
    assert(edata_state_get(a) == extent_state_active ||
        edata_state_get(a) == extent_state_merging);
    edata_state_set(a, extent_state_active);
    edata_size_set(a, edata_size_get(a) + edata_size_get(b));
    edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
        edata_sn_get(a) : edata_sn_get(b));
    edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));

    emap_merge_commit(tsdn, pac->emap, &prepare, a, b);

    edata_cache_put(tsdn, pac->edata_cache, b);

    return false;
}

bool
extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    edata_t *a, edata_t *b) {
    return extent_merge_impl(tsdn, pac, ehooks, a, b,
        /* holding_core_locks */ false);
}

bool
extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
    bool commit, bool zero, bool growing_retained) {
    witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
        WITNESS_RANK_CORE, growing_retained ? 1 : 0);

    if (commit && !edata_committed_get(edata)) {
        if (extent_commit_impl(tsdn, ehooks, edata, 0,
            edata_size_get(edata), growing_retained)) {
            return true;
        }
    }
    if (zero && !edata_zeroed_get(edata)) {
        void *addr = edata_base_get(edata);
        size_t size = edata_size_get(edata);
        ehooks_zero(tsdn, ehooks, addr, size);
    }
    return false;
}

bool
extent_boot(void) {
    assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));

    if (have_dss) {
        extent_dss_boot();
    }

    return false;
}